# Python crawler practice — a series of Selenium operations jumping from Baidu to Weibo and other sites.
# The code is somewhat messy; kept as-is for practice notes.
# Date: 2022/4/26 10:22
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver import ActionChains
from selenium.webdriver.support.ui import Select
import time
# Selenium practice script: open Baidu, navigate to Weibo / xyc.edu.cn,
# then exercise browser history, window switching, and (commented out)
# alerts, iframes, <select> dropdowns, and mouse actions.
driver = webdriver.Chrome()
try:
    # Implicit wait: every find_element polls up to 10 s before raising.
    driver.implicitly_wait(10)
    driver.get('http://www.baidu.com')

    # --- Browser history: back / forward ---
    driver.get('https://e.weibo.com/register/register')
    driver.get('https://www.xyc.edu.cn')
    driver.back()
    driver.back()
    driver.forward()
    driver.forward()

    # --- Window (tab) switching ---
    time.sleep(3)
    driver.find_element(By.LINK_TEXT, '新闻').click()
    time.sleep(3)
    # Switch back to the original window.
    driver.switch_to.window(driver.window_handles[0])
    time.sleep(3)
    # Switch to the newly opened window — but only if the click actually
    # opened one; indexing window_handles[1] unconditionally raised
    # IndexError when the link reused the current tab.
    if len(driver.window_handles) > 1:
        driver.switch_to.window(driver.window_handles[1])

    # --- JavaScript alert handling (disabled) ---
    # js = 'alert("上午")'
    # driver.execute_script(js)
    # time.sleep(3)
    # Switch to the alert popup, read its text, then dismiss it.
    # al = driver.switch_to.alert
    # print(al.text)
    # al.dismiss()

    # --- iframe switching and <select> dropdowns (disabled) ---
    # driver.get('https://e.weibo.com/register/register')
    # ifname = driver.find_element(By.ID, 'reg_table')
    # driver.switch_to.frame(ifname)
    # select = Select(driver.find_element(By.NAME, 'province'))
    # select.select_by_index(1)
    # time.sleep(3)
    # select.select_by_value('50')
    # time.sleep(3)
    # select.select_by_visible_text('广西')

    # --- Mouse actions: hover, right-click, drag-and-drop (disabled) ---
    # driver.get('http://www.baidu.com')
    # ac = driver.find_element(By.PARTIAL_LINK_TEXT, 'hao')
    # ac2 = driver.find_element(By.LINK_TEXT, '百度一下')
    # ActionChains(driver).move_to_element(ac).perform()
    # ActionChains(driver).context_click(ac).perform()
    # ActionChains(driver).drag_and_drop(ac, ac2).perform()
    # time.sleep(3)

    # --- Miscellaneous element lookups (disabled) ---
    # driver.find_element(By.NAME, 'wd').send_keys('新余')
    # driver.find_element(By.TAG_NAME, 'title').text
    # print(driver.find_element(By.XPATH, '//div[@id="s-top-left"]').text)
    # driver.find_element(By.LINK_TEXT, '新闻').click()
    # NOTE: original used By.css_SELECTOR, which does not exist and would
    # raise AttributeError; the correct constant is By.CSS_SELECTOR.
    # print(driver.find_element(By.CSS_SELECTOR, 'div#s-top-left').text)
    # time.sleep(5)
finally:
    # Always close the browser, even when a navigation/lookup step raises;
    # the original script leaked the Chrome process on any exception.
    driver.quit()