1 selenium概述
1.什么是selenium?
(1)Selenium是一个用于Web应用程序测试的工具。
(2)Selenium 测试直接运行在浏览器中,就像真正的用户在操作一样。
(3)支持通过各种driver(FirefoxDriver,InternetExplorerDriver,OperaDriver,ChromeDriver)驱动真实浏览器完成测试。
(4)selenium也是支持无界面浏览器操作的。
2.为什么使用selenium?
模拟浏览器功能,自动执行网页中的js代码,实现动态加载
3.如何安装selenium?
(1)操作谷歌浏览器驱动下载地址
http://chromedriver.storage.googleapis.com/index.html
(2)谷歌驱动和谷歌浏览器版本之间的映射表
http://blog.csdn.net/huilan_same/article/details/51896672
(3)查看谷歌浏览器版本
谷歌浏览器右上角-->帮助-->关于
(4)安装selenium4.1.0版本
pip install selenium==4.1.0
4.selenium的使用步骤?
(1)导入:from selenium import webdriver
(2)创建谷歌浏览器操作对象:
a、如果驱动路径和采集数据的py文件在同一个目录时,创建对象如下:
browser = webdriver.Chrome()
b、如果驱动路径和采集数据的py文件不在同一个目录时,创建对象如下:
path = 谷歌浏览器驱动文件路径
browser = webdriver.Chrome(path)
(3)访问网址
url = 要访问的网址
browser.get(url)
5、selenium的元素定位?
元素定位:自动化要做的就是模拟鼠标和键盘来操作这些元素,点击、输入等等。操作这些元素前首先要找到它们,WebDriver提供很多定位元素的方法,现以百度网页为例
driver.find_element_by_id("kw")
driver.find_element_by_name("wd")
driver.find_element_by_class_name("s_ipt")
driver.find_element_by_tag_name("input")
driver.find_element_by_xpath("//*[@id='kw']")
driver.find_element_by_xpath("//*[@name='wd']")
driver.find_element_by_xpath("//input[@class='s_ipt']")
driver.find_element_by_xpath("/html/body/form/span/input")
driver.find_element_by_xpath("//span[@class='soutu-btn']/input")
driver.find_element_by_xpath("//form[@id='form']/span/input")
driver.find_element_by_xpath("//input[@id='kw' and @name='wd']")
driver.find_element_by_css_selector("#kw")
driver.find_element_by_css_selector("[name=wd]")
driver.find_element_by_css_selector(".s_ipt")
driver.find_element_by_css_selector("html > body > form > span > input")
driver.find_element_by_css_selector("span.soutu-btn> input#kw")
driver.find_element_by_css_selector("form#form > span > input")
driver.find_element_by_link_text("新闻")
driver.find_element_by_link_text("hao123")
driver.find_element_by_partial_link_text("新")
driver.find_element_by_partial_link_text("hao")
driver.find_element_by_partial_link_text("123")
find_elements_by_id()
find_elements_by_name()
find_elements_by_class_name()
find_elements_by_tag_name()
find_elements_by_link_text()
find_elements_by_partial_link_text()
find_elements_by_xpath()
find_elements_by_css_selector()
2 selenium访问百度网页输入"python"
from selenium import webdriver
from selenium.webdriver.common.by import By
import time

# Open Baidu, type "python" into the search box and click the search button.
url = 'https://www.baidu.com/'
browser = webdriver.Chrome()
browser.get(url)
time.sleep(10)
# Selenium 4 locator API: find_element_by_id() is deprecated (removed in 4.3+).
browser.find_element(By.ID, "kw").send_keys('python')
time.sleep(10)
browser.find_element(By.ID, "su").click()
time.sleep(20)
selenium常用配置,模拟用户操作百度网页,将以上代码改为:
from selenium import webdriver
from selenium.webdriver.common.by import By
import time

# Common Chrome options for simulating a real user session.
options = webdriver.ChromeOptions()
# Keep the browser window open after the script finishes.
options.add_experimental_option('detach', True)
options.add_argument('--disable-extensions')       # run without browser extensions
options.add_argument('--disable-popup-blocking')   # do not block popup windows
driver = webdriver.Chrome(options=options)
driver.get('https://www.baidu.com/')
driver.maximize_window()
# Selenium 4 locator API: find_element_by_id() is deprecated (removed in 4.3+).
driver.find_element(By.ID, "kw").send_keys('python')
time.sleep(3)
driver.find_element(By.ID, "su").click()
time.sleep(5)
3、selenium采集BOSS直聘数据
from selenium import webdriver
from selenium.webdriver.common.by import By
from time import sleep
import time
def spider(url):
    """Open the BOSS Zhipin page, search for data-analyst jobs, and scrape
    the first four result pages.

    Uses the module-level ``driver`` created under ``__main__``.
    """
    driver.get(url)
    sleep(3)
    # Type the query into the search box and submit the form.
    search = driver.find_element(By.XPATH, '//*[@id="wrap"]/div[3]/div/div[1]/div[1]/form/div[2]/p/input')
    search.send_keys("数据分析师")
    sleep(2)
    submit = driver.find_element(By.XPATH, '//*[@id="wrap"]/div[3]/div/div[1]/div[1]/form/button')
    submit.click()
    print('搜索结束,开始寻找信息')
    driver.implicitly_wait(5)
    sleep(3)
    parser(driver)
    # Pages 2..4: click the "next page" arrow, wait for the page to load, parse it.
    for page in range(2, 5):
        # Fixed typo in the progress message ("爬去" -> "爬取").
        print("正在爬取第{}页".format(page))
        # Consistent Selenium 4 API: find_element_by_class_name() is deprecated.
        next_page = driver.find_element(By.CLASS_NAME, "ui-icon-arrow-right")
        next_page.click()
        sleep(5)
        parser(driver)
def parser(driver):
    """Print job name, location, salary and detail link for every job card
    on the currently loaded result page."""
    cards = driver.find_elements(By.CSS_SELECTOR, '.job-card-wrapper')
    print('定位完成')
    for card in cards:
        job_name = card.find_element(By.CSS_SELECTOR, '.job-name').text
        detail_link = card.find_element(By.CSS_SELECTOR, '.job-card-left').get_attribute('href')
        job_area = card.find_element(By.CSS_SELECTOR, '.job-area').text
        job_salary = card.find_element(By.CSS_SELECTOR, '.salary').text
        print('工作名字:' + job_name + ' 工作场所:', job_area + ' 工资:' + job_salary + ' 详细链接:' + detail_link)
        print('\n')
if __name__ == '__main__':
    # Start page of BOSS Zhipin (Zhengzhou region).
    url = 'https://www.zhipin.com/zhengzhou/?sid=sem_pz_360pc_title'
    # Module-level driver shared by spider()/parser().
    driver = webdriver.Chrome()
    driver.maximize_window()
    spider(url)
4、selenium采集中国大学MOOC课程信息
4.1单页采集mooc课程评论数据
import requests

# Fetch the MOOC course page with a browser-like User-Agent header.
url = 'https://www.icourse163.org/course/ZJU-200001'
headers = {"user-agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4214.3 Safari/537.36"}
# Renamed from `re` -> `resp`: the original name shadowed the stdlib `re` module.
resp = requests.get(url, headers=headers)
resp.encoding = "utf-8"
response = resp.text
print(response)
from selenium import webdriver
import time
def spider(url):
    """Open the MOOC course page, switch to the review tab, and parse it.

    Uses the module-level ``driver`` created under ``__main__``.
    """
    # Local import keeps this snippet self-contained in the notes file.
    from selenium.webdriver.common.by import By
    driver.get(url)
    driver.implicitly_wait(3)
    # Selenium 4 locator API: find_element_by_id() is deprecated (removed in 4.3+).
    button = driver.find_element(By.ID, "review-tag-button")
    button.click()
    time.sleep(5)
    paser(driver)
def paser(driver):
    """Parse reviewer name, comment text, post time, course-run count and
    like count from every review item on the current page and print them."""
    # Local import keeps this snippet self-contained in the notes file.
    from selenium.webdriver.common.by import By
    items = driver.find_elements(By.CLASS_NAME, "ux-mooc-comment-course-comment_comment-list_item_body")
    # Each item's text is "name\ncontent\ntime\ncourse_count\nlike_count".
    # Renamed the accumulator: the original used `all`, shadowing the builtin.
    rows = [item.text.split("\n") for item in items]
    # Same result as the original zip of five column lists: a list of 5-tuples.
    all_info = [(r[0], r[1], r[2], r[3], r[4]) for r in rows]
    print(all_info)
if __name__ == '__main__':
    # Course page URL for the single-page comment scrape.
    url = 'https://www.icourse163.org/course/ZJU-200001?from=searchPage&outVendor=zw_mooc_pcssjg_'
    # Module-level driver shared by spider()/paser().
    driver = webdriver.Chrome()
    spider(url)
4.2 分页采集mooc课程评论数据并保存到csv中
from selenium import webdriver
import time
import csv
def spider(url):
    """Open the MOOC course page, switch to the review tab, and scrape
    the first four pages of comments.

    Uses the module-level ``driver`` created under ``__main__``.
    """
    # Local import keeps this snippet self-contained in the notes file.
    from selenium.webdriver.common.by import By
    driver.get(url)
    driver.implicitly_wait(3)
    # Selenium 4 locator API: find_element_by_id() is deprecated (removed in 4.3+).
    button = driver.find_element(By.ID, "review-tag-button")
    button.click()
    time.sleep(5)
    paser(driver)
    # Pages 2..4: click the pager's "next" button and parse each page.
    for page in range(2, 5):
        # Fixed typo in the progress message ("爬去" -> "爬取").
        print("正在爬取第{}页".format(page))
        next_page = driver.find_element(By.CLASS_NAME, "ux-pager_btn__next")
        next_page.click()
        time.sleep(5)
        paser(driver)
def paser(driver):
    """Parse reviewer name, comment text, post time, course-run count and
    like count from every review item on the current page, then append
    the rows to mooc.csv via save()."""
    # Local import keeps this snippet self-contained in the notes file.
    from selenium.webdriver.common.by import By
    items = driver.find_elements(By.CLASS_NAME, "ux-mooc-comment-course-comment_comment-list_item_body")
    # Each item's text is "name\ncontent\ntime\ncourse_count\nlike_count".
    # Renamed the accumulator: the original used `all`, shadowing the builtin.
    rows = [item.text.split("\n") for item in items]
    # Same result as the original zip of five column lists: a list of 5-tuples.
    all_info = [(r[0], r[1], r[2], r[3], r[4]) for r in rows]
    save(all_info)
def save(all_info):
    """Append one CSV row per record in *all_info* to mooc.csv (UTF-8)."""
    with open("mooc.csv", "a+", newline="", encoding="utf-8") as f:
        # writerows() emits each record exactly as the original per-row loop did.
        csv.writer(f).writerows(all_info)
if __name__ == '__main__':
    # Write the CSV header row once, before any data rows are appended.
    header = ["评论者", "评论内容", "发表时间", "开课次数", "点赞数"]
    with open("mooc.csv", "a+", newline="", encoding="utf-8") as f:
        csv.writer(f).writerow(header)
    # Module-level driver shared by spider()/paser().
    driver = webdriver.Chrome()
    url = 'https://www.icourse163.org/course/ZJU-200001?from=searchPage&outVendor=zw_mooc_pcssjg_'
    spider(url)