51job data analysis
from selenium.webdriver import Chrome
from selenium.webdriver.common.keys import Keys
from bs4 import BeautifulSoup
import csv

# Open the output CSV in append mode; newline='' avoids blank rows on Windows.
f = open('files/数据分析.csv', 'a', encoding='utf-8', newline='')
writer = csv.writer(f)
writer.writerow(['岗位', '薪资', '公司', '地址'])
b = Chrome()


def get_net_data():
    b.get('https://www.51job.com/')
    # Search for the keyword '数据分析' (data analysis).
    search_input = b.find_element_by_css_selector('#kwdselectid')
    search_input.send_keys('数据分析')
    search_input.send_keys(Keys.ENTER)
    while True:
        # The pager text looks like 'current/total', e.g. '1/188'.
        page_div = b.find_element_by_css_selector('.rt.rt_page')
        pages = page_div.text.split('/')
        all_page = int(pages[-1])       # total page count (kept for reference)
        current_page = int(pages[0])
        save_data(b.page_source)
        # Only scrape the first 10 pages, then stop.
        if current_page < 10:
            next_btn = b.find_element_by_css_selector('.next')
            next_btn.click()
        else:
            break
def save_data(html: str):
    soup = BeautifulSoup(html, 'lxml')
    all_job_div = soup.select('.j_joblist>.e')
    one_page_jobs = []
    for job_div in all_job_div:
        name = job_div.select_one('.jname.at').get_text()
        sal = job_div.select_one('.sal').get_text()
        company = job_div.select_one('.cname.at').get_text()
        job_url = job_div.select_one('.el').attrs['href']
        one_page_jobs.append([name, sal, company, job_url])
    writer.writerows(one_page_jobs)


if __name__ == '__main__':
    get_net_data()
Going back/forward and switching tabs
import time
from selenium.webdriver import Chrome
"""
b = Chrome()
b.get('https://www.baidu.com')
time.sleep(1)
b.get('https://www.runoob.com')
time.sleep(1)
b.get('https://movie.douban.com/top250')
time.sleep(1)
b.back()
time.sleep(1)
b.forward()
"""
b = Chrome()
b.get('https://movie.douban.com/')
# This nav link opens in a new tab, which adds a second window handle.
music = b.find_element_by_css_selector('.global-nav-items>ul>li:nth-child(4)>a')
url = music.get_attribute('href')
music.click()
time.sleep(1)
# Switch back to the original tab and navigate it.
b.switch_to.window(b.window_handles[0])
b.get('https://movie.douban.com/')
time.sleep(1)
# Switch to the new tab and load the saved URL there.
b.switch_to.window(b.window_handles[1])
b.get(url)
Waits
from selenium.webdriver import Chrome
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
b = Chrome()
b.get('https://read.douban.com/')
"""
1.隐式等待
如果设置了隐式等待时间,那么浏览器对象在通过find_element相关方法获取标签的时候,在找不到对应的时候不会马上报错,
而是在指定时间内不断尝试获取该标签,如果超过了指定时间还是获取不到才会报错
"""
b.implicitly_wait(2)
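# A quick sketch of the retry behavior described above ('.not-there' is a
# placeholder selector assumed absent from the page):
try:
    b.find_element_by_css_selector('.not-there')  # retries for up to 2s, then raises
except Exception:
    print('gave up after ~2 seconds of retrying')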
"""
2.显示等待
1)创建等待对象:WebDriverWait(浏览器对象,超时时间,检测时间间隔=500ms)
2)
等待对象.until(条件) - 等到指定条件为True获取对应的标签或者结果。如果超过了时间这个条件都不成了,会异常
等待对象.until_not(条件) - 等到指定条件为False获取对应的标签
常见的条件:
EC.presence_of_element_located((By.标签获取方式,获取方式值)):判断某个元素是否被加到dom树里(判断某个标签是否加载到网页中,不一定可见),条件成立的时候返回对应的标签
EC.visibility_of_element_located:判断某个标签是否可见(没有隐藏,并且元素的宽度和高度都不等于0),条件成立的时候返回对应的标签
EC.text_to_be_present_in_element:判断某个标签中的标签内容是否 包含 了预期的字符串,条件成立的时候返回布尔True
EC.text_to_be_present_in_element_value:判断某个标签中的value属性是否包含了预期的字符串,条件成立的时候返回布尔True
EC.element_to_be_clickable:判断某个标签是否可以点击,条件成立的时候返回对应的标签
"""
# 1) Rely on the implicit wait: find_element retries on its own.
a = b.find_element_by_css_selector('.widget-channel-links.section-container>a:nth-child(3)')
a.click()

# 2) Explicit wait: block until the link is present in the DOM, then click it.
wait = WebDriverWait(b, 10)
a = wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, '.widget-channel-links.section-container>a:nth-child(3)')))
a.click()

# 3) Explicit wait on text content: the condition returns True/False, not the element.
wait = WebDriverWait(b, 10)
a = wait.until(EC.text_to_be_present_in_element((By.CSS_SELECTOR, '.widget-channel-links.section-container>a:nth-child(3)'), '女性')))
print(a)
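# As promised above, a minimal sketch of element_to_be_clickable
# ('.some-button' is a placeholder selector, not taken from the page):
wait = WebDriverWait(b, 10)
btn = wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR, '.some-button')))
btn.click()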
JD page scrolling
from selenium.webdriver import Chrome, ChromeOptions
from selenium.webdriver.common.keys import Keys
import time
options = ChromeOptions()
# Hide the "controlled by automated software" notice and chromedriver logging noise.
options.add_experimental_option("excludeSwitches", ['enable-automation', 'enable-logging'])
# Disable image loading to speed the page up.
options.add_experimental_option("prefs", {"profile.managed_default_content_settings.images": 2})
b = Chrome(options=options)
b.get('https://www.jd.com')
b.implicitly_wait(2)
search_input = b.find_element_by_id('key')
search_input.send_keys('电脑')
search_input.send_keys(Keys.ENTER)
time.sleep(2)
# JD lazy-loads results as you scroll, so step down the page 500px at a time.
max_height = 8900
height = 500
while True:
    b.execute_script(f'window.scrollTo(0,{height})')
    height += 500
    time.sleep(1)
    if height > max_height:
        break
b.execute_script('alert("To Bottom")')
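# max_height above is hardcoded for one particular result page. A sketch that
# reads the real height from the page instead, assuming document.body.scrollHeight
# reflects the currently loaded content:
max_height = b.execute_script('return document.body.scrollHeight')
height = 500
while height <= max_height:
    b.execute_script(f'window.scrollTo(0,{height})')
    height += 500
    time.sleep(1)
    # The page can grow as lazy content loads, so refresh the limit each pass.
    max_height = b.execute_script('return document.body.scrollHeight')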
NetEase 163 Mail
from selenium.webdriver import Chrome
"""
前端在实现网页功能的时候可能出现网页中嵌套网页的现象,如果要在一个网页中嵌套另外一个网页,必须使用iframe标签。
selenium爬取的时候,通过浏览器对象默认获取到的是最外层的html对应的网页,如果要获取嵌套
页面中的内容,必须同switch_to来切换frame
"""
b = Chrome()
b.get('https://mail.163.com/')
# The login form lives inside an iframe; locate it and switch into it.
frame = b.find_element_by_css_selector('#loginDiv>iframe')
b.switch_to.frame(frame)
# page_source now reflects the iframe's document, not the outer page.
print(b.page_source)
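# As noted above, switch_to.default_content() returns to the top-level document
# once you are done inside the frame:
b.switch_to.default_content()
print(b.page_source)  # the outer page's html again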
Crawler workflow
1. Try requests + the page URL
2. Try requests + the page URL + a user-agent header
3. If login is required: requests + the page URL + user-agent + cookies (see the sketch after this list)
4. Look for a json API endpoint instead of scraping the html
5. Fall back to selenium
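A minimal sketch of steps 2 and 3 (the URL, user-agent string, and cookie value are placeholders to be filled in from a real logged-in browser):
import requests

headers = {
    'user-agent': 'Mozilla/5.0',   # placeholder UA; copy a real one from your browser
    'cookie': 'session=xxx',       # placeholder; copy from a logged-in browser session
}
response = requests.get('https://example.com/', headers=headers)
print(response.status_code)
print(response.text[:200])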