# 2021年10月29日完成,爬取51job网站获得相关岗位信息
from selenium import webdriver
from selenium.webdriver import FirefoxOptions
from lxml import etree
from xpinyin import Pinyin
import time
import re
import csv
import ast
import requests
# Browser setup.
# Bug fix: `option` was previously constructed but never handed to the driver,
# so any configured options were silently ignored.
option = FirefoxOptions()
driver = webdriver.Firefox(options=option)
driver.implicitly_wait(10)

# Prompt for the target city and the job title to search for.
goal = input("目标城市:")
jobName = input("目标岗位:")

# Convert the city name to pinyin — 51job uses the pinyin form in city URLs.
p = Pinyin()
cityPinyin = p.get_pinyin(goal, '')

# Open the landing page for that city.
cityUrl = 'https://www.51job.com/' + cityPinyin + '/'
driver.get(cityUrl)

# Type the job name into the search box, then click the first button (search);
# the browser navigates to page 1 of the search results.
driver.find_element_by_xpath('//*[@id="kwdselectid"]').send_keys(jobName)
driver.find_elements_by_xpath('//button')[0].click()

# Give the results page a moment to load.
time.sleep(1)

# Split the page-1 URL into a prefix (everything before the page number) and a
# suffix (".html?" plus the query string) so page N is url1 + str(N) + url2.
# Raw strings with escaped dots match the URL literally instead of "any char"
# and avoid invalid-escape warnings for \S.
url = driver.current_url
url1 = re.compile(r'https://search\.51job\.com/list/(\S+)\.html').search(url).group()[:-6]
url2 = re.compile(r'\.html\?(\S+)').search(url).group()
print(url1 + str(2) + url2)

# The remaining pages are fetched with plain requests (much faster than
# selenium); spoof a regular browser User-Agent so the site serves normal HTML.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:93.0) Gecko/20100101 Firefox/93.0'
}
def getPage(index):
    """Fetch search-result page *index* and return its job list.

    The 51job results page embeds the job data as a literal list inside a
    <script> tag (observed in the page source beforehand); this downloads the
    page, pulls that script out, extracts the "engine_jds" array with a regex
    and parses it into a list of dicts. Only the regular ("engine_jds") jobs
    are collected, not the separately-listed advertised jobs.

    Raises ValueError if the expected job data cannot be found on the page.
    """
    response = requests.get(url=url1 + str(index) + url2, headers=headers)
    html = etree.HTML(response.text)
    # The job list sits in the second-to-last <script> element of the page.
    scripts = html.xpath('//script/text()')
    script_text = scripts[-2]
    # Capture everything after '"engine_jds":' up to the closing
    # '"adid":"..."}]' of the last entry. Raw string avoids invalid-escape
    # warnings; group(1) replaces the old group()[13:] magic slicing.
    match = re.compile(r'"engine_jds":(.*"adid":"\S*"}\])').search(script_text)
    if match is None:
        # Fail loudly with context instead of an opaque AttributeError.
        raise ValueError('job list not found on results page %d' % index)
    # The embedded list uses Python-literal syntax; literal_eval parses it
    # safely (no arbitrary code execution, unlike eval).
    return ast.literal_eval(match.group(1))
def write_dict(value):
    """Append the job dicts in *value* as rows of 51jobs.csv.

    value: a list of dicts whose keys are (a subset of) the columns below;
    missing keys are written as empty fields, unknown extra keys are ignored.
    """
    # Column order matches the fields 51job returns for each job entry.
    table = ["type", "jt", "tags",
             "ad_track", "jobid", "coid", "effect",
             "is_special_job", "job_href",
             "job_name", "job_title",
             "company_href", "company_name",
             "providesalary_text", "workarea", "workarea_text", "updatedate", "iscommunicate",
             "companytype_text", "degreefrom", "workyear", "issuedate", "isFromXyz", "isIntern",
             "isdiffcity", "attribute_text", "companysize_text",
             "companyind_text", "adid"]
    # Bug fix: declare the encoding explicitly — the platform default (e.g.
    # gbk on Windows) can fail to encode characters found in the job data.
    with open('51jobs.csv', 'a+', newline='', encoding='utf-8') as f:
        # extrasaction='ignore' keeps a row with unexpected extra keys from
        # aborting the whole crawl with a ValueError.
        writer = csv.DictWriter(f, table, extrasaction='ignore')
        writer.writerows(value)
# Fetch page 1 again with requests to discover how many result pages exist.
response = requests.get(url=url, headers=headers)
html = etree.HTML(response.text)
# As on every results page, the data lives in the second-to-last <script>.
result = html.xpath('//script/text()')
content = result[-2]
# The total is embedded as e.g. "total_page":"37". r'(\d+)' covers any number
# of digits (the old \d\d?\d?\d?\d? pattern capped it at five) and group(1)
# replaces the fragile group()[14:-1] slicing.
total_page = int(re.search(r'"total_page":"(\d+)"', content).group(1))
# Walk every page, pausing a second between requests to go easy on the site,
# and append each page's jobs to the CSV.
for pageNow in range(1, total_page + 1):
    time.sleep(1)
    li = getPage(pageNow)
    write_dict(li)
# NOTE: the two lines below are stray text from the web article this script
# was copied from; commented out so the file is valid Python.
# 前程无忧51job爬虫利用selenium爬取岗位信息-2021年10月29日
# 最新推荐文章于 2024-05-01 14:35:23 发布