from urllib.request import urlopen, Request
from bs4 import BeautifulSoup
import xlwt
import json
# Prepare the output workbook: one sheet whose first row carries the
# Chinese column headers; `j` is the index of the next free data row.
workbook = xlwt.Workbook(encoding='utf-8')
worksheet = workbook.add_sheet('My Worksheet')
column_titles = ("序号", "工作名称", "公司名称", "公司地点", "公司类型",
                 "招聘条件", "工资待遇", "工作福利", "岗位职责", "公司链接")
for col, title in enumerate(column_titles):
    worksheet.write(0, col, title)
j = 1
# Crawl the first 5 pages of 51job search results for "大数据" (big data)
# jobs in area 080200; write one worksheet row per posting and fetch each
# job's detail page for the description text (column 8).
#
# Fixes vs. original:
#  - the query string contained "°reefrom", an HTML-entity mangling of
#    "&degreefrom" (the "&deg" was rendered as "°") — restored here;
#  - the JSON-bearing <script> is identified by its
#    "window.__SEARCH_RESULT__" marker instead of "any non-empty script",
#    which would crash json.loads() on unrelated scripts;
#  - the constant request headers are built once, outside the page loop.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
                  'AppleWebKit/537.36 (KHTML, like Gecko) '
                  'Chrome/84.0.4147.105 Safari/537.36'
}
for page in range(5):
    url = (
        "https://search.51job.com/list/080200,000000,0000,00,9,99,"
        "%25E5%25A4%25A7%25E6%2595%25B0%25E6%258D%25AE,2,{}.html"
        "?lang=c&postchannel=0000&workyear=99&cotype=99&degreefrom=99"
        "&jobterm=99&companysize=99&ord_field=0&dibiaoid=0&line=&welfare="
    ).format(page + 1)
    listing = urlopen(Request(url, headers=headers))
    soup = BeautifulSoup(listing, "html.parser")
    for script in soup.find_all("script", {"type": "text/javascript"}):
        text = script.get_text()
        # Only the script that assigns window.__SEARCH_RESULT__ carries the
        # search-result JSON; skip everything else.
        if 'window.__SEARCH_RESULT__' not in text:
            continue
        data = json.loads(text.replace('window.__SEARCH_RESULT__ =', ''))
        for job in data["engine_search_result"]:
            worksheet.write(j, 0, j)
            worksheet.write(j, 1, job['job_name'])
            worksheet.write(j, 2, job['company_name'])
            worksheet.write(j, 3, job['workarea_text'])
            worksheet.write(j, 4, job['companytype_text'])
            worksheet.write(j, 5, job['attribute_text'])
            worksheet.write(j, 6, job['providesalary_text'])
            worksheet.write(j, 7, job['jobwelf'])
            worksheet.write(j, 9, job['company_href'])
            # Fetch the job-detail page for the full description text;
            # the div may be absent, in which case column 8 stays empty.
            detail = urlopen(Request(job['job_href'], headers=headers))
            detail_soup = BeautifulSoup(detail, "html.parser")
            div = detail_soup.find("div", {"class": "bmsg job_msg inbox"})
            if div is not None:
                worksheet.write(j, 8, div.get_text(strip=True))
            j += 1
workbook.save('前程无忧.xls')
# Source article: "Python爬虫学习之爬取招聘信息并存入Excel"
# (CSDN; last published 2022-03-01 17:27:24)