Learning Python Web Scraping: Scraping Job Listings and Saving Them to Excel
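
The script below crawls the first five pages of 51job (前程无忧) search results for "big data" (大数据) positions. Each results page embeds its listing data as a JSON object assigned to window.__SEARCH_RESULT__ inside a <script> tag, so rather than scraping the rendered HTML, the script pulls that JSON out with BeautifulSoup and json.loads(), follows each posting's job_href for the full job description, and writes every field into an .xls workbook with xlwt.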

from urllib.request import urlopen, Request
from bs4 import BeautifulSoup
import xlwt
import json

# Create the workbook and write the header row. Columns: index, job title,
# company name, location, company type, requirements, salary, benefits,
# job description, company link.
workbook = xlwt.Workbook(encoding='utf-8')
worksheet = workbook.add_sheet('My Worksheet')
column_titles = ["序号", "工作名称", "公司名称", "公司地点", "公司类型",
                 "招聘条件", "工资待遇", "工作福利", "岗位职责", "公司链接"]
for col, title in enumerate(column_titles):
    worksheet.write(0, col, title)

# A browser-like User-Agent so the requests are not rejected as a bot.
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.105 Safari/537.36'}

j = 1  # next worksheet row to write (row 0 holds the column titles)
for page in range(1, 6):  # first five result pages for "大数据" (big data)
    url = "https://search.51job.com/list/080200,000000,0000,00,9,99,%25E5%25A4%25A7%25E6%2595%25B0%25E6%258D%25AE,2,{}.html?lang=c&postchannel=0000&workyear=99&cotype=99&degreefrom=99&jobterm=99&companysize=99&ord_field=0&dibiaoid=0&line=&welfare=".format(page)
    html = urlopen(Request(url, headers=headers))
    bs = BeautifulSoup(html, "html.parser")
    # The listing data is not in the rendered HTML: 51job ships it as JSON
    # assigned to window.__SEARCH_RESULT__ inside one of the <script> tags.
    scripts = bs.find_all("script", {"type": "text/javascript"})
    for script in scripts:
        text = script.get_text()
        # Only parse the script that actually carries the assignment;
        # calling json.loads() on any other script would raise a ValueError.
        if 'window.__SEARCH_RESULT__' in text:
            text = text.replace('window.__SEARCH_RESULT__ =', '')
            jobs = json.loads(text)["engine_search_result"]
            for job in jobs:
                worksheet.write(j, 0, j)
                worksheet.write(j, 1, job['job_name'])
                worksheet.write(j, 2, job['company_name'])
                worksheet.write(j, 3, job['workarea_text'])
                worksheet.write(j, 4, job['companytype_text'])
                worksheet.write(j, 5, job['attribute_text'])
                worksheet.write(j, 6, job['providesalary_text'])
                worksheet.write(j, 7, job['jobwelf'])
                worksheet.write(j, 9, job['company_href'])
                # Column 8 (job description) only exists on the detail page,
                # so fetch and parse each posting's own page.
                detail = urlopen(Request(job['job_href'], headers=headers))
                bs2 = BeautifulSoup(detail, "html.parser")
                div = bs2.find("div", {"class": "bmsg job_msg inbox"})
                if div is not None:
                    worksheet.write(j, 8, div.get_text(strip=True))
                j += 1
# Save everything to a legacy .xls file; 前程无忧 is 51job's Chinese name.
workbook.save('前程无忧.xls')
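
To sanity-check the output, here is a minimal read-back sketch (assuming xlrd is installed; xlrd 2.x reads only the legacy .xls format, which is exactly what xlwt produces, so it pairs naturally with this script):

import xlrd

# Reopen the workbook the scraper just wrote and print the first rows.
book = xlrd.open_workbook('前程无忧.xls')
sheet = book.sheet_by_index(0)
print(sheet.nrows - 1, "job rows scraped")  # subtract the header row
for r in range(min(sheet.nrows, 4)):
    print(sheet.row_values(r))  # header row, then the first postings

Note that the legacy .xls format caps a sheet at 65,536 rows; five result pages stay far below that limit.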