Python Web Scraping Exercise

I previously wrote an article on scraping lagou.com for jobs matching the keyword "数据分析" (data analysis): "lagou网职位信息爬虫练习". I recently joined a company focused on design, so I wanted to put together a data analysis report on "设计" (design) jobs, but found that running the old code as-is no longer returned any data, so I tweaked it a little. This post mainly documents the scraper code.

First, be aware that lagou.com serves its job listings via a POST request, so you must supply the form data. Here we use from_data = {'first':'true', 'pn':'1', 'kd':'设计'}, where pn is the current page number and kd is the job keyword we search for. Second, remember to use a Session to obtain the dynamic cookies; otherwise the scraped results come back completely empty, and you also risk the full anti-crawler treatment of an IP ban plus an account ban. With the browser's developer tools you can find the JSON response that stores the data.
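To make that flow concrete, here is a minimal sketch condensed from the full script below (the headers here are trimmed placeholders; the real script sends a few more):

import requests

url_start = 'https://www.lagou.com/jobs/list_设计?city=%E6%88%90%E9%83%BD&cl=false&fromSearch=true&labelWords=&suginput='
url_parse = 'https://www.lagou.com/jobs/positionAjax.json?city=&needAddtionalResult=false'
headers = {
    'User-Agent': 'Mozilla/5.0',
    'Referer': 'https://www.lagou.com/jobs/list_%E8%AE%BE%E8%AE%A1/p-city_0?px=default',
}
from_data = {'first': 'true', 'pn': '1', 'kd': '设计'}

s = requests.Session()
s.get(url_start, headers=headers, timeout=3)  # GET the listing page so the Session picks up the dynamic cookies
response = s.post(url_parse, data=from_data, headers=headers, cookies=s.cookies, timeout=3)
print(response.json()['content']['positionResult']['result'])  # the JSON list that stores the postings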

# Import the libraries we use
import requests
from bs4 import BeautifulSoup
import json
import pandas as pd
import time
from datetime import datetime

# Get the job requirements from the job detail page
def getjobneeds(positionId):
    url = 'https://www.lagou.com/jobs/{}.html'
    headers = {
        'User-Agent':'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.181 Safari/537.36',
        'Host':'www.lagou.com',
        'Referer':'https://www.lagou.com/jobs/list_%E8%AE%BE%E8%AE%A1/p-city_0?px=default',
        'Upgrade-Insecure-Requests':'1'
        }
    
    s = requests.Session()
    s.get(url.format(positionId), headers=headers, timeout=3)  # request the page once to obtain cookies
    cookie = s.cookies  # the cookies obtained by this request
    response = s.get(url.format(positionId), headers=headers, cookies=cookie, timeout=3)  # fetch the page text with those cookies
    time.sleep(5)  # take a short break between requests
    
    
    soup = BeautifulSoup(response.text, 'html.parser')
    need = ' '.join([p.text.strip() for p in soup.select('.job_bt div')])
    return need
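
# Example usage (the positionId here is hypothetical; real IDs come from the listing JSON parsed below):
# print(getjobneeds(1234567))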


# Get the detailed info for a single job from the listing JSON
def getjobdetails(jd):
    results= {}
    results['businessZones'] = jd['businessZones']
    results['companyFullName'] = jd['companyFullName']
    results['companyLabelList'] = jd['companyLabelList']
    results['financeStage'] = jd['financeStage']
    results['skillLables'] = jd['skillLables']
    results['companySize'] = jd['companySize']
    results['latitude'] = jd['latitude']
    results['longitude'] = jd['longitude']
    results['city'] = jd['city']
    results['district'] = jd['district']
    results['salary'] = jd['salary']
    results['secondType'] = jd['secondType']
    results['workYear'] = jd['workYear']
    results['education'] = jd['education']
    results['firstType'] = jd['firstType']
    results['thirdType'] = jd['thirdType']
    results['positionName'] = jd['positionName']
    results['positionLables'] = jd['positionLables']
    results['positionAdvantage'] = jd['positionAdvantage']
    positionId = jd['positionId']
    results['need'] = getjobneeds(positionId)
    time.sleep(2)  # pause to control the request rate
    print(jd, 'get')
    return results

# Get the job info from the listing pages
def parseListLinks(url_start,url_parse):
    jobs = []
    from_data = {'first':'true',
                'pn':'1',
                'kd':'设计'}
    
    headers = {
        'Host':'www.lagou.com',
        'Accept': 'application/json, text/javascript, */*; q=0.01',
        'Referer':'https://www.lagou.com/jobs/list_%E8%AE%BE%E8%AE%A1/p-city_0?px=default',
        'User-Agent':'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.181 Safari/537.36',
        'X-Anit-Forge-Code':'0',
        'X-Anit-Forge-Token':'None',
        'X-Requested-With':'XMLHttpRequest',
    }
    
    res = []
    for n in range(30):
        from_data['pn'] = n + 1
        s = requests.Session()
        s.get(url_start, headers=headers, timeout=3)  # request the listing page first to obtain cookies
        cookie = s.cookies  # the cookies obtained by this request
        response = s.post(url_parse, data=from_data, headers=headers, cookies=cookie, timeout=3)  # POST the form data with those cookies
        time.sleep(5)
        res.append(response)
        
    jd = []
    for m in range(len(res)):
        jd.append(json.loads(res[m].text)['content']['positionResult']['result'])
    for j in range(len(jd)):
        for i in range(15):
            jobs.append(getjobdetails(jd[j][i]))
    time.sleep(30)
    return jobs

def main():
    url_start = "https://www.lagou.com/jobs/list_设计?city=%E6%88%90%E9%83%BD&cl=false&fromSearch=true&labelWords=&suginput="
    url_parse = "https://www.lagou.com/jobs/positionAjax.json?city=&needAddtionalResult=false"
    jobs_total = parseListLinks(url_start,url_parse)
    now = datetime.now().strftime('%m%d_%H%M%S')
    newsname = 'lagou_sj'+now+'.xlsx'  # name the file with a timestamp
    df = pd.DataFrame(jobs_total)
    df.to_excel(newsname)
    print('File saved')
    
if __name__ == '__main__':
    main()

Lagou shows 15 postings per page and up to 30 pages by default, i.e. 450 postings in total. I simply hard-coded those numbers here; adjust the number of pages to your needs. You can also choose not to fetch the "job requirements" field, or any other fields you don't need. The saved file looks like this (exported to Excel).
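If you'd rather not hard-code the page count, a parameterized version of the listing loop could look like this (a sketch of my own; fetch_pages, keyword, and max_pages are names I introduced, not part of the original script):

import time
import requests

def fetch_pages(url_start, url_parse, headers, keyword='设计', max_pages=30):
    # Fetch up to max_pages of listing JSON responses for the given keyword
    from_data = {'first': 'true', 'pn': '1', 'kd': keyword}
    responses = []
    for n in range(max_pages):
        from_data['pn'] = n + 1
        s = requests.Session()
        s.get(url_start, headers=headers, timeout=3)  # warm-up GET for cookies
        responses.append(s.post(url_parse, data=from_data, headers=headers,
                                cookies=s.cookies, timeout=3))
        time.sleep(5)  # keep the same throttling as the original loop
    return responses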

Update:
Many readers in the comments reported this error:
jd.append(json.loads(res[m].text)['content']['positionResult']['result']) KeyError: 'content'
It is probably because the cookies were not obtained. In that case, I suggest opening the site in a browser first, grabbing the cookie from there, and hard-coding it into the headers:

    from_data = {'first':'true',
                'pn':'1',
                'kd':'设计'}
    
    headers = {
        'Host':'www.lagou.com',
        'Accept': 'application/json, text/javascript, */*; q=0.01',
        'Referer':'https://www.lagou.com/jobs/list_%E8%AE%BE%E8%AE%A1/p-city_0?px=default',
        'User-Agent':'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.181 Safari/537.36',
        'X-Anit-Forge-Code':'0',
        'X-Anit-Forge-Token':'None',
        'X-Requested-With':'XMLHttpRequest',
        'cookie':'privacyPolicyPopup=false; user_trace_token=20211220150143-948a4174-441e-46f6-b888-ac1b81c8f347; Hm_lvt_4233e74dff0ae5bd0a3d81c6ccf756e6=1639983704; _ga=GA1.2.767093018.1639983704; _gat=1; LGSID=20211220150143-dcc48212-6d40-4bfd-8209-53fd87b2da07; PRE_UTM=m_cf_cpt_baidu_pcbt; PRE_HOST=www.baidu.com; PRE_SITE=https%3A%2F%2Fwww.baidu.com%2Fother.php%3Fsc.0s0000abXmX5wCOQJIE9hCPWCNCIZ5CIvep01gqi999%5F7pd1VYIb2COrpUSoN4g8VqeDje4Kcr4-siZxUxdrERqa22JBLvyci1z56ZzvW9aLKMJjiQyOQWpw28t8v764u2%5Fa8gL%5F3Unn9kr-UAiIOdyqnEwUfXeYD4yCWk-gtC%5FQ5mRnUxJPqiZr6SyN1yQ4QaszBXHD0-qn1mud0a6TEIPUFXmy.7Y%5FNR2Ar5Od663rj6tJQrGvKD77h24SU5WudF6ksswGuh9J4qt7jHzk8sHfGmYt%5FrE-9kYryqM764TTPqKi%5FnYQZHuukL0.TLFWgv-b5HDkrfK1ThPGujYknHb0THY0IAYqs2v4VnL30ZN1ugFxIZ-suHYs0A7bgLw4TARqnsKLULFb5TaV8UHPS0KzmLmqnfKdThkxpyfqnHR1nHTdrjfvPfKVINqGujYkPjR1PjD4n6KVgv-b5HDknWDLrHRY0AdYTAkxpyfqnHczP1n0TZuxpyfqn0KGuAnqHbG2RsKspyfqn1c0mv-b5Hc3n0KWThnqPHnvP0%26ck%3D3259.1.126.408.160.412.157.494%26dt%3D1639983700%26wd%3D%25E6%258B%2589%25E5%258B%25BE%25E7%25BD%2591%26tpl%3Dtpl%5F12273%5F25897%5F22126%26l%3D1531758465%26us%3DlinkName%253D%2525E6%2525A0%252587%2525E9%2525A2%252598-%2525E4%2525B8%2525BB%2525E6%2525A0%252587%2525E9%2525A2%252598%2526linkText%253D%2525E3%252580%252590%2525E6%25258B%252589%2525E5%25258B%2525BE%2525E6%25258B%25259B%2525E8%252581%252598%2525E3%252580%252591%2525E5%2525AE%252598%2525E6%252596%2525B9%2525E7%2525BD%252591%2525E7%2525AB%252599%252520-%252520%2525E4%2525BA%252592%2525E8%252581%252594%2525E7%2525BD%252591%2525E9%2525AB%252598%2525E8%252596%2525AA%2525E5%2525A5%2525BD%2525E5%2525B7%2525A5%2525E4%2525BD%25259C%2525EF%2525BC%25258C%2525E4%2525B8%25258A%2525E6%25258B%252589%2525E5%25258B%2525BE%21%2526linkType%253D; PRE_LAND=https%3A%2F%2Fwww.lagou.com%2Flanding-page%2Fpc%2Fsearch.html%3Futm%5Fsource%3Dm%5Fcf%5Fcpt%5Fbaidu%5Fpcbt; LGUID=20211220150143-6b44ffcb-0d8c-4201-8ed2-556360ad318e; __lg_stoken__=f4224059ca7db31e8ede64a3a95b0739d3aa4fc2028c0a388feaa5c241d60148e292e3b3f4c2c4330b112ca852e85a386f2d93efcb7896187bec640fa0c884dc3ecaa2cf9a96; JSESSIONID=ABAAAECABFAACEA03C147A845A2A2C3E8FF621330EEA86C; WEBTJ-ID=20211220150522-17dd6a8bc293d0-0ec3d98707bd95-4303066-2073600-17dd6a8bc2ae71; X_HTTP_TOKEN=21af495de25477363293899361737baf402df5aa8f; sensorsdata2015jssdkcross=%7B%22distinct_id%22%3A%2217dd6a8c031870-0af2249a2288d4-4303066-2073600-17dd6a8c032e94%22%2C%22%24device_id%22%3A%2217dd6a8c031870-0af2249a2288d4-4303066-2073600-17dd6a8c032e94%22%7D; sajssdk_2015_cross_new_user=1; _gid=GA1.2.897179725.1639983923; Hm_lpvt_4233e74dff0ae5bd0a3d81c6ccf756e6=1639983923; LGRID=20211220150524-eed1ae66-2059-4725-ba42-cb6d1da1d056; SEARCH_ID=dc048d37a3624143b45dbcc8f9bb8d3f',
    }
    url_start = "https://www.lagou.com/jobs/list_设计?city=%E6%88%90%E9%83%BD&cl=false&fromSearch=true&labelWords=&suginput="
    url_parse = "https://www.lagou.com/jobs/positionAjax.json?city=&needAddtionalResult=false"

Replace the cookie with your own (a risk of account bans can't be ruled out).
Then run the rest of the code and you will get the data:

response = s.post(url_parse, data=from_data, headers=headers, cookies=cookie, timeout=3)  # fetch the response text
json.loads(response.text)['content']['positionResult']['result']
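
If you want the script to fail more gracefully when the cookie is rejected, a small defensive check before indexing into 'content' (my own addition, not in the original code) makes the error obvious:

data = json.loads(response.text)
if 'content' in data:
    result = data['content']['positionResult']['result']
else:
    # most likely an anti-crawler response: refresh the cookie in the headers and retry
    print('No "content" key in the response:', data.get('msg', response.text[:200]))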

