【2019.05】Scraping Lagou job data with Python (static + dynamic)


Lagou is an interesting site in that its pages load in two different ways:
click a category tag on the home page and the page is rendered statically;
search directly and the page is loaded dynamically.
The two call for different scraping approaches, and both are implemented here.

  • During dynamic scraping you may hit the message '操作太频繁,请稍后再试' ('You are operating too frequently, please try again later'). This is a cookie problem; the full fix is in the code below, and a minimal sketch of the idea follows.
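The workaround, in outline: open a requests.Session, GET the normal search page so Lagou issues fresh cookies, then POST the Ajax endpoint with the same session. A minimal sketch (the URL and form fields are the ones used by the full crawler later in this post):

import requests

headers = {'User-Agent': 'Mozilla/5.0', 'Referer': 'https://www.lagou.com/'}
s = requests.Session()

# Step 1: visit the HTML search page so the session picks up fresh cookies.
s.get('https://www.lagou.com/jobs/list_%E6%9C%BA%E5%99%A8%E5%AD%A6%E4%B9%A0',
      headers=headers, timeout=3)

# Step 2: POST the Ajax endpoint with those cookies; skipping step 1 is what
# triggers the '操作太频繁' message.
resp = s.post('https://www.lagou.com/jobs/positionAjax.json?needAddtionalResult=false',
              headers=headers, data={'first': 'false', 'pn': 1, 'kd': '机器学习'},
              timeout=3)
print(resp.json().get('msg'))  # the anti-crawler message shows up here when blocked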

Static scraping

https://www.lagou.com/zhaopin/jiqixuexi/?labelWords=label
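If Lagou changes its page layout, the XPath expressions in the script below will silently return empty lists, so it is worth sanity-checking one expression against the live page first. A throwaway check (same URL and XPath as the crawler):

import requests
from lxml import etree

headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)'}
html = requests.get('https://www.lagou.com/zhaopin/jiqixuexi/?labelWords=label',
                    headers=headers).text
con = etree.HTML(html)
print(con.xpath('//a[@class="position_link"]/h3/text()'))  # expect a list of job titles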

#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2019/5/6 21:12
# @Author  : Paulson
# @File    : Spider_jingtai.py
# @Software: PyCharm
# @define  : function


import random
import time

import pandas as pd
import requests
from lxml import etree

# Put your real cookie here if needed
# Cookie = 'xx'
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.131 Safari/537.36',
    # 'Cookie': Cookie
}


for i in range(1, 2):  # only page 1 here; widen the range to crawl more pages
    time.sleep(random.randint(3, 10))
    url = 'https://www.lagou.com/zhaopin/jiqixuexi/{}/?filterOption=2'.format(i)
    print(url)
    print('Scraping page {}...'.format(i))

    # Fetch the page and parse it
    con = etree.HTML(requests.get(url, headers=headers).text)

    # Extract each field with an XPath expression
    job_name = con.xpath('//a[@class="position_link"]/h3/text()')
    print(job_name)
    job_address = con.xpath("//a[@class='position_link']/span/em/text()")
    job_company = con.xpath("//div[@class='company_name']/a/text()")
    job_salary = con.xpath("//span[@class='money']/text()")
    job_exp_edu = con.xpath("//div[@class='li_b_l']/text()")
    job_exp_edu2 = [s.strip() for s in job_exp_edu if s.strip() != '']
    job_industry = con.xpath("//div[@class='industry']/text()")
    job_temptation = con.xpath("//div[@class='list_item_bot']/div[@class='li_b_r']/text()")
    job_links = con.xpath("//div[@class='p_top']/a/@href")
    print(job_links)

    # Follow each detail-page link and collect the job description
    job_des = []
    for link in job_links:
        time.sleep(random.randint(3, 10))
        con2 = etree.HTML(requests.get(url=link, headers=headers).text)
        des = [[p.xpath('string(.)') for p in con2.xpath("//dd[@class='job_bt']/div/p")]]
        print(des)
        job_des += des

    # Pack the fields into a dict
    dataset = {
        'job_name': job_name,
        'job_address': job_address,
        'company': job_company,
        'salary': job_salary,
        'exp_edu': job_exp_edu2,
        'industry': job_industry,
        'temptation': job_temptation,
        'job_description': job_des
    }

    # Convert to a DataFrame and save as CSV
    data = pd.DataFrame(dataset)
    data.to_csv('machine_learning_hz_job2.csv')
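A practical caveat: pd.DataFrame(dataset) raises "ValueError: arrays must all be same length" whenever the scraped lists end up with different lengths (for example, when one listing is missing a field), because each list is extracted independently. A minimal guard, using a hypothetical align_columns helper that truncates every column to the shortest list:

def align_columns(dataset):
    # Truncate every column to the shortest list so pd.DataFrame accepts the dict.
    n = min(len(v) for v in dataset.values())
    return {k: v[:n] for k, v in dataset.items()}

data = pd.DataFrame(align_columns(dataset))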

Results

[Screenshot of the scraped CSV omitted]

Dynamic scraping

https://www.lagou.com/jobs/positionAjax.json?needAddtionalResult=false
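The crawler below reads the job list from json_result['content']['positionResult']['result']; when Lagou blocks the request, that payload is missing and only a msg field comes back. For orientation, an abbreviated sketch of the shape (field names mirror the parsing code; the values are invented):

sample = {
    'content': {
        'positionResult': {
            'result': [{
                'positionName': '机器学习工程师',  # invented example value
                'salary': '20k-40k',
                'city': '杭州',
                'positionId': 123456,  # used below to build the detail-page URL
            }]
        }
    },
}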

#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2019/5/6 22:11
# @Author  : Paulson
# @File    : Spider_dongtai.py
# @Software: PyCharm
# @define  : function


import json
import random
import time
import requests
from bs4 import BeautifulSoup
import pandas as pd

# Main crawl function
def lagou_dynamic_crawl():
    headers = {
        'Accept': 'application/json, text/javascript, */*; q=0.01',
        'Referer': 'https://www.lagou.com/jobs/list_%E8%BF%90%E7%BB%B4?city=%E6%88%90%E9%83%BD&cl=false&fromSearch=true&labelWords=&suginput=',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.121 Safari/537.36'
    }

    # Container for the scraped positions
    positions = []
    for page in range(1, 31):
        print('Scraping page {}...'.format(page))
        URL_ = 'https://www.lagou.com/jobs/list_%E6%9C%BA%E5%99%A8%E5%AD%A6%E4%B9%A0?city=%E5%85%A8%E5%9B%BD&cl=false&fromSearch=true&labelWords=&suginput='
        URL = 'https://www.lagou.com/jobs/positionAjax.json?needAddtionalResult=false'
        # Build the request form parameters
        params = {
            'first': 'false',
            'pn': page,
            'kd': '机器学习'
        }

        # Send the request: GET the HTML search page first, then POST the Ajax endpoint
        s = requests.Session()
        s.get(URL_, headers=headers, timeout=3)  # visit the search page to pick up fresh cookies
        cookie = s.cookies  # the cookies issued for this session
        result = s.post(URL, headers=headers, data=params, cookies=cookie, timeout=3)
        print(result.text)

        # Parse the response as JSON
        json_result = result.json()

        # Pull the target fields out of the JSON
        try:
            position_info = json_result['content']['positionResult']['result']
        except KeyError:
            # No result payload: Lagou answered with an anti-crawler message
            # such as '您操作太频繁,请稍后再试' (the cookie problem described above)
            print('Blocked by anti-crawler:', json_result.get('msg'))
            raise RuntimeError(json_result.get('msg'))

        # Loop over every position on this page, then crawl its detail page
        for position in position_info:
            # Collect the fields we want into a dict
            position_dict = {
                'position_name': position['positionName'],
                'work_year': position['workYear'],
                'education': position['education'],
                'salary': position['salary'],
                'city': position['city'],
                'company_name': position['companyFullName'],
                'address': position['businessZones'],
                'label': position['companyLabelList'],
                'stage': position['financeStage'],
                'size': position['companySize'],
                'advantage': position['positionAdvantage'],
                'industry': position['industryField'],
                'industryLables': position['industryLables']  # 'Lables' is Lagou's own field spelling
            }
            # Grab the position id
            position_id = position['positionId']

            # Fetch the job description from the detail page via the position id
            position_dict['position_detail'] = recruit_detail(position_id)
            positions.append(position_dict)

        time.sleep(random.randint(3, 6))
    print('All data collected.')
    return positions

# Fetch and parse the job description on a position's detail page
def recruit_detail(position_id):
    headers = {
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3",
        "Accept-Encoding": "gzip, deflate, br",
        "Accept-Language": "zh-CN,zh;q=0.9,en-US;q=0.8,en;q=0.7",
        "Cache-Control": "max-age=0",
        "Connection": "keep-alive",
        "Host": "www.lagou.com",
        "Referer": "https://www.lagou.com/jobs/list_%E6%9C%BA%E5%99%A8%E5%AD%A6%E4%B9%A0?labelWords=&fromSearch=true&suginput=",
        "Upgrade-Insecure-Requests": "1",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.131 Safari/537.36",
    }
    url = 'https://www.lagou.com/jobs/%s.html' % position_id
    result = requests.get(url, headers=headers)
    time.sleep(random.randint(1, 4))

    # Parse out the job-requirements text
    soup = BeautifulSoup(result.text, 'html.parser')
    job_bt = soup.find(class_='job_bt')

    # Some postings turn out to have an empty description,
    # so guard against None here
    if job_bt is not None:
        job_bt = job_bt.text
    else:
        job_bt = 'null'
    return job_bt


if __name__ == '__main__':
    positions = lagou_dynamic_crawl()
    data = pd.DataFrame(positions)
    data.to_csv('machine_learning_hz_job3.csv')
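If the CSV is opened in Excel on Windows, the Chinese text often renders as mojibake because Excel does not assume UTF-8. An optional tweak (not in the original script) is to write the file with a BOM:

# Optional: a UTF-8 BOM lets Excel on Windows decode the Chinese columns correctly.
data.to_csv('machine_learning_hz_job3.csv', index=False, encoding='utf-8-sig')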

Results

[Screenshot of the scraped CSV omitted]
