Crawler in Practice 3: Crawling Lagou's Site-Wide Job Data with Multiprocessing

Step 1: Crawl the Lagou homepage to get every job category name and its URL.

import requests
from pyquery import PyQuery
import json
header = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36'}


def get_html(url, header=None):
    '''
    :param url: the URL to request
    :param header: the request headers to send
    :return: the response body, or None on failure
    '''
    response = requests.get(url, headers=header, timeout=3)
    # A status code of 200 means the request succeeded
    if response.status_code == 200:
        # Set the response encoding
        response.encoding = response.apparent_encoding
        # Return the page source
        return response.text
    else:
        print('Request to {} failed with status {}'.format(url, response.status_code))
        return None


def parser_html(html):
    '''
    :param html: the HTML to parse
    :return: the navigation links on the page as a JSON string
    '''
    doc = PyQuery(html)
    title = doc('title')
    print(title)
    # All links in the left-hand job-category navigation on the homepage
    all_a = doc('div.mainNavs a').items()

    data = {}
    for a in all_a:
        data[a.text()] = a.attr('href')
    return json.dumps(data, ensure_ascii=False)


def save_data(data, path=''):
    '''
    :param data: the data to save
    :param path: the file path to save to
    :return:
    '''
    # Append the data to the file
    with open(path, 'a', encoding='utf-8') as f:
        f.write(data)


def main(url):
    # Fetch the homepage
    html = get_html(url, header=header)
    if html is None:
        return
    # Parse the page
    data = parser_html(html)
    # Save the extracted links
    save_data(data, 'lagou_links.txt')

if __name__ == '__main__':

    main('https://www.lagou.com/')
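
The result is a single JSON object that maps each job category name to its listing URL. As a quick sanity check (a minimal sketch, assuming the script above has been run exactly once so lagou_links.txt contains one JSON object), the mapping can be loaded back before moving on to step 2:

import json

# Load the job-name -> URL mapping written by step 1
with open('lagou_links.txt', 'r', encoding='utf-8') as f:
    links = json.load(f)

print(len(links), 'job categories collected')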

Step 2: Using the job names and URLs from step 1, get the total number of result pages for each job category (the totalNum value in the listing page's pagination, which step 3 uses as the page count).

import requests
from pyquery import PyQuery
import json
header = {
    'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36',
    'referer': 'https://www.lagou.com',
    'sec-fetch-dest': 'empty',
    'sec-fetch-mode': "cors",
    'sec-fetch-site': 'same-origin'
    }

def get_html(url, header=None):
    '''
    :param url: the URL to request
    :param header: the request headers to send
    :return: the response body, or None on failure
    '''
    response = requests.get(url, headers=header, timeout=3)
    # A status code of 200 means the request succeeded
    if response.status_code == 200:
        # Set the response encoding
        response.encoding = response.apparent_encoding
        # Return the page source
        return response.text
    else:
        print('Request to {} failed with status {}'.format(url, response.status_code))
        return None



data = {}
with open('lagou_links.txt', 'r', encoding='utf-8') as f:
    all_url = f.read()
    urls = json.loads(all_url)
    # print(urls.items())
    for name, url in urls.items():
        html = get_html(url, header=header)
        if html is None:
            continue
        doc = PyQuery(html)
        # totalNum is the page count shown in the listing page's pagination
        num = doc('span.span.totalNum').text()
        data[name] = num
        print(name, num)
data = json.dumps(data)
with open('lagou-data.txt', 'w', encoding='utf-8') as f:
    f.write(data)
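
One caveat: if Lagou serves a verification page or the selector matches nothing, num comes back as an empty string, and step 3's int(num) will then raise a ValueError. A small hedged filter, applied to the dict just before the json.dumps call above, keeps only usable entries:

# Keep only categories whose page count parsed as a digit string
data = {name: num for name, num in data.items() if num.isdigit()}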



Step 3: Using the job names and page counts, fetch all of the posting data.

import requests
import csv, json, time
from multiprocessing import Pool

header = {
    'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36',
    'origin': 'https://www.lagou.com',
    'referer': 'https://www.lagou.com/jobs/list_python%20?labelWords=&fromSearch=true&suginput=',
    'sec-fetch-dest': 'empty',
    'sec-fetch-mode': "cors",
    'sec-fetch-site': 'same-origin'
}
with open('lagou-data.txt', 'r') as f:
    links = f.read()
    links = json.loads(links)
    print(type(links), links)


def main(name, num, city='北京'):
    # The listing page, visited first only to pick up the required cookies
    start_url = f'https://www.lagou.com/jobs/list_{name}%20?labelWords=&fromSearch=true&suginput='
    # The Ajax endpoint that actually returns the posting data
    url = f'https://www.lagou.com/jobs/positionAjax.json?city={city}&needAddtionalResult=false'
    # Iterate over every result page for this job category
    for page in range(1, int(num) + 1):
        # Create a session object
        session = requests.Session()
        # Visit the listing page so the session picks up Lagou's cookies
        session.get(start_url, headers=header)
        # Form data for the Ajax request
        data = {
            'first': 'true',
            'pn': page,  # page number
            'kd': name,  # job keyword
        }
        # Send the POST request through the session, carrying its cookies
        resp = session.post(url, headers=header, cookies=session.cookies, data=data)

        result = resp.json()['content']['positionResult']['result']
        # Append the rows to a CSV file
        with open('拉钩data.csv', 'a', encoding='utf-8', newline='') as f:
            for i in result:
                info = []
                info.append(i['positionName'])
                info.append(i['companyFullName'])
                info.append(i['companyShortName'])
                info.append(i['companySize'])
                info.append(i['industryField'])
                info.append(i['financeStage'])
                info.append(','.join(i['companyLabelList']))
                info.append(i['firstType'])
                info.append(','.join(i['positionLables']))
                info.append('-' + i['createTime'])
                info.append(i['city'])
                info.append(i['district'])
                info.append(i['salary'])
                info.append(i['workYear'])
                if i['linestaion']:
                    info.append(i['linestaion'])
                else:
                    info.append('空')

                ow = csv.writer(f)
                ow.writerow(info)

# Single-process version (kept for reference)
# for name, num in links.items():
#     main(name, num)

# Multiprocessing version
if __name__ == '__main__':
    t1 = time.time()
    # Create a pool of worker processes
    pool = Pool(3)
    # Submit one task per job category to the pool
    for name, num in links.items():
        pool.apply_async(main, args=(name, num,))
    pool.close()  # Stop accepting new tasks
    pool.join()   # Wait for all workers to finish
    t2 = time.time()
    print(t2 - t1)
    print('All done')
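
One practical refinement: the workers only ever append data rows, so 拉钩data.csv never gets a header row. The sketch below (an optional addition, not part of the original script; the column names simply mirror the fields appended in main()) writes a header once before the pool is started:

import csv

# Write the CSV header once, before any worker process appends data rows.
# The column names mirror the fields collected in main(); they are not an
# official Lagou schema.
with open('拉钩data.csv', 'w', encoding='utf-8', newline='') as f:
    csv.writer(f).writerow([
        'positionName', 'companyFullName', 'companyShortName', 'companySize',
        'industryField', 'financeStage', 'companyLabelList', 'firstType',
        'positionLables', 'createTime', 'city', 'district', 'salary',
        'workYear', 'linestaion',
    ])

Note also that several processes appending to the same file can interleave rows; with only three workers this is usually tolerable, but writing one file per job category (or collecting rows in the parent process) is the safer design.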

