Scraping the Boss Zhipin job site with XPath

What I learned:
1. Gained a basic working knowledge of XPath
2. Python's zip() pairs up parallel sequences element by element (see the sketch after this list)
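
As a quick illustration of point 2, here is a minimal sketch of how zip() pairs parallel lists into per-record tuples; the sample values are made up, but this is exactly how the parsed fields get combined in the crawler below:

# Minimal zip() sketch; the company/salary values here are hypothetical.
companies = ['Acme', 'Globex']
salaries = ['15-25K', '20-30K']
# zip() yields one tuple per index: ('Acme', '15-25K'), ('Globex', '20-30K')
for company, salary in zip(companies, salaries):
    print(company, salary)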

Code


import requests
import json
from urllib.parse import urlencode
from lxml import etree
from multiprocessing import Pool
from config import *
import pymongo


# Connect to the MongoDB database (MONGO_URL / MONGO_DB come from config.py)
client = pymongo.MongoClient(MONGO_URL, connect=False)
db = client[MONGO_DB]
# Disguise the requests as normal browser traffic
# (the cookie below is session-specific and will expire)
headers = {
    'Host': 'www.zhipin.com',
    'Referer': 'https://www.zhipin.com/c101010100/?query=python&page=2&ka=page-2',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:73.0) Gecko/20100101 Firefox/73.0',
    'cookie': '__a=75047249.1583481129.1583493500.1583552887.40.4.22.40; Hm_lvt_194df3105ad7148dcf2b98a91b5e727a=1583481130,1583493501,1583552888; __zp_stoken__=0177HqyJ6TuMApOVc2qY%2Bor1jg8SMVSt1kzumB%2Bqjtzmz3tEZJHqzhfCzkkYXubt4kmRP5QfAYK2ocz7rMItmhN1aTFfijWmDbH6vbD%2FYpAqgoYhCAB0X4f9W4bzorJ2Yqqk; __c=1583552887; __g=-; __l=l=https%3A%2F%2Fwww.zhipin.com%2Fweb%2Fcommon%2Fsecurity-check.html%3Fseed%3D0exfky4DeMyGMjnOz9NrDPc0tFAHc5%252F070%252BJ6gDaOx0%253D%26name%3D302f64cf%26ts%3D1583552886965%26callbackUrl%3D%252Fc101010100%252F%253Fquery%253Dpython%2526page%253D0%2526ka%253Dpage-0%26srcReferer%3D&r=&friend_source=0&friend_source=0; lastCity=100010000; Hm_lpvt_194df3105ad7148dcf2b98a91b5e727a=1583568408; __zp_sseed__=0exfky4DeMyGMjnOz9NrDJZ5++vgp4hJ5abK0v4QSGM=; __zp_sname__=302f64cf; __zp_sts__=1583569627769'
}

# URL of the local proxy pool; each GET to /random returns one proxy IP
proxy_pool_url = 'http://localhost:5555/random'

# Fetch one proxy IP from the proxy pool
def get_proxy():
    try:
        response = requests.get(proxy_pool_url)
        if response.status_code == 200:
            return response.text
        return None
    except requests.ConnectionError:
        return None

# Fetch the target listing page
def get_page(keyword, offset, count=1):
    # Cap the retries so a bad page can never cause endless recursion
    if count <= 5:
        # Query-string parameters for the listing page
        data = {
            'query': keyword,
            'page': offset,
            'ka': 'page-' + str(offset)
        }
        # Build the full URL with urlencode
        url = 'https://www.zhipin.com/c101010100/?' + urlencode(data)
        # Grab a proxy IP from the pool
        proxy = get_proxy()
        # Catch connection errors so the whole crawl does not abort
        try:
            # Only send the request if we actually got a proxy
            if proxy:
                # Route both http and https traffic through the HTTP proxy
                proxies = {
                    'http': 'http://' + proxy,
                    'https': 'http://' + proxy
                }
                print('Using proxy IP:', proxy)
                # Disable redirects so the anti-crawler 302 is visible below
                response = requests.get(url, headers=headers, proxies=proxies,
                                        allow_redirects=False)
            else:
                return get_page(keyword, offset, count + 1)
            # Check whether the request succeeded
            if response.status_code == 200:
                return response.text
            elif response.status_code == 302:
                # Redirected to the security check: try again with a new proxy
                return get_page(keyword, offset, count + 1)
            else:
                print('Request failed!')
                return None
        except requests.ConnectionError:
            return get_page(keyword, offset, count + 1)
    else:
        print('Too many retries!')
        return None

# Parse the listing page with XPath
def parse_page_detail(html):
    etree_html = etree.HTML(html)
    # Each job card has a div.tags block; string(.) flattens it to plain
    # text, and split() turns that into a list of tags
    items = []
    for tags in etree_html.xpath('//div[@class="tags"]'):
        items.append(tags.xpath('string(.)').split())
    picture = etree_html.xpath('//img[@class="company-logo"]/@src')
    address = etree_html.xpath('//span[@class="job-area"]/text()')
    salary = etree_html.xpath('//div[@class="job-limit clearfix"]/span/text()')
    company = etree_html.xpath('//div[@class="company-text"]/h3/a/text()')
    # zip() pairs the parallel lists into one tuple per job
    data = zip(company, items, salary, address, picture)
    # Build a dict for each job
    for it in data:
        yield {
            'company': it[0],
            'item': it[1],
            'salary': it[2],
            'address': it[3],
            'picture': it[4]
        }

# Append each record to a text file as one JSON line
def save_to_file(data):
    file_path = 'D://python.txt'
    with open(file_path, 'a+', encoding='utf-8') as f:
        f.write(json.dumps(data, ensure_ascii=False) + '\n')

# Save the record to MongoDB (MONGO_TABLE comes from config.py)
def save_to_mongoDB(data):
    if db[MONGO_TABLE].insert_one(data):
        print('Saved to MongoDB:', data)
        return True
    return False


def main(offset):
    html = get_page('python', offset)
    # get_page returns None after too many failures; skip the page then
    if html:
        for data in parse_page_detail(html):
            save_to_file(data)
            save_to_mongoDB(data)


if __name__ == '__main__':
    # Crawl several pages in parallel with a process pool to speed things up
    groups = [x for x in range(0, 4)]
    pool = Pool()
    pool.map(main, groups)
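
The listing does from config import *; the post never shows config.py, but judging from the names it uses, it defines the MongoDB settings along the lines of this sketch (the values here are placeholders, not the ones actually used):

# config.py -- assumed contents; the names match what the crawler imports,
# but the values below are placeholders.
MONGO_URL = 'localhost'
MONGO_DB = 'zhipin'
MONGO_TABLE = 'python_jobs'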

The crawl relies on proxy IPs; there are ready-made proxy IP pool projects online, so you do not need to write one yourself.
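
For reference, all the crawler assumes about the pool is an HTTP endpoint that returns a single ip:port string per request, something like this (the printed address is an example, not a real proxy):

import requests

# Ask the local proxy pool for one random proxy.
resp = requests.get('http://localhost:5555/random')
print(resp.text)  # e.g. '123.45.67.89:8080'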
