# 爬取某里的社招网python岗位及全部岗位

import urllib
from urllib import request,parse
import json

# HTTP headers attached to every request below; a real mobile-browser
# User-Agent is supplied (presumably to avoid trivial bot filtering by
# the endpoint — not verified here).
headers={
"User-Agent": "Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Mobile Safari/537.36"
}

# Fetch the python-keyword job listings.
def GetPython(starurl, data):
    """POST *data* to *starurl* and print one line per job posting.

    Parameters:
        starurl: the job-list JSON endpoint URL.
        data: dict of query parameters (pageSize, t, keyWord, pageIndex).

    Prints department name, work location and degree requirement for
    each entry under ``returnValue.datas`` in the JSON response.
    """
    # URL-encode the parameters into the POST body.
    body = urllib.parse.urlencode(data).encode('utf-8')
    req = urllib.request.Request(starurl, data=body, headers=headers)

    # Use a context manager so the HTTP response is always closed
    # (the original never closed it — a connection leak).
    with urllib.request.urlopen(req) as resp:
        payload = json.loads(resp.read().decode('utf-8'))

    # Walk the job entries and print the interesting fields.
    for job in payload['returnValue']['datas']:
        departmentName = job['departmentName']  # department name
        workLocation = job['workLocation']      # work location
        degree = job['degree']                  # degree requirement
        print(departmentName, workLocation, degree)


def _post_json(url, page_index):
    """POST a one-page query to *url* and return the decoded JSON payload."""
    params = {
        "pageSize": "10",
        "t": "0.4097248009187844",
        "pageIndex": str(page_index)
    }
    body = urllib.parse.urlencode(params).encode('utf-8')
    req = urllib.request.Request(url, body, headers)
    # Context manager closes the HTTP response even on parse errors
    # (the original leaked every response object).
    with urllib.request.urlopen(req) as resp:
        return json.loads(resp.read())


# Fetch all job postings, page by page.
def GetAll(url):
    """Walk every result page of *url* and print each posting.

    Parameters:
        url: the job-list JSON endpoint URL.

    First queries page 1 to learn ``returnValue.totalPage``, then
    requests each page and prints department name and description
    for every entry under ``returnValue.datas``.
    """
    # Initial request only to discover how many pages exist.
    totalPage = _post_json(url, 1)['returnValue']['totalPage']

    for p in range(1, int(totalPage) + 1):
        # The request/decode logic is shared with the page-count probe
        # via _post_json (it was duplicated inline in the original).
        for job in _post_json(url, p)['returnValue']['datas']:
            departmentName = job['departmentName']  # department
            description = job['description']        # job requirements
            print(departmentName, description)

if __name__ == '__main__':
    # Alibaba social-recruitment job-list JSON endpoint.
    starurl = ('https://job.alibaba.com/zhaopin/'
               'socialPositionList/doList.json')

    # Example payload for a python-keyword search (consumed by GetPython).
    data = dict(pageSize="10",
                t="0.5203601518784822",
                keyWord="python",
                pageIndex=1)
    # GetPython(starurl, data)

    GetAll(starurl)
  • 0
    点赞
  • 1
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
爬取Boss直聘全国的Python开发岗位，你可以使用Python的爬虫库来实现。以下是一个简单的示例代码，可以帮助你开始：

```python
import requests
import json

def get_job_list():
    url = 'https://www.zhipin.com/wapi/zpgeek/common/data/city.json'
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.182 Safari/537.36'
    }
    response = requests.get(url, headers=headers)
    cities = json.loads(response.text)['zpData']['cityList']
    for city in cities:
        city_code = city['code']
        city_name = city['name']
        job_url = f'https://www.zhipin.com/wapi/zpgeek/job/list/job/query.json?city={city_code}&query=python&page=1&pageSize=10'
        response = requests.get(job_url, headers=headers)
        job_data = json.loads(response.text)['zpData']['data']['list']
        for job in job_data:
            job_name = job['jobName']
            company_name = job['company']['name']
            salary = job['salary']
            print(f"城市：{city_name}，公司：{company_name}，职位：{job_name}，薪资：{salary}")

get_job_list()
```

这段代码首先发送一个请求获取城市列表，然后遍历每个城市，发送请求获取该城市的Python开发岗位信息。最后，打印出每个岗位的城市、公司、职位和薪资信息。

请注意，这只是一个简单的示例代码，实际爬取过程中可能需要处理反爬机制、分页等问题。另外，爬取网站数据时请遵守网站的使用规则，不要频繁发送请求以免对网站造成负担。

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值