BeautifulSoup爬取智联招聘数据
警告:
此项技术仅适用于练习,请勿进行大规模爬取;爬取过程中使用了个人cookie,内含个人隐私信息,请注意不要随意泄露!
如果过分爬取,会造成ip被封!
1.导入所需函数库
from bs4 import BeautifulSoup
import requests
import pandas as pd
import pymysql
2.爬取招聘数据
3.配置请求头
## USER_AGENT: desktop-browser identity string sent with every request so the
## site serves the normal HTML page to the scraper.
USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/103.0.5060.134 Safari/537.36 Edg/103.0.1264.77'
## URL: search-result page for region jl=531; the page index in the query
## string runs from p=1 to p=34 (see main()).
URL = 'https://sou.zhaopin.com/?jl=531&p=1'
## COOKIE: personal login cookie — deliberately left empty here because it
## contains private account data; fill in locally, never commit it.
COOKIE = ''
4.数据请求
# --- One-page demo: request page 1, parse it, and print a sample entry ---
# Build the request headers from the constants above (the original referenced
# an undefined name `headers`).
headers = {
    'User-Agent': USER_AGENT,
    'Cookie': COOKIE,
}
# Fetch the page.  The original called `rq.get(...)` but only `requests` was
# imported — fixed to use the imported name.
html = requests.get(url=URL, headers=headers, timeout=30)
# Force UTF-8 so the Chinese text decodes correctly.
html.encoding = "utf-8"
# Check the HTTP status code before parsing.
code = html.status_code
if code == 200:
    print("网页请求成功")
    # Parse the HTML.  The original called `bs(...)` but only `BeautifulSoup`
    # was imported — fixed to use the imported name.
    page = BeautifulSoup(html.text, "html.parser")
    # Job titles
    job_name = page.find_all("span", class_="iteminfo__line1__jobname__name")
    # Salary strings
    job_salary = page.find_all("p", class_="iteminfo__line2__jobdesc__salary")
    # Company names
    company_name = page.find_all("span", class_="iteminfo__line1__compname__name")
    # Demand list (city / experience / education) per the sample output below
    company_address = page.find_all("ul", class_="iteminfo__line2__jobdesc__demand")
    # Company descriptions
    company_dec = page.find_all("div", class_="iteminfo__line2__compdesc")
    # Sanity-check prints moved INSIDE the success branch: in the original they
    # ran unconditionally and raised NameError when the request had failed.
    print(job_name[1])
    print(job_salary[1])
    print(company_name[1])
    print(company_address[1])
    print(company_dec[1])
else:
    print("网页请求失败")
网页请求成功
<span class="iteminfo__line1__jobname__name" title="Java开发工程师"><span style="color: #FF5959;">Java开发</span>工程师</span>
<p class="iteminfo__line2__jobdesc__salary">
6千-7千
<!-- --></p>
<span class="iteminfo__line1__compname__name" title="耘盛祥荣(天津)科技创新产业发展有限公司">耘盛祥荣(天津)科技创新产业发展有限公司</span>
<ul class="iteminfo__line2__jobdesc__demand"><li class="iteminfo__line2__jobdesc__demand__item">天津-滨海新区</li> <li class="iteminfo__line2__jobdesc__demand__item">不限</li> <li class="iteminfo__line2__jobdesc__demand__item">大专</li></ul>
<div class="iteminfo__line2__compdesc"><span class="iteminfo__line2__compdesc__item">其它 </span> <span class="iteminfo__line2__compdesc__item">20-99人 </span></div>
5.定义函数
# 用来获取网页body标签内容的方法
def get_body_text(url):
    """Fetch *url* and return a BeautifulSoup parse of its HTML body.

    Returns None (after printing the error) when the request or status check
    fails, so callers MUST check the result before using it.
    """
    headers = {
        'User-Agent': USER_AGENT,
        # requests requires header values to be latin-1 encodable; the
        # round-trip re-encodes any non-ASCII bytes in the cookie.
        'Cookie': COOKIE.encode("utf-8").decode("latin1"),
    }
    try:
        req = requests.get(url=url, headers=headers, timeout=30)
        # Raise for any non-2xx status so it is handled like a network error.
        req.raise_for_status()
        req.encoding = 'UTF-8'
    except Exception as e:
        print('爬取错误', e)
        # Explicit: the original fell off the end and returned None implicitly,
        # which callers then crashed on.  The contract is now documented.
        return None
    print(req.url, '爬取成功')
    return BeautifulSoup(req.text, 'html.parser')
6.数据爬取
## 从一个 job_info_block 中提取出所需信息的方法
#数据库配置
# --- MySQL connection shared by find_info() below ---
# NOTE(review): credentials are hard-coded (root/root) and the connection is
# never closed — move to config and add a close/teardown in real use.
conn = pymysql.connect(host="localhost",port=3306,user="root",passwd="root",db="invite_data" )
# Cursor used to execute the parameterized INSERT statements.
cursor = conn.cursor()
def find_info(job_info):
    """Extract one job posting from a listing tag and insert it into MySQL.

    Uses the module-level `cursor`/`conn`; commits after each insert so a
    crash mid-run keeps the rows already written.  Returns None.
    """
    # Job title
    jobName = job_info.find('span', {'class': 'iteminfo__line1__jobname__name'}).get_text()
    # Salary text with whitespace stripped
    salaryDesc = job_info.find('p', {'class': 'iteminfo__line2__jobdesc__salary'}).get_text().replace(' ', '').replace('\n', '')
    # The demand <li> items are [city, experience, education] per the sample
    # page output above; fetch the list ONCE instead of the original's three
    # separate find_all() calls over the same nodes.
    demand = job_info.find_all('li', {'class': 'iteminfo__line2__jobdesc__demand__item'})
    # City / work location
    cityName = demand[0].get_text()
    # NOTE(review): demand[1] is the experience requirement and demand[2] the
    # education level; the original column mapping (jobLabels <- [1],
    # jobExperience <- [2]) is preserved — confirm the job_list columns really
    # mean what their names suggest before relying on them.
    jobLabels = demand[1].get_text()
    jobExperience = demand[2].get_text()
    # Welfare/skill tags, stored as the str() of the tag list
    skills = str(job_info.find_all('div', {'class': 'iteminfo__line3__welfare__item'}))
    # Company name
    brandName = job_info.find('span', {'class': 'iteminfo__line1__compname__name'}).get_text()
    # Parameterized INSERT — safe against SQL injection from scraped text.
    sql = "INSERT INTO job_list (jobName, salaryDesc, jobLabels, skills, jobExperience, cityName, brandName) VALUES (%s,%s,%s,%s,%s,%s,%s)"
    param = (jobName, salaryDesc, jobLabels, skills, jobExperience, cityName, brandName)
    cursor.execute(sql, param)
    # Persist immediately.
    conn.commit()
7.定义主函数
def main():
    """Crawl all 34 result pages and insert each job posting via find_info()."""
    BASE_URL = 'https://sou.zhaopin.com/?jl=531&p='
    for page_no in range(1, 35):
        # Build the URL for this page and fetch it.
        url = BASE_URL + str(page_no)
        body = get_body_text(url)
        # get_body_text() returns None on a failed request; the original
        # crashed on None.find_all — skip the page instead.
        if body is None:
            continue
        # Every listing block on the page.
        job_infos = body.find_all('a', {'class': 'joblist-box__iteminfo'})
        for job_info in job_infos:
            # find_info() writes straight into MySQL and returns None, so
            # there is nothing useful to accumulate.  The original appended
            # the Nones to a list and then called list.to_txt(...), which
            # does not exist (AttributeError on the very first page).
            find_info(job_info)
8.主启动器
# Script entry point: start the crawl only when executed directly, not on import.
if __name__ == '__main__':
    main()