爬取智联招聘(实习生)招聘信息(精简版)
一、直接上代码:
# Main script: scrape Zhilian (zhaopin.com) campus/intern job listings
# across 34 result pages and save them to an Excel workbook.
import requests
from bs4 import BeautifulSoup
import pandas as pd
import openpyxl  # not called directly, but pandas needs it as the .xlsx engine

# Accumulators for the four scraped fields (city, company, position, headcount).
cities = []
companies = []
positions = []
headcounts = []

# Search URL; the jt= list encodes the selected job-type filters.
# NOTE(review): "search/jn=4&..." looks like it may have lost a "?" in the
# original post — confirm against the live site before relying on it.
BASE_URL = ('https://xiaoyuan.zhaopin.com/search/jn=4&jt=45,47,48,53,54,57,79,'
            '317,665,666,667,668,669,671,672,679,687,861,863,864,2034,2035,'
            '2036,2037,2038,2039,2040,2041,2042,2043,407,49,692,694,695,696,'
            '868,2063,2064,2065,316,552,556,670,689,841,41,51,55,59,315,388,'
            '389,551,678,690,698,699&pg=')

# Fetch each result page and pull the fields out of every listing card.
# NOTE(review): pg starts at 0 here, matching the original — confirm the
# site's first page is pg=0 and not pg=1.
for page in range(34):  # distinct name: the original reused `i` for both loops
    response = requests.get(BASE_URL + str(page), timeout=10)
    soup = BeautifulSoup(response.text, 'html.parser')
    for item in soup.find_all('div', class_="presentation-item"):
        # Each card contains exactly one of each field; find() takes the first.
        cities.append(item.find('span', class_="city fn-left").text)
        companies.append(item.find('div', class_="fn-right company").text)
        positions.append(item.find('div', class_="fn-left position").text)
        headcounts.append(item.find('span', class_="num fn-left").text)

# Assemble the result table and write it to Excel. The context manager
# replaces the removed-in-pandas-2.0 writer.save() call.
data = pd.DataFrame({'城市': cities, '企业名称': companies,
                     '岗位名称': positions, '招聘人数': headcounts})
with pd.ExcelWriter('zhilian.xlsx') as writer:
    data.to_excel(writer, sheet_name='爬虫数据')
二、爬取结果: