Learning Python as a beginner: scraping Boss直聘 job listings and saving them to a CSV file

My girlfriend has been planning to change jobs recently, but she doesn't have much time to go through the listings page by page, so after a month and a half of teaching myself Python I rather overconfidently decided to scrape some of the job listings off Boss直聘. Without further ado, here's the code.

Since I'm a beginner just getting in some practice, I didn't think too hard about structure; the script is basically written straight through from top to bottom.

Step 1: the usual setup, importing the libraries. To keep things simple I also pulled in the xpinyin library so the output filename can be generated directly from the pinyin of the search term.

import requests
from lxml import etree
from urllib.parse import urlencode
import ssl
import csv
import time
from xpinyin import Pinyin

pin = Pinyin()
"""搜索的职业名称"""
position_type = '幼教培训师'

"""输出文件名"""
csv_name = pin.get_pinyin(position_type)+'.csv'
current_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
#写入文件表头的字段
with open(csv_name, 'a', newline = '', encoding = 'utf-8') as fp:
		writer = csv.writer(fp)
		writer.writerow(['职业名称', '公司简介', '薪资', '要求', '福利', '岗位职责及要求', '地址', '网址', '获取时间'])

HEADERS = {
	'user-agent':'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.100 Safari/537.36',
	'referer':'fill in the referer, this one is easy',
	'cookie':'fill in a valid cookie here'
}
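Before running the full crawl, it's worth a quick check that the cookie actually works; in my experience Boss直聘 tends to return a login or security-check page instead of the real job list when the cookie has expired. Here's a small sanity check I use for that (my own addition, not part of the scraping flow, and treating an empty xpath result as a bad cookie is just my assumption), reusing the same list-page xpath as below:

def cookie_looks_valid():
	# Fetch the first list page and check whether any job links are present.
	# If the xpath matches nothing, the response is probably a login or
	# security-check page, which usually means the cookie has expired (assumption).
	test_url = 'https://www.zhipin.com/c101200100/?' + urlencode({'query': position_type, 'page': 1, 'ka': 'page-1'})
	resp = requests.get(test_url, headers = HEADERS)
	resp.encoding = 'utf-8'
	html = etree.HTML(resp.text)
	return len(html.xpath("//div[@class='info-primary']//a/@href")) > 0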

Step 2: use a fixed URL to coax the total page count out of the site.

# First, use a fixed URL to get the total number of pages
# 1. Build the fixed query parameters
params = {
	'query':position_type,
	'page':1,
	'ka':'page-1'
}
Base_url = 'https://www.zhipin.com/c101200100/?'
url = Base_url + urlencode(params)
print(url)

# 2. Send the request and pull the page count out of the response
resp = requests.get(url, headers= HEADERS)
resp.encoding ='utf-8'
text = resp.text
#print(text)
html = etree.HTML(text)
'''total_page (the total page count) is extracted here; we need to handle the case where there is no second page'''
try:
	total_page = html.xpath("//div[@class='page']/a[last()-1]/text()")[0]
	print("Total number of pages: " + str(total_page) + "!")
except IndexError:
	total_page = None
	print("There is no second page!")
# 3. Get the URL of each detail page listed on this list page
href = html.xpath("//div[@class='info-primary']//a/@href")
for h in href:
	detail_url = 'https://www.zhipin.com' + h
	print(detail_url)
	print("\n当前时间为:" + str(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())))
	time.sleep(5)

	# 4. Fetch the content from the detail page (the detail page itself, not the list-page URL above)
	resp = requests.get(detail_url, headers= HEADERS)
	resp.encoding ='utf-8'
	text = resp.text
	html = etree.HTML(text)
	title = html.xpath("//h1/text()")[0]
	try:
		company_profile = html.xpath("//div[@class='job-sec company-info']/div[@class='text']/text()")[0].strip() + "..."
	except IndexError:
		company_profile = 'No company profile available!'
	print(company_profile)
	salary = html.xpath("//span[@class='salary']/text()")[0]
	req = html.xpath("//div[@class='info-primary']//p/text()")[:3]
	job_tag = html.xpath("//div[@class='tag-container']//div[@class='tag-all job-tags']//span/text()")
	positions = html.xpath("//div[@class='job-sec']/div[@class='text']/text()")
	position_description = []
	for posi in positions:
		position_description.append(posi.strip())
	address = html.xpath("//div[@class='job-location']/div[@class='location-address']/text()")[0].strip()
	with open(csv_name, 'a', newline = '', encoding = 'utf-8') as fp:
		writer = csv.writer(fp)
		# Join the list fields so they are written as readable text instead of Python list reprs
		writer.writerow([title, company_profile, salary, ' '.join(r.strip() for r in req), ' '.join(job_tag), '\n'.join(position_description), address, detail_url, current_time])

Step 3: wrap the logic into functions so a loop can modify the parameters and fetch the data from page 2 onward.

def get_detail_urls(url):
	resp = requests.get(url, headers= HEADERS)
	resp.encoding ='utf-8'
	text = resp.text
	html = etree.HTML(text)
	href = html.xpath("//div[@class='info-primary']//a/@href")
	for h in href:
		detail_url = 'https://www.zhipin.com' + h
		print(detail_url)
		print("\n当前时间为:" + str(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())))
		time.sleep(5)
		get_detail_content(detail_url)

# Fetch the content of a detail page
def get_detail_content(url):
	resp = requests.get(url, headers= HEADERS)
	resp.encoding ='utf-8'
	text = resp.text
	html = etree.HTML(text)
	title = html.xpath("//h1/text()")[0]
	try:
		company_profile = html.xpath("//div[@class='job-sec company-info']/div[@class='text']/text()")[0].strip() + "..."
	except IndexError:
		company_profile = 'No company profile available!'
	salary = html.xpath("//span[@class='salary']/text()")[0]
	req = html.xpath("//div[@class='info-primary']//p/text()")[:3]
	job_tag = html.xpath("//div[@class='tag-container']//div[@class='tag-all job-tags']//span/text()")
	positions = html.xpath("//div[@class='job-sec']/div[@class='text']/text()")
	position_description = []
	for posi in positions:
		position_description.append(posi.strip())
	address = html.xpath("//div[@class='job-location']/div[@class='location-address']/text()")[0].strip()
	with open(csv_name, 'a', newline = '', encoding = 'utf-8') as fp:
		writer = csv.writer(fp)
		# Join the list fields so they are written as readable text instead of Python list reprs
		writer.writerow([title, company_profile, salary, ' '.join(r.strip() for r in req), ' '.join(job_tag), '\n'.join(position_description), address, url, current_time])


# Main function: loop over the remaining list pages (page 2 onward)
def spider():
	if total_page is not None:
		for i in range(2, int(total_page)+1):
			print("Now on page " + str(i) + "!")
			ka = 'page-' + str(i)
			params = {
				'query':position_type,
				'page':i,
				'ka':ka
			}
			Base_url = 'https://www.zhipin.com/c101200100/?'
			url = Base_url + urlencode(params)
			get_detail_urls(url)

if __name__ == '__main__':
	spider()
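As a side note, the page-1 block in step 2 repeats almost everything that get_detail_urls and get_detail_content do, so once those two functions exist the whole run could go through them. A rough sketch of what that might look like (build_url and spider_all are names I'm introducing here, not part of the script above):

def build_url(page):
	# Same city code and query parameters as above, just parameterised by page number.
	return 'https://www.zhipin.com/c101200100/?' + urlencode({
		'query': position_type,
		'page': page,
		'ka': 'page-' + str(page)
	})

def spider_all():
	# Page 1 goes through the same code path as every later page.
	get_detail_urls(build_url(1))
	if total_page is not None:
		for i in range(2, int(total_page) + 1):
			print("Now on page " + str(i) + "!")
			get_detail_urls(build_url(i))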

Since I'm only scraping a handful of companies' listings to make browsing easier, I deliberately kept the crawl speed slow. OK, that's it!
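If the fixed time.sleep(5) ever needs to look a little less mechanical, a jittered delay is an easy drop-in replacement. A minimal sketch (my own addition; the 4-7 second range is an arbitrary choice, and time is already imported at the top):

import random

def polite_sleep(base = 4, jitter = 3):
	# Sleep for base seconds plus a random extra of up to jitter seconds,
	# so the requests are not perfectly evenly spaced.
	time.sleep(base + random.uniform(0, jitter))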
