基本请求库requests的使用,基本解析库lxml的使用
爬虫程序源代码及爬取过程分析:
import requests
from lxml import etree
'''
Scrape Tencent's job-posting list (https://hr.tencent.com/position.php).

1. Requirement analysis
   Collect the detailed posting fields for every job:
   job title, job category, headcount, location, publish date.
   Entry URL: https://hr.tencent.com/position.php
2. Page-source analysis
   Every data row:  //tr[@class='even']|//tr[@class='odd']
   Next-page link:  //a[@id='next']
3. Implementation below: paginate until the "next" link becomes
   'javascript:;', appending one '::'-separated line per job to tencent.txt.
'''
starturl = 'https://hr.tencent.com/position.php'

# Browser-like User-Agent so the server serves the normal page
# instead of rejecting a bare scripted client.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'
}

page = 1
while True:
    num = 1

    # --- 1. Request the current listing page -------------------------------
    response = requests.get(starturl, headers=headers)
    response.raise_for_status()  # fail fast on HTTP errors rather than parsing an error page
    html = etree.HTML(response.text)

    # --- 2. Extract every data row (rows alternate 'even'/'odd' classes) ---
    rows = html.xpath("//tr[@class='even']|//tr[@class='odd']")

    # Open the output file once per page instead of reopening it for every
    # row. NOTE: the original passed encoding='utf-8 ' (trailing space),
    # which raises LookupError at open() time.
    with open('tencent.txt', 'a+', encoding='utf-8') as file:
        for row in rows:
            # Build a fresh dict per row. The original mutated one shared
            # dict created before the loop, so any consumer outside the
            # loop would only ever see the last row.
            jobdic = {
                'title': row.xpath('string(td[1])'),
                'type':  row.xpath('string(td[2])'),
                'num':   row.xpath('string(td[3])'),
                'addr':  row.xpath('string(td[4])'),
                'date':  row.xpath('string(td[5])'),
            }
            print('第{0}页第{1}条信息'.format(page, num))
            num += 1
            # Persist as one '::'-separated record per line.
            file.write(jobdic['title'] + '::')
            file.write(jobdic['type'] + '::')
            file.write(jobdic['num'] + '::')
            file.write(jobdic['addr'] + '::')
            file.write(jobdic['date'] + '\n')

    # --- 3. Pagination ------------------------------------------------------
    # 'javascript:;' marks the last page; also stop if the link is missing
    # entirely (the original indexed nextpage[0] unguarded -> IndexError).
    nextpage = html.xpath("//a[@id='next']/@href")
    if not nextpage or nextpage[0] == 'javascript:;':
        break
    starturl = 'https://hr.tencent.com/' + nextpage[0]
    page += 1