目标网址
代码
基础代码:第一页模板下载
import os

import requests
from lxml import etree

if __name__ == '__main__':
    # Scrape page 1 of chinaz.com's free resume-template listing and save
    # every template's .rar archive into ./page1/.
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36'
    }
    # Listing page for free templates (page 1).
    url = 'https://sc.chinaz.com/jianli/free.html'
    page_text = requests.get(url=url, headers=headers)
    # Fix garbled Chinese: let requests detect the page's real encoding.
    page_text.encoding = page_text.apparent_encoding
    page_text = page_text.text
    # Parse the listing page into an etree object.
    tree = etree.HTML(page_text)
    # Each div under #container is one template card.
    div_list = tree.xpath('//*[@id="container"]/div')
    # Create the output directory up front; open(..., 'wb') below would
    # otherwise fail with FileNotFoundError on a fresh run.
    os.makedirs('./page1', exist_ok=True)
    for div in div_list:
        # The card's detail-page link is protocol-relative; prepend the scheme.
        href = div.xpath('./a/@href')[0]
        href = 'https:' + href
        # Template name, used as the output filename.
        resume_name = div.xpath('./p/a/text()')[0]
        # Fetch the template's detail page.
        resume_page_text = requests.get(url=href, headers=headers).text
        tree_resume = etree.HTML(resume_page_text)
        # First mirror link in the download list on the detail page.
        download_href = tree_resume.xpath('//div[@class="down_wrap"]/div[2]/ul/li/a/@href')[0]
        # Download the archive as raw bytes.
        rar_data = requests.get(url=download_href, headers=headers).content
        # Save the response bytes as ./page1/<name>.rar.
        outfile_path = './page1/' + resume_name + '.rar'
        with open(outfile_path, 'wb') as fw:
            fw.write(rar_data)
结果:
PS:如有需要Python学习资料的小伙伴可以点击下方链接自行获取
代码优化:分页操作 跳过
#跳过