1.导入模块,并判断所下载类型 PPT 的存储文件夹是否存在;若不存在则新建
import requests
import os
from lxml import etree

# Folder where all downloaded PPT templates will be saved.
path = "e:/中秋节ppt模板"
# BUGFIX: restored the indentation lost in the original paste, and use
# makedirs(exist_ok=True): mkdir fails if the parent directory is missing,
# and the exists-check + mkdir pair is race-prone.
os.makedirs(path, exist_ok=True)
2.访问基础页面,从分页导航中取出最后一页的页码,确定该类型模板共有几页
# Fetch the category index page and read the total page count from the pager.
b_url = "http://www.1ppt.com/moban/zhongqiujie/"
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.135 Safari/537.36'
}
response = requests.get(b_url, headers=headers)
# Site serves GBK-encoded Chinese pages; set encoding before reading .text.
response.encoding = 'gbk'
html = etree.HTML(response.text)
# The third-from-last anchor in the pager holds the last page's number.
# NOTE(review): this index is layout-dependent — verify if the site changes.
num = html.xpath('//ul[@class="pages"]//a/text()')[-3]
3.循环依次访问每页数据,并获取每个PPT的名称及下载链接
# 3. Visit every listing page; collect each template's name and detail link.
for i in range(1, int(num) + 1):
    # Listing pages follow the pattern ppt_zhongqiujie_<page>.html.
    url = b_url + f"ppt_zhongqiujie_{i}.html"
    response = requests.get(url=url, headers=headers)
    response.encoding = 'gbk'
    html = etree.HTML(response.text)
    # Template names (img alt text) paired with their detail-page URLs.
    ppt_name = html.xpath('//ul[@class="tplist"]//img/@alt')
    ppt_url = html.xpath('//ul[@class="tplist"]//h2/a/@href')
    ppt_url = ["http://www.1ppt.com" + u for u in ppt_url]
    dt = dict(zip(ppt_name, ppt_url))
    for k, v in dt.items():
        # The detail page contains the actual download link.
        res = requests.get(url=v, headers=headers)
        res.encoding = "gbk"
        html = etree.HTML(res.text)
        down_url = html.xpath('//ul[@class="downurllist"]//a/@href')[0]
        print(down_url)
        resp = requests.get(down_url).content
        # 4. Save the file.
        # BUGFIX: the extension must come from the download URL (usually
        # .zip), not from the detail-page URL `v`, which always ends in
        # ".html".
        houzhui = down_url.split(".")[-1]
        # BUGFIX: the original did `path = path + k + '.' + houzhui`,
        # which both dropped the '/' separator and grew `path` on every
        # iteration, corrupting every file path after the first.
        file_path = os.path.join(path, k + '.' + houzhui)
        print(file_path)
        with open(file_path, "wb") as f:
            f.write(resp)
        print(k, "下载完成")
完整代码展示
"""Download all Mid-Autumn Festival PPT templates from 1ppt.com.

Walks every listing page of the category, follows each template's detail
page to find its real download link, and saves the archive locally.
"""
import requests
import os
from lxml import etree

# Folder where all downloaded templates are stored; created on first run.
# BUGFIX: makedirs(exist_ok=True) — mkdir fails if the parent is missing.
path = "e:/中秋节ppt模板"
os.makedirs(path, exist_ok=True)

b_url = "http://www.1ppt.com/moban/zhongqiujie/"
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.135 Safari/537.36'
}
response = requests.get(url=b_url, headers=headers)
# Site serves GBK-encoded pages; set encoding before reading .text.
response.encoding = 'gbk'
html = etree.HTML(response.text)
# Third-from-last pager link holds the last page number (layout-dependent).
num = html.xpath('//ul[@class="pages"]//a/text()')[-3]

for i in range(1, int(num) + 1):
    # Listing pages follow the pattern ppt_zhongqiujie_<page>.html.
    url = b_url + f"ppt_zhongqiujie_{i}.html"
    response = requests.get(url=url, headers=headers)
    response.encoding = 'gbk'
    html = etree.HTML(response.text)
    # Template names (img alt text) paired with their detail-page URLs.
    ppt_name = html.xpath('//ul[@class="tplist"]//img/@alt')
    ppt_url = html.xpath('//ul[@class="tplist"]//h2/a/@href')
    ppt_url = ["http://www.1ppt.com" + u for u in ppt_url]
    dt = dict(zip(ppt_name, ppt_url))
    for k, v in dt.items():
        # The detail page contains the actual download link.
        res = requests.get(url=v, headers=headers)
        res.encoding = "gbk"
        html = etree.HTML(res.text)
        down_url = html.xpath('//ul[@class="downurllist"]//a/@href')[0]
        print(down_url)
        resp = requests.get(down_url).content
        # BUGFIX: take the extension from the download URL (usually .zip);
        # `v.split(".")[-1]` always yielded "html" from the detail page.
        houzhui = down_url.split(".")[-1]
        # BUGFIX: os.path.join instead of `path = path + k + ...`, which
        # dropped the '/' and accumulated garbage into `path` each loop.
        file_path = os.path.join(path, k + '.' + houzhui)
        print(file_path)
        with open(file_path, "wb") as f:
            f.write(resp)
        print(k, "下载完成")