python使用requests模块请求网址,使用lxml模块中etree抓取数据,并使用time模块延时
爬取“斗图啦”网站,如图所示:
![在这里插入图片描述](https://i-blog.csdnimg.cn/blog_migrate/1b471131aedb4890faa5085e0294de10.png)
将爬取到每页的数据保存在文件夹中
![在这里插入图片描述](https://i-blog.csdnimg.cn/blog_migrate/a0d2f9a720b097c8f23c2734c90e11ef.png)
打开任意一个文件,将图片保存在内:
![在这里插入图片描述](https://i-blog.csdnimg.cn/blog_migrate/9ab7114fc2557731668e877cded0e21e.png)
程序源代码如下:
import requests
from lxml import etree
import os
from urllib import request
import threading
import datetime
import time
# HTTP request headers sent with every page fetch: a doutula.com session
# cookie plus a desktop-Chrome user-agent so the site serves the normal
# HTML instead of blocking the scraper.
# NOTE(review): the cookie is hard-coded from one browser session and will
# expire — confirm whether the site still requires it at all.
headers = {
'cookie': '__cfduid=db5ac00d3df95885d80910dbbb72bc5da1583899178; UM_distinctid=170c7bf703db78-0ae45a3e1acfac-4313f6a-e1000-170c7bf703e9d4; _ga=GA1.2.1160623723.1583899177; _gid=GA1.2.1784373393.1583899177; _agep=1583899178; _agfp=b4b1dbd87877adff8004abcaddedc4e4; _agtk=7e80188209e12a870250d68bd89bd629; __gads=ID=b46146fd63418556:T=1583899179:S=ALNI_Ma3uyE3KIX7GCnCrTOk6RpyFWnSSA; CNZZDATA1256911977=1665695726-1583898988-%7C1583904392; _gat=1; XSRF-TOKEN=eyJpdiI6IjREYURIRVwvME16Rnd3UVpwWkRnSDhRPT0iLCJ2YWx1ZSI6IkhwSGtFdkJVQyt1QXVxNEJjdkJyTGhBZlg0Y21vTUdkRDFNQTMzcWdMQnMyZ0x1MXVHSnZqY2JVM2M2ZVdPaDIiLCJtYWMiOiI3MjAyNzhmZGYzY2RjMTBiMDA2NjRkNWFkM2Q0NTJjYWY5OWZiMDIzYWIyOWI5OTc1OTgwMDdiZmY4YmE2MTU3In0%3D; doutula_session=eyJpdiI6IkhvTXVLMnk5KytWWW5lbVpBNFVYbEE9PSIsInZhbHVlIjoiY1hZVVVTMVwvczlsakkwbjB0NkJrY1ltYkl3VENqWVBhTDd1M0JqRklaOHZWVmxnVkVLSGt0dWVjam9uSkhOMTUiLCJtYWMiOiI2NjU3MjBkZmM0Mzk4MGUxODQ3OWVlMGYyYzJmNWQwZjc5MGNiZDI2ZGE5NzBiMmUyY2M3NGFmNTMyY2Q1MTdjIn0%3D',
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.132 Safari/537.36'
}
def get_detial_url(url, save_file):
    """Fetch one doutula.com list page and download every image on it.

    Parameters:
        url: absolute URL of a photo-list page.
        save_file: existing directory the images are written into;
            files are named 0.png, 1.png, ... in page order.

    Each image is fetched in its own thread. Fetch/parse failures are
    reported and swallowed so the caller can continue with the next page.
    """
    try:
        response = requests.get(url, headers=headers)
        text = response.content.decode("UTF-8")
        html = etree.HTML(text)
        # The real image URL is lazy-loaded via the data-original attribute.
        urls = html.xpath('//div[@class = "page-content text-center"]/div[1]/a/img/@data-original')

        def download(index, img_url):
            # index/img_url are bound per-thread as arguments. The original
            # read the loop variable `x` from the enclosing scope, a
            # late-binding bug: by the time a thread ran, `x` could have
            # advanced, so several threads downloaded the same image and
            # clobbered each other's files.
            request.urlretrieve(img_url, os.path.join(save_file, str(index) + '.png'))

        for x in range(len(urls)):
            th = threading.Thread(target=download, args=(x, urls[x]))
            th.start()
    except Exception as err:
        # Was a bare `except:` that also caught KeyboardInterrupt/SystemExit
        # and hid the reason; keep the best-effort behaviour but narrow it
        # and show what actually went wrong.
        print("解析错误", err)
def start_spider():
    """Interactively crawl N list pages of doutula.com.

    Prompts for a page count, then for each page creates the directory
    斗图集\\第N页, downloads that page's images into it via
    get_detial_url, and sleeps one second between pages to be polite
    to the server. Raises ValueError if the input is not an integer.
    """
    page = int(input("该网站共有3339页动图,请输入需要爬取多少页:"))
    for number in range(1, page + 1):
        start_time = datetime.datetime.now()
        print("开始下载第" + str(number) + '页')
        # The directory layout is identical for every page — the original
        # duplicated these two lines in both branches; only the URL is
        # special-cased for page 1. exist_ok lets the script be re-run.
        os.makedirs('斗图集\\第' + str(number) + '页', exist_ok=True)
        file_name = os.getcwd() + '\\斗图集\\第' + str(number) + '页'
        if number == 1:
            url = 'https://www.doutula.com/photo/list'
        else:
            url = 'https://www.doutula.com/photo/list/?page=' + str(number)
        get_detial_url(url, file_name)
        stop_time = datetime.datetime.now()
        print("正在搜索", url)
        # Bug fix: `(stop-start).microseconds` is only the sub-second
        # component of the timedelta, so any page taking >= 1 s printed a
        # wildly wrong number. total_seconds() covers the full duration.
        elapsed_us = int((stop_time - start_time).total_seconds() * 1_000_000)
        print("第" + str(number) + '页下载完成' + '运行了' + str(elapsed_us) + "微秒")
        time.sleep(1)
if __name__ == '__main__':
    # exist_ok avoids the FileExistsError that os.mkdir raised whenever
    # the script was run a second time in the same directory.
    os.makedirs("斗图集", exist_ok=True)
    start_spider()