import requests
from urllib import request
from lxml import etree
import os
import re
# Directory where downloaded images are written (created by main() if missing).
IMAGES_DIR = './images/'
def parse_url(url):
    """Fetch one doutula.com photo-list page and download every non-gif image.

    :param url: URL of a listing page (e.g. https://www.doutula.com/photo/list/?page=1).
    Side effects: writes one file per matched ``<img>`` into ``IMAGES_DIR``;
    performs network I/O via ``requests`` and ``urllib.request.urlretrieve``.
    """
    headers = {
        # Browser-like UA plus a session cookie so the site serves the real page
        # instead of an anti-bot placeholder.
        "user-agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36",
        "cookie": "__cfduid=d2a61de29a61781abd96707aa8981826c1554019090; UM_distinctid=169d2c18026211-001319c41bb32e-3c604504-1fa400-169d2c180275e4; _ga=GA1.2.679455519.1554019091; _gid=GA1.2.1187621390.1554019091; yjs_id=ca79619b6d61f50846a806c1e6d8172b; ctrl_time=1; CNZZDATA1256911977=696277388-1554018365-%7C1554019265; XSRF-TOKEN=eyJpdiI6IlZmQXFuVXJ5Q3VTZDZOb3I2ZnhTemc9PSIsInZhbHVlIjoieXVJekZkc1IwMExxZnlLalY1Tk1IYit5M2NKaGtkeXNDaHRqYTJSVFlwRG1GK2ttekMzam9tMDZVWkt2WWRFXC8iLCJtYWMiOiI2NTA1OGRkMmNiNDFmZDEzMTgxZmU3NzQ2NzRlZGYzNThiZjkyYzMxZjkxZjM5YTNkMTI2MjQ1NTlhY2ViMmMyIn0%3D; doutula_session=eyJpdiI6InFiazlaZU4xdDZ6K2N4cW5talNid2c9PSIsInZhbHVlIjoiVTdSOFF6SExVN25mdTNCWWF3bzdXSFFHNTJOQnh3YUpnQm9RRDRESmQ0ZzdEa2VZejcrc0s3dEtFXC9EZFJRa0oiLCJtYWMiOiJjOWU1ZTBmNjU5MmJhZTVmNDRjODU0YmIzYmJjMDI1YTAxNmRhNTExZTkwOTQ5YjkwMGEyYmRmOGY0NzNhNjFiIn0%3D",
    }
    res = requests.get(url=url, headers=headers)
    html = etree.HTML(res.text)
    # Images inside the content area whose class attribute exists and is not "gif".
    imgs_url = html.xpath('//div[@class="page-content text-center"]//a//img[@class!="gif"]')
    for node in imgs_url:
        # Lazy-loaded image URL; skip nodes without one instead of crashing
        # on None.strip(...).
        img = node.get("data-original")
        if not img:
            continue
        # BUG FIX: the original used img.strip('!dta'), which treats '!dta' as a
        # CHARACTER SET and strips any of '!', 'd', 't', 'a' from both ends,
        # corrupting URLs that start or end with those characters. Remove only
        # the literal '!dta' thumbnail suffix.
        if img.endswith('!dta'):
            img = img[:-len('!dta')]
        # The alt text becomes the file name; fall back when it is missing.
        filename = node.get("alt") or 'image'
        # Strip characters that are illegal or awkward in file names
        # (ASCII and full-width punctuation).
        filename = re.sub(r'[\??\.\,,\*\!!。]', '', filename)
        # Keep the original image extension (e.g. '.jpg', '.png').
        ext = os.path.splitext(img)[1]
        request.urlretrieve(img, IMAGES_DIR + filename + ext)
def main():
    """Crawl the doutula listing pages and download their images."""
    # Make sure the output directory exists before any download starts.
    if not os.path.exists(IMAGES_DIR):
        os.mkdir(IMAGES_DIR)
    for page in range(1, 101):
        page_url = 'https://www.doutula.com/photo/list/?page=%d' % page
        parse_url(page_url)
        # Deliberately stop after page 1 — only the first page is crawled
        # while the feature is under test.
        break
# Run the crawler only when executed as a script, not on import.
if __name__ == '__main__':
    main()