# 网站: https://www.ali213.net/zt/yjwj/wiki/nielian_1_0_1.html
# 爬取所有迦南的图片,下载到创建的迦南文件夹下
# (Scrapes every card image from the wiki pages above into the ../迦南 folder.)
import os
import time

import requests
from fake_useragent import UserAgent
from lxml import etree
def get_html(url):
    """Download every card image found on one wiki page into ../迦南.

    Parameters
    ----------
    url : str
        Page URL of the form .../nielian_1_0_{page}.html.

    Side effects: one HTTP GET per image, one PNG written per image.
    Returns None; silently does nothing if the page request fails.
    """
    headers = {
        # "User-Agent" is the real HTTP header name; the original
        # "UserAgent" key was silently ignored by the server.
        "User-Agent": UserAgent().random,
    }
    res = requests.get(url, headers=headers)
    if res.status_code != 200:
        return  # page missing or blocked — nothing to scrape

    html = etree.HTML(res.content.decode())
    img_urls = html.xpath(".//div/div/div/a/@data-img")  # image URLs (list)
    img_names = html.xpath(".//div/div/a/span[1]/text() ")  # image names (list)

    # The original code never created the folder it wrote into; make sure
    # it exists so open(..., 'wb') below cannot fail with FileNotFoundError.
    os.makedirs("../迦南", exist_ok=True)

    # zip() pairs each URL with its name and stops at the shorter list.
    # The original range(1, 12) skipped item 0 and raised IndexError on
    # pages with fewer than 12 images.
    for img_url, img_name in zip(img_urls, img_names):
        print(img_url)
        ress = requests.get(img_url, headers=headers)
        # ress.content is bytes and never None, so the original
        # "!= None" test was always true; check HTTP success and a
        # non-empty payload instead.
        if ress.status_code == 200 and ress.content:
            s = ("图片名称:") + img_name + ("-------") + ("地址") + img_url
            filname = "../迦南/%s.png" % img_name
            with open(filname, 'wb') as f:
                f.write(ress.content)
            print("正在下载=========={}".format(s))
if __name__ == '__main__':
    # The wiki paginates as nielian_1_0_{page}.html; pages are numbered
    # 0 through 42 (43 pages total, counting page 0).
    page_template = "https://www.ali213.net/zt/yjwj/wiki/nielian_1_0_{}.html"
    for page in range(43):
        print("正在爬取第{}页.....".format(page))
        get_html(page_template.format(page))
        time.sleep(2)  # throttle requests so we don't hammer the server
# 开永劫无间,一键导入图片。
# 永劫无间全套链接: https://pan.baidu.com/s/129HiY2LJi7U-UalRljj85A
# 提取码: rtqh