import os

import requests
from bs4 import BeautifulSoup
# Browser-like User-Agent header so JD's search endpoint serves the normal
# HTML page instead of rejecting the request as a bot.
headers = {
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.132 Safari/537.36'
}
# Collect product-image URLs from 3 pages of JD search results for "汉服女".
# JD paginates with odd page numbers, so page=1,3,5 covers result pages 1-3.
img_list = []
for page in range(1, 7, 2):
    link = ('https://search.jd.com/Search?keyword=%E6%B1%89%E6%9C%8D%E5%A5%B3'
            '&enc=utf-8&qrst=1&rt=1&stop=1&vt=2'
            '&suggest=1.def.0.V06--12s0%2C20s0%2C38s0%2C97s0'
            '&wq=%E6%B1%89%E6%9C%8D&page={}&s=1&click=0'.format(page))
    print(link)
    response = requests.get(url=link, headers=headers)
    response.encoding = response.apparent_encoding
    soup = BeautifulSoup(response.text, 'html.parser')
    # One selector matches every product image on the page (up to 60),
    # replacing the original 60 separate li:nth-child(i) queries.
    for img in soup.select("#J_goodsList > ul > li > div > div.p-img > a > img"):
        # Lazy-loaded images carry the real URL in 'source-data-lazy-img';
        # fall back to 'src', and skip tags with neither attribute so that
        # "https:" + None can no longer raise TypeError.
        src = img.get("source-data-lazy-img") or img.get("src")
        if src:
            img_list.append("https:" + src)
print("需要下载的图片数量:", len(img_list))
# Download each collected image to a fixed local folder, numbered 1..N.
save_dir = r'C:\Users\DELL\Desktop\python_wd\mig\京东汉服'
# Create the target directory up front; the original crashed with
# FileNotFoundError when the folder did not already exist.
os.makedirs(save_dir, exist_ok=True)
for name, img_link in enumerate(img_list, start=1):
    img_scr = requests.get(url=img_link, headers=headers).content
    print("正在下载{}张图片".format(name))
    # os.path.join replaces the hand-built backslash-terminated raw string.
    with open(os.path.join(save_dir, str(name) + '.png'), 'wb') as f:
        f.write(img_scr)
print('\n下载完成。。。。。。。')
# 京东汉服 (JD Hanfu image scraper)
# Blog-page residue from the original source, kept as a comment:
# "最新推荐文章于 2024-07-06 19:25:43 发布" (latest recommended article published 2024-07-06 19:25:43)