Scraping JD.com Hanfu Images

import requests
from bs4 import BeautifulSoup

headers = {
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.132 Safari/537.36'
}
img_list = []
for page in range(1, 7, 2):  # the page param starts at 1 and increases by 2 per results page, so this fetches 3 pages
    link = 'https://search.jd.com/Search?keyword=%E6%B1%89%E6%9C%8D%E5%A5%B3&enc=utf-8&qrst=1&rt=1&stop=1&vt=2&suggest=1.def.0.V06--12s0%2C20s0%2C38s0%2C97s0&wq=%E6%B1%89%E6%9C%8D&page={}&s=1&click=0'.format(page)
    print(link)

    response = requests.get(url=link, headers=headers)
    response.encoding = response.apparent_encoding
    html = response.text

    soup = BeautifulSoup(html, 'html.parser')

    for i in range(1, 61):
        tags = soup.select("#J_goodsList > ul > li:nth-child(" + str(i) + ") > div > div.p-img > a > img")  # Chrome DevTools: right-click the node, Copy > Copy selector
                       # J_goodsList > ul > li:nth-child(1) > div > div.p-img > a > img
                       # J_goodsList > ul > li:nth-child(2) > div > div.p-img > a > img
                       # J_goodsList > ul > li:nth-child(60) > div > div.p-img > a > img  # hence 60 images per page
        for tag in tags:
            src = tag.get("source-data-lazy-img")  # JD lazy-loads product images; the URL sits in this attribute
            if src:  # the attribute can be missing once an image has loaded; skip those here (see the sketch after the script)
                img_list.append("https:" + src)
print("需要下载的图片数量:",len(img_list))

name = 0
for img_link in img_list:
    name += 1
    img_data = requests.get(url=img_link, headers=headers).content
    print("Downloading image {}".format(name))
    with open(r'C:\Users\DELL\Desktop\python_wd\mig\京东汉服' + '\\' + str(name) + '.png', 'wb') as f:
        f.write(img_data)
print('\nDownload complete......')
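
One fragility worth noting: `tag.get("source-data-lazy-img")` returns None for any tag whose image has already loaded (such tags typically carry the URL in `src` instead). A minimal sketch of a more defensive extraction step, assuming the URL may sit in either attribute and may be protocol-relative; it also grabs all items in one select() call instead of querying nth-child(1..60) one at a time:

def extract_img_urls(soup):
    # Collect every product-image URL on one JD search results page.
    urls = []
    for tag in soup.select("#J_goodsList > ul > li > div > div.p-img > a > img"):
        src = tag.get("source-data-lazy-img") or tag.get("src")  # lazy-load attribute first, plain src as fallback
        if src:
            urls.append(src if src.startswith("http") else "https:" + src)
    return urls

# usage inside the page loop above:
# img_list.extend(extract_img_urls(soup))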
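
The download loop also hard-codes a .png extension, while JD typically serves .jpg or .webp; the bytes are written correctly either way, but viewers that trust the extension may complain. A hedged alternative that derives the extension from the URL and creates the target folder on first run, reusing img_list and headers from the script above:

import os
import requests
from urllib.parse import urlparse

save_dir = r'C:\Users\DELL\Desktop\python_wd\mig\京东汉服'
os.makedirs(save_dir, exist_ok=True)  # create the folder if it does not exist yet

for name, img_link in enumerate(img_list, start=1):
    ext = os.path.splitext(urlparse(img_link).path)[1] or '.jpg'  # keep the server's extension, fall back to .jpg
    img_data = requests.get(url=img_link, headers=headers).content
    with open(os.path.join(save_dir, str(name) + ext), 'wb') as f:
        f.write(img_data)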

