Python爬虫学习之爬取百度图片并保存到本地

import json
import os
import re
from urllib.parse import quote
from urllib.request import Request, urlopen, urlretrieve

def json_all(pn, word='列维坦风景油画'):
    """Collect image URLs from Baidu image search's acjson API.

    Args:
        pn: highest page index to fetch; pages 0..pn are requested,
            30 results per page.
        word: search keyword. Defaults to the query the script was
            originally hard-coded for ("Levitan landscape oil painting").

    Returns:
        A list of image URL strings. Entries whose 'hoverURL' is missing
        or empty are skipped (an empty URL would break the download step).
    """
    encoded_word = quote(word)
    # Request headers are loop-invariant, so build them once.
    header = {
        'Referer': 'https://image.baidu.com/search/index?ct=201326592&z=&s=&tn=baiduimage&ipn=r&word={}&ie=utf-8&oe=utf-8&lm=-1&hd=0&latest=0&copyright=0&st=-1&z=&se=0&fr=ala&pn=0&width=&height=&face=0&ct=503316480'.format(encoded_word),
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36',
    }
    url_template = (
        'https://image.baidu.com/search/acjson?tn=resultjson_com&logid=10112593352318188956'
        '&ipn=rj&ct=201326592&is=&fp=result&queryWord={word}&cl=&lm=-1&ie=utf-8&oe=utf-8'
        '&adpicid=&st=-1&z=%2C&ic=&hd=0&latest=0&copyright=0&word={word}&s=&se=&tab='
        '&width=&height=&face=0&istype=&qc=&nc=&fr=&expermode=&force=&pn={pn}&rn=30'
        '&gsm=5a&1608190938210='
    )
    links = []
    for page in range(pn + 1):
        url = url_template.format(word=encoded_word, pn=page * 30)
        request = Request(url=url, headers=header)
        payload = urlopen(request).read().decode('utf-8')
        info = json.loads(payload)
        # 'data' may be absent on an error response; iterate defensively.
        for item in info.get('data', []):
            hover_url = item.get('hoverURL')
            if hover_url:
                links.append(hover_url)
    return links


if __name__ == '__main__':
    # Create the output directory once, up front, instead of re-checking
    # on every iteration of the download loop.
    save_dir = os.path.abspath('./Picture')
    os.makedirs(save_dir, exist_ok=True)
    urls = json_all(5)
    # Save images as 1.jpeg, 2.jpeg, ... in download order.
    for index, url in enumerate(urls, start=1):
        urlretrieve(url, os.path.join(save_dir, '{}.jpeg'.format(index)))
  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值