Crawler in Practice 5: Scraping Baidu Images

import os
import urllib.request
from urllib.error import URLError
from urllib.parse import urlencode

import requests

headers = {'user-agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36'}


def get_page(offset):
    # Request one page of search results (30 items per page) and return the JSON body
    params = {
        'pn': offset,
        'rn': 30,
        'gsm': hex(offset)}
    url = 'https://image.baidu.com/search/acjson?tn=resultjson_com&ipn=rj&ct=201326592&is=&fp=result&queryWord=%E8%A1%97%E6%8B%8D&cl=&lm=&ie=utf-8&oe=utf-8&adpicid=&st=&z=&ic=&hd=&latest=&copyright=&word=%E8%A1%97%E6%8B%8D&s=&se=&tab=&width=&height=&face=&istype=&qc=&nc=&fr=&expermode=&selected_tags=' + urlencode(params)
    try:
        response = requests.get(url, headers=headers)
        if response.status_code == 200:
            return response.json()
    except requests.RequestException:
        return None


def decry(url):
    # Decode the obfuscated objURL: restore the masked separators (':', '.', '/'),
    # then map every character through the substitution table
    str_table = {
        '_z2C$q': ':',
        '_z&e3B': '.',
        'AzdH3F': '/'}
    in_table = '0123456789abcdefghijklmnopqrstuvw'
    out_table = '7dgjmoru140852vsnkheb963wtqplifca'
    char_table = str.maketrans(in_table, out_table)
    for key, value in str_table.items():
        url = url.replace(key, value)
    return url.translate(char_table)

def get_img(json_data):
    # Yield the decoded image URL plus a sequential title for each result
    data = json_data.get('data')
    m = 1
    if data:
        for item in data:
            if item.get('objURL'):
                image = decry(item.get('objURL'))
                yield {
                    'image': image,
                    'title': str(m)}
                m += 1


def save_img(item):
    # Save the image under ./mypic/<title>/, named after the host part of its URL
    path = os.path.join('./mypic/', item.get('title'))
    if not os.path.exists(path):
        os.makedirs(path)
    local_image_url = item.get('image')
    if local_image_url:
        save_pic = path + '/' + local_image_url.split('/')[2] + '.jpg'
        try:
            urllib.request.urlretrieve(local_image_url, save_pic)
        except URLError as e:
            print(e.reason)


def main(offset):
    json_data = get_page(offset)
    if json_data:
        for item in get_img(json_data):
            print(item)
            save_img(item)


if __name__ == '__main__':
    for i in range(1, 3):
        main(offset=i * 30)
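
As a side note (this is my own hedged sketch, not part of the original script): some image hosts reject downloads that arrive without a browser-like User-Agent, and urllib.request.urlretrieve sends only a default one. A requests-based download that reuses the same headers dict can be more robust. The function name download_image, the 10-second timeout, and the chunk size below are assumptions introduced here for illustration.

# Hypothetical alternative to urlretrieve: stream the response with requests,
# reuse the crawler's headers, and skip anything that is not an HTTP 200.
def download_image(url, save_path, timeout=10):
    try:
        resp = requests.get(url, headers=headers, timeout=timeout, stream=True)
        if resp.status_code == 200:
            with open(save_path, 'wb') as f:
                for chunk in resp.iter_content(chunk_size=8192):
                    f.write(chunk)
            return True
    except requests.RequestException as e:
        print('download failed:', e)
    return False

If you go this route, save_img would call download_image(local_image_url, save_pic) instead of urllib.request.urlretrieve, and the URLError handling could be dropped.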

