Python: downloading web page images

Overview:

Example 1: scraping a simple page with no anti-scraping measures

# coding: utf8
import time

import requests
import os
from bs4 import BeautifulSoup

def downLoad_html(url):
    image_name_list = []

    # Set the HTTP headers; add entries as needed. The token here is one way to authenticate.
    header = {"Authorization": "Bearer "}

    source_code = requests.get(url, headers=header)
    plain_text = source_code.text
    soup = BeautifulSoup(plain_text, 'html.parser')

    # 1. Find all <a> tags
    for link in soup.find_all('a'):

        # 2. Parse each <a> tag and extract the link from its href attribute
        image_name = link.get("href")

        # Skip the parent-directory entry and thumbnail images
        if image_name and image_name != "../" and "thumb" not in image_name:
            image_name_list.append(image_name)

    print(image_name_list)
    return image_name_list


def download_img(dir_url, image_name_list):

    # Set the HTTP headers; add entries as needed. The token here is one way to authenticate.
    header = {"Authorization": "Bearer "}

    # Create a directory to save the photos
    if not os.path.exists("处方"):
        os.mkdir("处方")

    # Running index used to name the saved image files
    image_index = 1

    for i in image_name_list:
        r = requests.get(dir_url+i, headers=header, stream=True, timeout=5)

        # Print the response status code
        print(r.status_code)

        if r.status_code == 200:
            with open("./处方/"+str(image_index)+".jpg", 'wb') as f:
                f.write(r.content)  # Write the response body to the image file
            print("done")
            image_index += 1
            time.sleep(5)
        del r

if __name__ == '__main__':
    # Download the target images
    dir_url = "http://*****/pic/"

    image_name_list = downLoad_html("http://****/pic/")

    download_img(dir_url, image_name_list)

Example 2: downloading Baidu image search results

import requests
import os
import urllib


class Spider_baidu_image():
    def __init__(self):
        self.url = 'http://image.baidu.com/search/acjson?'
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) '
                          'Chrome/69.0.3497.81 Safari/537.36'}
        self.headers_image = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) '
                          'Chrome/69.0.3497.81 Safari/537.36',
            'Referer': 'http://image.baidu.com/search/index?tn=baiduimage&ipn=r&ct=201326592&cl=2&lm=-1&st=-1&fm=result&fr=&sf=1&fmq=1557124645631_R&pv=&ic=&nc=1&z=&hd=1&latest=0&copyright=0&se=1&showtab=0&fb=0&width=&height=&face=0&istype=2&ie=utf-8&sid=&word=%E8%83%A1%E6%AD%8C'}
        self.keyword = input("Enter the image search keyword: ")
        self.paginator = int(input("Enter the number of pages to fetch (30 images per page): "))

    def get_param(self):
        """
        获取url请求的参数,存入列表并返回
        :return:
        """
        keyword = urllib.parse.quote(self.keyword)
        params = []
        for i in range(1, self.paginator + 1):
            params.append(
                'tn=resultjson_com&ipn=rj&ct=201326592&is=&fp=result&queryWord={}&cl=2&lm=-1&ie=utf-8&oe=utf-8&adpicid=&st=-1&z=&ic=&hd=1&latest=0&copyright=0&word={}&s=&se=&tab=&width=&height=&face=0&istype=2&qc=&nc=1&fr=&expermode=&force=&cg=star&pn={}&rn=30&gsm=78&1557125391211='.format(
                    keyword, keyword, 30 * i))
        return params

    def get_urls(self, params):
        """
        由url参数返回各个url拼接后的响应,存入列表并返回
        :return:
        """
        urls = []
        for i in params:
            urls.append(self.url + i)
        return urls

    def get_image_url(self, urls):
        image_url = []
        for url in urls:
            json_data = requests.get(url, headers=self.headers).json()
            # The 'data' list usually ends with an empty dict, hence the truthiness check below
            json_data = json_data.get('data') or []
            for i in json_data:
                if i:
                    image_url.append(i.get('thumbURL'))
        return image_url

    def get_image(self, image_url):
        """
        根据图片url,在本地目录下新建一个以搜索关键字命名的文件夹,然后将每一个图片存入。
        :param image_url:
        :return:
        """
        cwd = os.getcwd()
        file_name = os.path.join(cwd, self.keyword)
        if not os.path.exists(file_name):
            os.mkdir(file_name)
        for index, url in enumerate(image_url, start=1):
            with open(os.path.join(file_name, '{}.jpg'.format(index)), 'wb') as f:
                f.write(requests.get(url, headers=self.headers_image).content)
            if index % 30 == 0:
                print('{}: page {} downloaded'.format(self.keyword, index // 30))

    def __call__(self, *args, **kwargs):
        params = self.get_param()
        urls = self.get_urls(params)
        image_url = self.get_image_url(urls)
        self.get_image(image_url)


if __name__ == '__main__':
    spider = Spider_baidu_image()
    spider()

Error log:

requests.exceptions.ConnectionError: ('Connection aborted.', RemoteDisconnected('Remote end closed connection without response'))

Cause: the request timed out. Setting a timeout on the request fixes it.

Fix: r = requests.get(dir_url+i, headers=header, stream=True, timeout=5)
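
For reference, a minimal sketch of a retry wrapper around the download request, reusing the header variable from Example 1. The helper name fetch_with_retry, the retry count, and the backoff delay are my own assumptions, not part of the original code:

import time
import requests

def fetch_with_retry(url, header, retries=3, timeout=5):
    # Assumed helper: try the GET request up to `retries` times with a short backoff
    for attempt in range(1, retries + 1):
        try:
            return requests.get(url, headers=header, stream=True, timeout=timeout)
        except (requests.exceptions.ConnectionError, requests.exceptions.Timeout) as exc:
            print("attempt {} failed: {}".format(attempt, exc))
            time.sleep(2 * attempt)  # wait a little longer before each retry
    return None

In download_img, the request line could then become r = fetch_with_retry(dir_url + i, header), skipping the image when None is returned.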