爬虫多线程下载图片 & 动态加载图,另类爬取方法

爬虫多线程下载图片

import requests
import os
from lxml import etree
from multiprocessing.dummy import Pool

####保存图片文件####
def save_img(url):
    """Download one image from *url* and save it into directory ``c``.

    The file name is the last 14 characters of the URL.
    Relies on the module-level ``c`` (target directory) set in ``__main__``.
    """
    name = url[-14:]
    print(name)
    # timeout: a hung connection must not block a pool worker forever
    resp = requests.get(url, timeout=30)
    with open(os.path.join(c, name), 'wb') as f:
        f.write(resp.content)

####开始####
if __name__ == '__main__':
    # Prompts are runtime strings from the original script — kept verbatim.
    a = int(input('输入开始页: '))   # first page number
    b = int(input('输入结束页: '))   # last page number (inclusive)
    c = input('输入目录名: ')        # output directory, read by save_img()
    os.makedirs(c)
    # Bug fix: the original created a new Pool(12) on EVERY iteration and
    # only closed the last one, leaking 12 worker threads per page.
    # Create the pool once and reuse it for all pages.
    pool = Pool(12)
    for i in range(a, b + 1):
        target = 'http://www.meizitu.org/page/' + str(i) + '/'
        req = requests.get(url=target)
        soup = etree.HTML(req.text)
        # src of every thumbnail image on the listing page
        url_list = soup.xpath('//*[@class="thumb"]/img/@src')
        pool.map(save_img, url_list)
    pool.close()
    pool.join()
    print('\n' + '....下载完成....')

动态加载图,另类爬取方法

import requests
import os
import re
from multiprocessing.dummy import Pool

def get_img_url(a, b, c):
    """Collect the pin ids of huaban board *a* and download each image.

    a -- board id string (from the board URL)
    b -- pin id used as the ``max=`` paging cursor; also downloaded itself
    c -- how many pins to request via ``limit=``
    """
    list_url = ('https://huaban.com/boards/' + a + '/?jxy8h0iv&max=' + b
                + '&limit=' + str(c) + '&wfl=1')
    # timeout: don't hang forever on a dead connection
    list_req = requests.get(list_url, timeout=30).text
    # raw strings: '\W' in a plain literal is an invalid escape and raises
    # a SyntaxWarning on modern Python; the pattern itself is unchanged
    list_req = re.findall(r'category_name([\W\w]*?)app._csr', list_req)[0]
    url_pins = re.findall(r'pin_id":(.*?), "user_id', list_req)
    url_pins += [b]  # the cursor pin b is not included in the paged list
    pool = Pool(14)
    # map, not imap: the original never consumed the lazy imap iterator,
    # so worker exceptions were silently discarded
    pool.map(get_date, url_pins)
    pool.close()
    pool.join()

def get_date(url):
    """Resolve a pin id to its CDN image URL and delegate to save_img()."""
    pin_page = 'https://huaban.com/pins/' + url + '/'
    page_text = requests.get(url=pin_page).text
    # narrow the page down to the fragment holding the image key
    fragment = re.findall('page(.*?)type', page_text)[0]
    key = re.findall('"key":"(.*?)"', fragment)[0]
    save_img('http://hbimg.huabanimg.com/' + key)
    
def save_img(url):
    """Download the image at *url* into directory ``d`` (set in __main__).

    The file name is sliced out of the huaban CDN key embedded in the URL.
    """
    name = url[-30:-13]
    print(url + '\n')
    # timeout: a hung connection must not block a pool worker forever
    resp = requests.get(url, timeout=30)
    with open(os.path.join(d, name + '.jpg'), 'wb') as f:
        f.write(resp.content)
            
if __name__ == '__main__':
    # Prompts/messages are runtime strings from the original — kept verbatim.
    print('仅支持:https://huaban.com/boards/*/'+'\n')
    a = input('输入要下载的链接码:')       # board id from the URL
    target = 'https://huaban.com/boards/' + a + '/'
    # timeout: don't hang forever on a dead connection
    req = requests.get(url=target, timeout=30).text
    # raw strings: '\W' in a plain literal is an invalid escape
    # (SyntaxWarning on modern Python); patterns themselves unchanged
    req = re.findall(r'category_name([\W\w]*?)app._csr', req)[0]
    # first pin id on the board — used as the paging cursor by get_img_url
    b = re.findall(r'pin_id":(.*?), "user_id', req)[0]
    c = int(input('须要下载前多少张图片:')) - 1
    d = input('创建目录名: ')               # output directory, read by save_img()
    os.makedirs(d)
    get_img_url(a, b, c)
    print('\n' + '....下载完成....')
  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值