Batch meme-scraping crawler script

import re
import os
import time
import requests
import multiprocessing
from multiprocessing.pool import ThreadPool

picqueue = multiprocessing.Queue()   # (filename, url) pairs waiting to be downloaded
pagequeue = multiprocessing.Queue()  # listing-page numbers waiting to be fetched
logqueue = multiprocessing.Queue()   # every image URL seen, used only for the progress printout
picpool = ThreadPool(50)             # 50 download workers
pagepool = ThreadPool(5)             # 5 listing-page workers
error = []                           # (filename, url) pairs whose download failed

# Seed the work queue with all 837 listing pages.
for x in range(1, 838):
    pagequeue.put(x)


def getimglist(body):
    # Each thumbnail on a listing page carries the image URL in data-original
    # and a human-readable name in alt; data-original is protocol-relative and
    # points at one of the ws1/ws2/... image servers.
    imglist = re.findall(
        r'data-original="(//ws\d[^"]+)" data-backup="[^"]+" alt="([^"]+)"', body)
    for url, name in imglist:
        if name:
            name = name + url[-4:]  # keep the file extension (.jpg, .gif, ...)
            url = "http:" + url     # make the protocol-relative URL absolute
            logqueue.put(url)
            picqueue.put((name, url))
    if len(imglist) == 0:
        # Nothing matched: the page markup changed or the request was blocked.
        print(body)
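
# A quick sanity check for the pattern above, run against a hand-written
# snippet of the listing markup (a hypothetical example, not captured from
# the live site); call it manually when debugging.
def _test_pattern():
    sample = ('<img data-original="//ws1.sinaimg.cn/bmiddle/abc.jpg" '
              'data-backup="//wx1.example.com/abc.jpg" alt="doge">')
    assert re.findall(
        r'data-original="(//ws\d[^"]+)" data-backup="[^"]+" alt="([^"]+)"',
        sample) == [('//ws1.sinaimg.cn/bmiddle/abc.jpg', 'doge')]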



def savefile():
    http = requests.Session()
    while True:
        name, url = picqueue.get()
        if not os.path.isfile(name):  # skip images that are already on disk
            try:
                req = http.get(url)
                with open(name, 'wb') as f:
                    f.write(req.content)
            except Exception:
                error.append([name, url])
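
# The alt text is used verbatim as the filename, so a name containing "/" or
# other reserved characters would make open() fail. A minimal sanitizer one
# could apply to name before saving (hypothetical helper, not part of the
# original script):
def sanitize(name):
    return re.sub(r'[\\/:*?"<>|]', '_', name)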


def getpage():
    http = requests.Session()
    while True:
        pageid = pagequeue.get()
        req = http.get(
            "https://www.doutula.com/photo/list/?page={}".format(pageid))
        getimglist(req.text)
        time.sleep(1)  # throttle each worker to one listing request per second
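
# The fetches above have no retry logic, so a transient 5xx silently drops a
# whole listing page. An optional hardening (a sketch, not part of the
# original script): build sessions with urllib3's built-in retries.
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry

def make_retrying_session():
    s = requests.Session()
    retry = Retry(total=3, backoff_factor=1,
                  status_forcelist=[429, 500, 502, 503, 504])
    s.mount("https://", HTTPAdapter(max_retries=retry))
    return s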


# Start 5 page workers and 50 download workers, then report progress forever.
for x in range(5):
    pagepool.apply_async(getpage)
for x in range(50):
    picpool.apply_async(savefile)
while True:
    print(picqueue.qsize(), pagequeue.qsize(), logqueue.qsize())
    time.sleep(1)
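
As written, the monitor loop never exits, even after every page has been fetched and every image saved. A variant that stops on its own could replace the final while loop above; this is a sketch, assuming all pages are seeded up front as in this script, and noting that empty() and qsize() on a multiprocessing.Queue are only approximate:

idle = 0
while idle < 5:  # exit after ~5 seconds with no pending work
    if pagequeue.empty() and picqueue.empty():
        idle += 1
    else:
        idle = 0
    print(picqueue.qsize(), pagequeue.qsize(), logqueue.qsize())
    time.sleep(1)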

The whole crawl finishes in roughly 7 minutes: with 5 page workers each sleeping 1 second between requests, the 837 listing pages alone take about 837 / 5 ≈ 168 seconds, while the 50 download workers drain the image queue in parallel.

Reposted from: https://www.cnblogs.com/howmp/p/6947151.html
