Implementing an Asynchronous Crawler with Multithreading

Keywords: multithreading, asynchronous crawler, thread-safe queue. The crawler uses a producer/consumer model: producer threads fetch and parse the list pages and push image URLs into a thread-safe queue, while consumer threads take tasks from that queue and download the images. A minimal sketch of the queue pattern comes first; the full crawler follows it.
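
A minimal, self-contained sketch of the producer/consumer pattern built on the standard-library Queue class (written in Python 2 to match the crawler below); this snippet is illustrative only and is not part of the crawler itself:

# Minimal producer/consumer demo with the thread-safe Queue class
import Queue
import threading

q = Queue.Queue()

def producer():
    for i in range(5):
        q.put(i)                      # put() is thread-safe, no explicit lock needed

def consumer():
    while True:
        try:
            item = q.get(timeout=1)   # get() blocks until an item arrives or the timeout expires
        except Queue.Empty:
            break                     # producer has finished and the queue drained
        print "got", item

t1 = threading.Thread(target=producer)
t2 = threading.Thread(target=consumer)
t1.start(); t2.start()
t1.join(); t2.join()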

# encoding:utf-8
import requests
from lxml import etree
import urllib
import os
import re
import Queue
import threading

headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.79 Safari/537.36"
}


class Producer(threading.Thread):
    def __init__(self, page_queue, img_queue, *args, **kwargs):
        super(Producer, self).__init__(*args, **kwargs)
        self.page_queue = page_queue
        self.img_queue = img_queue

    def run(self):
        while True:
            try:
                # Non-blocking get: the page queue is filled before the threads
                # start, so an empty queue means there are no pages left. This also
                # avoids the check-then-get race between producer threads.
                url = self.page_queue.get_nowait()
            except Queue.Empty:
                break
            self.parse_page(url)

    def parse_page(self, url):
        # 1. Fetch the page
        response = requests.get(url, headers=headers)
        text = response.text
        # print text
        # 2. Parse the page content
        html = etree.HTML(text)
        images = html.xpath("//div[@class='page-content text-center']//img[@class!='gif']")
        for img in images:
            # print etree.tostring(img)
            # img.get('data-original') returns the value of the 'data-original'
            # attribute, which holds the actual image URL
            img_url = img.get('data-original')
            img_name = img.get('alt')
            # Strip punctuation that is illegal or awkward in file names
            img_name = re.sub(r'[\??!!\.~。@~\*]', '', img_name)
            img_suffix = os.path.splitext(img_url)[-1]
            img_filename = img_name + img_suffix
            print img_filename
            self.img_queue.put((img_url, img_filename))


class Consumer(threading.Thread):
    def __init__(self, page_queue, img_queue, *args, **kwargs):
        super(Consumer, self).__init__(*args, **kwargs)
        self.page_queue = page_queue
        self.img_queue = img_queue

    def run(self):
        while True:
            if self.img_queue.empty() and self.page_queue.empty():
                break
            try:
                # Use a timeout so the thread cannot block forever if another
                # consumer grabs the last image between the check and the get.
                img_url, img_filename = self.img_queue.get(timeout=5)
            except Queue.Empty:
                continue
            urllib.urlretrieve(img_url, "Images/" + img_filename)


def main():
    # Make sure the output directory exists before the consumers start writing
    if not os.path.exists("Images"):
        os.makedirs("Images")
    # Define the two thread-safe queues: one for list-page URLs, one for image tasks
    page_queue = Queue.Queue(100)
    img_queue = Queue.Queue(1000)
    for x in range(1, 101):
        url = "http://www.doutula.com/photo/list/?page=%d" % x
        page_queue.put(url)

    # Start 5 producer threads (parse pages) and 5 consumer threads (download images)
    for x in range(5):
        t = Producer(page_queue, img_queue)
        t.start()

    for x in range(5):
        t = Consumer(page_queue, img_queue)
        t.start()


if __name__ == '__main__':
    main()
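
Checking empty() before get() works most of the time, but it is inherently racy: two threads can both see a non-empty queue and one of them ends up blocked waiting for the last item. A common alternative is to have the main thread join the producers and then push one sentinel value per consumer; a consumer exits as soon as it receives the sentinel. The sketch below reuses the queues and classes above; the sentinel None and the join calls are assumptions for illustration, not part of the original code:

# Sketch: sentinel-based shutdown (assumes the same page_queue/img_queue as above)
producers = [Producer(page_queue, img_queue) for _ in range(5)]
consumers = [Consumer(page_queue, img_queue) for _ in range(5)]
for t in producers + consumers:
    t.start()
for t in producers:
    t.join()              # wait until every list page has been parsed
for _ in consumers:
    img_queue.put(None)   # one sentinel per consumer thread

# Consumer.run() would then look like:
#     while True:
#         item = self.img_queue.get()   # block until work or a sentinel arrives
#         if item is None:
#             break
#         img_url, img_filename = item
#         urllib.urlretrieve(img_url, "Images/" + img_filename)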
