# Multithreaded URL collector for Python 3 (scrapes Baidu search results).

# coding:utf-8

import re
import sys
import threading
from queue import Empty, Queue
from urllib import parse

import requests
from lxml import etree
#reload(sys)
#sys.setdefaultencoding('utf-8')

class Producer(threading.Thread):
    """Producer thread: fetch Baidu result pages taken from page_queue and
    push each visible source-link text onto txt_queue for the Consumer."""

    # Pretend to be a desktop browser so Baidu serves the normal result page.
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36'
    }

    def __init__(self, page_queue, txt_queue, *args, **kwargs):
        super(Producer, self).__init__(*args, **kwargs)
        self.page_queue = page_queue  # URLs of result pages left to fetch
        self.txt_queue = txt_queue    # raw link texts extracted from pages

    def run(self):
        # Drain the page queue. get_nowait() + Empty avoids the race between
        # empty() and get() when several producer threads share the queue.
        while True:
            try:
                url = self.page_queue.get_nowait()
            except Empty:
                break
            self.url_search(url)

    def url_search(self, url):
        """Fetch one result page and enqueue each link text found in it."""
        try:
            # timeout so a stalled request cannot hang the thread forever
            resp = requests.get(url, headers=self.headers, timeout=10)
        except requests.RequestException:
            return  # skip unreachable pages instead of killing the thread
        # errors='replace' so a non-UTF-8 byte cannot crash the decode
        text = resp.content.decode('utf-8', errors='replace')
        html = etree.HTML(text)
        # Baidu shows the result's source URL as the first <a> inside div.f13
        for link_text in html.xpath("//div[@class='f13']/a[1]/text()"):
            self.txt_queue.put(link_text)


class Consumer(threading.Thread):
    """Consumer thread: filter raw link texts from txt_queue down to domain
    prefixes ending in cn/com/org, write them to urls.txt, then deduplicate
    the result into urlsok.txt."""

    def __init__(self, page_queue, txt_queue, *args, **kwargs):
        super(Consumer, self).__init__(*args, **kwargs)
        self.page_queue = page_queue  # queue of result pages still being fetched
        self.txt_queue = txt_queue    # queue of raw link strings from producers

    def run(self):
        """Consume link texts until both queues are drained, persisting
        every recognizable domain prefix to urls.txt."""
        with open('urls.txt', 'w') as f:
            while True:
                # Stop once producers are done and everything is consumed.
                if self.txt_queue.empty() and self.page_queue.empty():
                    break
                item = self.txt_queue.get()
                # Shortest prefix ending in cn/com/org followed by '/'.
                match = re.search(r".*?(cn|com|org)/", item)
                # Skip texts without a recognizable domain. The original
                # crashed here: on a non-match the bare except swallowed the
                # AttributeError from .group(), then wrote an undefined
                # (or stale duplicate) variable `i`.
                if match:
                    f.write(match.group() + '\n')
        self.qc()

    def qc(self):
        """Deduplicate urls.txt into urlsok.txt (order not preserved)."""
        print("clear_ok")
        # `with` closes the handle; the original leaked an open file here.
        with open('urls.txt') as src:
            unique_lines = set(src)
        with open('urlsok.txt', 'w') as out:
            out.writelines(unique_lines)


def main():
    """Prompt for a search keyword and page count, seed the page queue with
    one Baidu result-page URL per page, then start the worker threads."""
    page_queue = Queue(1000)
    txt_queue = Queue(10000)

    keyword = input("keyword: ")
    page = int(input("page: "))

    # Baidu paginates with pn=0,10,20,... — one queue entry per result page.
    quoted = parse.quote(keyword)
    for offset in range(0, page * 10, 10):
        page_queue.put("https://www.baidu.com/s?wd=" + quoted + "&pn=" + str(offset))

    for _ in range(1):
        Producer(page_queue, txt_queue).start()

    for _ in range(1):
        Consumer(page_queue, txt_queue).start()


# Run only when executed as a script, not when imported as a module.
if __name__ == '__main__':
    main()
  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值