[Python] A Multithreaded Crawler Example

# -*- coding:utf-8 -*-

from lxml import etree
import requests
from queue import Queue, Empty
import threading
import time
import json
import os

class thread_crawl(threading.Thread):
    '''
    Crawler thread: fetches pages and pushes the raw HTML into data_queue.
    '''
    def __init__(self,threadID,q):
        threading.Thread.__init__(self)
        self.threadID = threadID
        self.q = q

    def run(self):
        print("starting " + self.threadID)
        self.qiushi_spider()
        print("exiting " + self.threadID)

    def qiushi_spider(self):
        while True:
            try:
                # get_nowait() avoids the race between empty() and get()
                # when several crawl threads share one queue
                page = self.q.get_nowait()
            except Empty:
                break
            print("qiushi_spider=", self.threadID, ",page=", str(page))
            url = 'http://www.qiushibaike.com/8hr/page/' + str(page) + '/'
            headers = {
                'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36',
                'Accept-Language': 'zh-CN,zh;q=0.8'
            }
            # Retry a few times on failure so one bad page cannot
            # trap the thread in an endless loop
            retries = 4
            success = False
            while retries > 0:
                retries -= 1
                try:
                    content = requests.get(url, headers=headers, timeout=10)
                    data_queue.put(content.text)
                    success = True
                    break
                except Exception as e:
                    print("qiushi_spider", e)
            if not success:
                print("giving up on...", url)

class Thread_Parser(threading.Thread):
    '''
    Parser thread: pulls raw HTML off the queue, extracts the fields,
    and appends them to the output file as JSON lines.
    '''

    def __init__(self,threadID,queue,lock,f):
        threading.Thread.__init__(self)
        self.threadID = threadID
        self.queue = queue
        self.lock = lock
        self.f = f

    def run(self):
        print("start....", self.threadID)
        while not exitFlag_Parser:
            try:
                '''
                get() removes and returns an item from the head of the
                queue. The optional block argument defaults to True:
                if the queue is empty and block is True, get() suspends
                the thread until an item is available; if block is
                False, an empty queue raises the Empty exception.
                '''
                item = self.queue.get(False)
            except Empty:
                # Queue momentarily empty; back off briefly and poll again
                time.sleep(0.1)
                continue
            # Parse the page
            self.parse_data(item)
            self.queue.task_done()
            print('Thread_Parser=', self.threadID, ',total=', total)
        print("exiting...", self.threadID)


    def parse_data(self, item):
        '''
        Parse one page of HTML and write each entry to the output file.
        :param item: raw HTML text of one listing page
        :return:
        '''
        global total
        try:
            html = etree.HTML(item)
            sites = html.xpath('//div[contains(@id,"qiushi_tag")]')
            for site in sites:
                try:
                    imgUrl = site.xpath('.//img/@src')[0]
                    title = site.xpath('.//h2')[0].text
                    content = site.xpath('.//div[@class="content"]/span')[0].text.strip()
                    vote = None
                    comments = None
                    try:
                        vote = site.xpath('.//i')[0].text
                        comments = site.xpath('.//i')[1].text
                    except Exception:
                        pass
                    entry = {
                        'imgUrl': imgUrl,
                        'title': title,
                        'content': content,
                        'vote': vote,
                        'comments': comments,
                    }

                    # Write under the lock so concurrent writers
                    # cannot interleave partial lines
                    with self.lock:
                        self.f.write(json.dumps(entry, ensure_ascii=False) + '\n')

                except Exception as e:
                    print("site in result", e)
        except Exception as e:
            print("parser_data", e)
        with self.lock:
            total += 1


# Shared state between the crawl threads, the parser threads and main()
data_queue = Queue()       # raw HTML pages waiting to be parsed
exitFlag_Parser = False    # set by main() to tell the parsers to exit
lock = threading.Lock()    # guards the output file and the counter
total = 0                  # number of pages parsed

def main():
    # Make sure the output directory exists before opening the file
    os.makedirs("./file", exist_ok=True)
    output = open("./file/data.json", "a", encoding="utf-8")
    # Fill the page queue with page numbers 1-10
    pageQueue = Queue(50)
    for page in range(1, 11):
        pageQueue.put(page)

    # Initialize the crawl threads
    crawlthreads = []
    crawlList = ["crawl_1","crawl_2","crawl_3"]
    # Start each thread in crawlList
    for threadID in crawlList:
        thread = thread_crawl(threadID,pageQueue)
        thread.start()
        crawlthreads.append(thread)
    # Initialize the parser threads
    parserthreads = []
    parserList = ["parser-1", "parser-2", "parser-3"]
    # Start each thread in parserList
    for threadID in parserList:
        thread = Thread_Parser(threadID,data_queue,lock,output)
        thread.start()
        parserthreads.append(thread)

    # Wait for the crawl threads to finish; once they exit,
    # the page queue has been drained
    for t in crawlthreads:
        t.join()

    # Wait until every fetched page has been pulled off the data queue
    while not data_queue.empty():
        time.sleep(0.2)

    # Tell the parser threads to exit
    global exitFlag_Parser
    exitFlag_Parser = True
    for t in parserthreads:
        t.join()
    print("Main thread end...")
    with lock:
        output.close()


if __name__ == '__main__':
    main()
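
A common alternative to the global exitFlag_Parser flag and the polling loops above is to let the queue itself coordinate shutdown: every worker calls task_done() for each item it handles, main() waits with Queue.join(), and a sentinel value pushed into the queue tells each worker to exit. A minimal sketch of that pattern, assuming a None sentinel and a placeholder fetch_page() in place of the real requests/lxml work:

# -*- coding:utf-8 -*-
# Producer/consumer sketch using Queue.join()/task_done() and a None
# sentinel instead of a global exit flag plus busy-waiting.

from queue import Queue
import threading

NUM_WORKERS = 3
data_queue = Queue()

def fetch_page(page):
    # Placeholder for the real requests.get(...) call
    return "page %d content" % page

def worker():
    while True:
        item = data_queue.get()      # blocks until an item arrives
        if item is None:             # sentinel: time to exit
            data_queue.task_done()
            break
        print(threading.current_thread().name, "processing", item)
        data_queue.task_done()       # mark this item as finished

def main():
    threads = [threading.Thread(target=worker) for _ in range(NUM_WORKERS)]
    for t in threads:
        t.start()
    for page in range(1, 11):        # produce the work items
        data_queue.put(fetch_page(page))
    for _ in threads:                # one sentinel per worker
        data_queue.put(None)
    data_queue.join()                # returns when every item is done
    for t in threads:
        t.join()
    print("Main thread end...")

if __name__ == '__main__':
    main()

Queue.join() returns exactly when task_done() has been called for every item that was put, so no shared flag or sleep-based polling is needed.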



The site scraped in this example: https://www.qiushibaike.com/
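
The extraction in parse_data() is plain lxml XPath. A minimal, self-contained sketch of the same queries run against a made-up HTML snippet (the snippet is an assumption for illustration; the site's real markup may differ or have changed since this was written):

# -*- coding:utf-8 -*-
from lxml import etree

SAMPLE_HTML = '''
<div id="qiushi_tag_1001">
  <img src="http://example.com/avatar.jpg"/>
  <h2>some_author</h2>
  <div class="content"><span> a funny story </span></div>
  <i>120</i><i>15</i>
</div>
'''

html = etree.HTML(SAMPLE_HTML)
for site in html.xpath('//div[contains(@id,"qiushi_tag")]'):
    print(site.xpath('.//img/@src')[0])          # imgUrl
    print(site.xpath('.//h2')[0].text)           # title
    print(site.xpath('.//div[@class="content"]/span')[0].text.strip())  # content
    print(site.xpath('.//i')[0].text,            # vote
          site.xpath('.//i')[1].text)            # comments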

The results are as follows:

[screenshot of the scraped results]