Python Crawler Design Patterns: Twisted
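
This post re-implements Scrapy's core architecture in miniature on top of Twisted: a Scheduler queues pending requests, an ExecutionEngine keeps a bounded number of downloads in flight, a Crawler ties one spider to one engine, and a CrawlerProcess runs several crawlers in a single reactor. The full listing follows, with notes after each class.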

#!/usr/bin/env python
# -*- coding: utf-8 -*-

import queue
import types

from twisted.internet import defer, reactor
from twisted.web.client import getPage  # deprecated upstream; see the Agent sketch below


class Response(object):
    """Wraps a downloaded body together with the Request that produced it."""

    def __init__(self, body, request):
        self.body = body
        self.request = request
        self.url = request.url

    @property
    def text(self):
        return self.body.decode('utf-8')
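
A note on the downloader before going further: `twisted.web.client.getPage` was deprecated years ago and has been removed from recent Twisted releases. If your Twisted no longer ships it, a roughly equivalent helper can be sketched with `Agent` and `readBody` (an illustration, not a drop-in replacement: it handles no redirects, timeouts, or cookies):

from twisted.internet import reactor
from twisted.web.client import Agent, readBody

def get_page(url):
    # fires with the response body as bytes, roughly like the old getPage()
    agent = Agent(reactor)
    d = agent.request(b'GET', url if isinstance(url, bytes) else url.encode('utf-8'))
    d.addCallback(readBody)
    return d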

class Request(object):
    """A URL to fetch plus an optional per-request parse callback."""

    def __init__(self, url, callback=None):
        self.url = url
        self.callback = callback


class Scheduler(object):
    """A thin FIFO queue of pending Requests."""

    def __init__(self, engine):
        self.q = queue.Queue()
        self.engine = engine

    def enqueue_request(self, request):
        self.q.put(request)

    def next_request(self):
        try:
            req = self.q.get(block=False)
        except queue.Empty:
            req = None
        return req

    def size(self):
        return self.q.qsize()
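
The scheduler is deliberately minimal: a FIFO wrapped with a non-blocking `next_request`. A quick standalone check of its behavior (illustrative only; the `engine` argument is unused by the methods exercised here):

s = Scheduler(engine=None)
s.enqueue_request(Request('http://dig.chouti.com/'))
print(s.size())          # 1
print(s.next_request())  # the Request instance just queued
print(s.next_request())  # None -- the queue is drained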

class ExecutionEngine(object):
    """Drives the crawl: pulls Requests from the Scheduler, keeps at most
    five downloads in flight, and fires _closewait once everything is done."""

    def __init__(self):
        self._closewait = None
        self.running = True
        self.start_requests = None
        self.scheduler = Scheduler(self)
        self.inprogress = set()  # requests currently being downloaded

    def check_empty(self, response):
        if not self.running:
            self._closewait.callback('......')

    def _next_request(self):
        # drain the spider's start_requests generator into the scheduler
        while self.start_requests:
            try:
                request = next(self.start_requests)
            except StopIteration:
                self.start_requests = None
            else:
                self.scheduler.enqueue_request(request)

        # maximum concurrency is 5
        while len(self.inprogress) < 5 and self.scheduler.size() > 0:
            request = self.scheduler.next_request()
            if not request:
                break
            self.inprogress.add(request)
            d = getPage(bytes(request.url, encoding='utf-8'))
            # the three addBoth callbacks run in order; see the sketch after this class
            d.addBoth(self._handle_downloader_output, request)
            d.addBoth(lambda x, req: self.inprogress.remove(req), request)
            d.addBoth(lambda x: self._next_request())

        if len(self.inprogress) == 0 and self.scheduler.size() == 0:
            self._closewait.callback(None)

    def _handle_downloader_output(self, body, request):
        """Build a Response, invoke the parse callback, and enqueue any
        Requests the callback yields back into the scheduler.

        :param body: the downloaded page body (bytes)
        :param request: the Request that produced it
        """
        response = Response(body, request)
        func = request.callback or self.spider.parse
        gen = func(response)
        if isinstance(gen, types.GeneratorType):
            for req in gen:
                self.scheduler.enqueue_request(req)

    @defer.inlineCallbacks
    def start(self):
        # park on a Deferred that _next_request fires once everything drains
        self._closewait = defer.Deferred()
        yield self._closewait

    def open_spider(self, spider, start_requests):
        self.start_requests = start_requests
        self.spider = spider
        reactor.callLater(0, self._next_request)
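
Back in `_next_request`, the three `addBoth` calls on each download Deferred form a chain that runs in registration order: parse the body, drop the request from `inprogress`, then schedule another round. Because `addBoth` runs on failure as well as success, `_handle_downloader_output` can receive a `Failure` instead of bytes on a bad download; the listing above does not guard against that. A minimal sketch of the chaining behavior:

from twisted.internet import defer

d = defer.Deferred()
d.addBoth(lambda r: print('1: handle', r) or r)  # _handle_downloader_output's slot
d.addBoth(lambda r: print('2: cleanup') or r)    # remove from inprogress
d.addBoth(lambda r: print('3: schedule next'))   # kick _next_request again
d.callback(b'<html>')  # the chain runs in order, right here
# had d.errback(...) fired instead, the same three callbacks would run,
# each receiving a Failure object rather than the page body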

class Crawler(object):
    """Binds one spider class to one ExecutionEngine."""

    def __init__(self, spidercls):
        self.spidercls = spidercls
        self.spider = None
        self.engine = None

    @defer.inlineCallbacks
    def crawl(self):
        self.engine = ExecutionEngine()
        self.spider = self.spidercls()
        start_requests = iter(self.spider.start_requests())
        self.engine.open_spider(self.spider, start_requests)
        yield self.engine.start()
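
`Crawler.crawl` is an `inlineCallbacks` coroutine: `yield self.engine.start()` suspends it on the engine's `_closewait` Deferred, which nothing fires until the scheduler and the in-flight set are both empty. The same suspension pattern in isolation (a minimal sketch):

from twisted.internet import defer, reactor

@defer.inlineCallbacks
def run_until_signalled():
    done = defer.Deferred()
    reactor.callLater(1.0, done.callback, None)  # stand-in for "work finished"
    yield done                                   # suspends here until done fires
    print('released')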

class CrawlerProcess(object):
    """Runs several crawlers in one reactor and stops it when all of them finish."""

    def __init__(self):
        self._active = set()
        self.crawlers = set()

    def crawl(self, spidercls, *args, **kwargs):
        crawler = Crawler(spidercls)
        self.crawlers.add(crawler)
        d = crawler.crawl(*args, **kwargs)
        self._active.add(d)
        return d

    def start(self):
        dl = defer.DeferredList(self._active)
        dl.addBoth(self._stop_reactor)
        reactor.run()

    def _stop_reactor(self, _=None):
        reactor.stop()
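
`CrawlerProcess.start` hinges on `defer.DeferredList`, which wraps the per-crawler Deferreds and fires only once every one of them has fired; `_stop_reactor` then shuts the event loop down. A minimal sketch of that behavior:

from twisted.internet import defer

d1, d2 = defer.Deferred(), defer.Deferred()
dl = defer.DeferredList([d1, d2])
dl.addBoth(lambda results: print('all done:', results))
d1.callback('chouti')
d2.callback('cnblogs')  # only now does dl fire, with [(True, 'chouti'), (True, 'cnblogs')]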

class Spider(object):
    def start_requests(self):
        for url in self.start_urls:
            yield Request(url)

class ChoutiSpider(Spider):
    name = "chouti"
    start_urls = ['http://dig.chouti.com/']

    def parse(self, response):
        print(response.text)


class CnblogsSpider(Spider):
    name = "cnblogs"
    start_urls = ['http://www.cnblogs.com/']

    def parse(self, response):
        print(response.text)


if __name__ == '__main__':
    spider_cls_list = [ChoutiSpider, CnblogsSpider]
    crawler_process = CrawlerProcess()
    for spider_cls in spider_cls_list:
        crawler_process.crawl(spider_cls)
    crawler_process.start()
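
To run the listing you need a Twisted release that still ships `getPage`, or you can substitute the `Agent`-based helper sketched near the top. The two sample URLs come from the original post and may no longer be reachable; any plain `http://` pages will do for exercising the loop.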
