scrapy

Build your own Scrapy

The single-file example below re-implements the core of Scrapy's architecture on top of Twisted: Request/Response objects, a Scheduler, an ExecutionEngine driven by Deferreds, a Crawler, a CrawlerProcess, and a minimal Spider base class.

#!/usr/bin/env python
# -*- coding:utf-8 -*-
import queue
import types

from twisted.internet import defer, reactor
from twisted.web.client import getPage


class Response(object):
    def __init__(self, body, request):
        self.body = body
        self.request = request
        self.url = request.url

    @property
    def text(self):
        return self.body.decode('utf-8')


class Request(object):
    def __init__(self, url, callback=None):
        self.url = url
        self.callback = callback


class Scheduler(object):
    def __init__(self, engine):
        self.q = queue.Queue()
        self.engine = engine

    def enqueue_request(self, request):
        """
        Put a request into the queue.
        :param request:
        :return:
        """
        self.q.put(request)

    def next_request(self):
        """
        Get the next request from the queue; return None if the queue is empty.
        :return:
        """
        try:
            req = self.q.get(block=False)
        except queue.Empty:
            req = None

        return req

    def size(self):
        return self.q.qsize()


class ExecutionEngine(object):
    def __init__(self):
        self._closewait = None
        self.start_requests = None
        self.scheduler = Scheduler(self)

        # Set of requests that are currently being downloaded
        self.inprogress = set()

    def _next_request(self):

        # Drain the start_requests iterator into the scheduler. Once it is
        # exhausted we set it to None, so this loop is skipped on later calls.
        while self.start_requests:
            try:
                request = next(self.start_requests)
            except StopIteration:
                self.start_requests = None
            else:
                # Hand the request to the scheduler's queue
                self.scheduler.enqueue_request(request)

        while len(self.inprogress) < 5 and self.scheduler.size() > 0:  # max concurrency of 5

            request = self.scheduler.next_request()  # pull a request from the queue
            if not request:  # None means the queue is empty
                break

            self.inprogress.add(request)  # mark it as in flight
            d = getPage(bytes(request.url, encoding='utf-8'))  # start the download; returns a Deferred immediately
            d.addBoth(self._handle_downloader_output, request)  # when the download finishes, build a Response and run the callback
            d.addBoth(lambda _, req: self.inprogress.remove(req), request)  # remove the request from the in-flight set
            d.addBoth(lambda _: self._next_request())  # keep scheduling further requests

        # Termination condition: nothing in flight and the scheduler queue is empty
        if len(self.inprogress) == 0 and self.scheduler.size() == 0:
            self._closewait.callback(None)

    def _handle_downloader_output(self, body, request):
        """
        Build a Response from the downloaded body, run the request's callback,
        and enqueue any Request objects yielded by that callback.
        :param body: the response body, as bytes
        :param request: the Request instance that produced this download
        :return:
        """
        # Build the response object
        response = Response(body, request)
        # Pick the callback, falling back to the spider's default parse
        func = request.callback or self.spider.parse
        gen = func(response)  # parse methods are usually generators

        if isinstance(gen, types.GeneratorType):
            # If it is a generator, feed the yielded requests back into the
            # scheduler (a type check on each req could be added here)
            for req in gen:
                # Put the request into the queue
                self.scheduler.enqueue_request(req)

    @defer.inlineCallbacks
    def start(self):
        # Yield a special Deferred that never fires on its own; it keeps the
        # crawl open until _next_request fires it once all work is done
        self._closewait = defer.Deferred()
        yield self._closewait

    def open_spider(self, spider, start_requests):
        # Initialisation
        self.start_requests = start_requests
        self.spider = spider

        # Schedule the first pass of the request loop once the reactor starts
        reactor.callLater(0, self._next_request)


class Crawler(object):
    def __init__(self, spidercls):
        self.spidercls = spidercls

        self.spider = None
        self.engine = None

    @defer.inlineCallbacks
    def crawl(self):
        # Create the engine
        self.engine = ExecutionEngine()
        # Instantiate the spider
        self.spider = self.spidercls()

        # Build the iterator of start requests
        start_requests = iter(self.spider.start_requests())

        # Run open_spider first (it schedules the request loop), then start
        # the engine, which yields the special "close" Deferred
        self.engine.open_spider(self.spider, start_requests)

        # Start the engine
        yield self.engine.start()


class CrawlerProcess(object):
    def __init__(self):
        # Deferreds of the crawls that are currently running
        self._active = set()

        # Crawler instances that are currently running
        self.crawlers = set()

    def crawl(self, spidercls, *args, **kwargs):
        crawler = Crawler(spidercls)

        self.crawlers.add(crawler)
        # Kick off the crawl and keep its Deferred
        d = crawler.crawl(*args, **kwargs)
        self._active.add(d)
        return d

    def start(self):
        # Gather all crawl Deferreds into one
        dl = defer.DeferredList(self._active)
        # Attach the shutdown callback; addBoth means it runs on success or failure
        dl.addBoth(self._stop_reactor)
        reactor.run()

    def _stop_reactor(self, _=None):
        # Stop the event loop
        reactor.stop()


class Spider(object):
    def start_requests(self):
        for url in self.start_urls:
            yield Request(url)


class ChoutiSpider(Spider):
    name = "chouti"
    start_urls = [
        'http://dig.chouti.com/',
    ]

    def parse(self, response):
        print(response.text)


class CnblogsSpider(Spider):
    name = "cnblogs"
    start_urls = [
        'http://www.cnblogs.com/',
    ]

    def parse(self, response):
        print(response.text)


if __name__ == '__main__':

    spider_cls_list = [ChoutiSpider, CnblogsSpider]

    crawler_process = CrawlerProcess()
    for spider_cls in spider_cls_list:
        crawler_process.crawl(spider_cls)

    crawler_process.start()
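
Because _handle_downloader_output feeds any Request objects yielded by a callback back into the scheduler, a spider can chain follow-up requests with their own callbacks. A minimal sketch of that usage (the FollowSpider class and its parse_detail method are illustrative additions, not part of the listing above):

class FollowSpider(Spider):
    name = "follow"
    start_urls = [
        'http://dig.chouti.com/',
    ]

    def parse(self, response):
        # Hypothetical example: handle the index page, then follow another URL
        print('index:', response.url, len(response.body))
        # Requests yielded here go back through the scheduler, and
        # parse_detail is used instead of the default parse
        yield Request('http://www.cnblogs.com/', callback=self.parse_detail)

    def parse_detail(self, response):
        print('detail:', response.url, len(response.body))

Running it only requires registering it in the main block, e.g. crawler_process.crawl(FollowSpider), before crawler_process.start().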

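Note that getPage is deprecated in newer Twisted releases and has been removed from recent ones. If it is not available, the download step in ExecutionEngine._next_request can be swapped for an Agent-based helper along these lines (a sketch under that assumption; the download function below is not part of the original code):

from twisted.internet import reactor
from twisted.web.client import Agent, readBody


def download(url):
    # Roughly what getPage(bytes(url, encoding='utf-8')) did: issue a GET
    # request and return a Deferred that fires with the body bytes
    agent = Agent(reactor)
    d = agent.request(b'GET', url.encode('utf-8'))
    d.addCallback(readBody)
    return d

In _next_request, d = getPage(...) would then become d = download(request.url); the addBoth chain attached to d stays the same.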