Distributed Crawlers: A Scrapy Example


Concurrent Scrapy

# middlewares.py
from kafka import KafkaProducer, KafkaConsumer
from elasticsearch import Elasticsearch
from redis import Redis
from scrapy.exceptions import IgnoreRequest

class DistributedCrawlerMiddleware:
    def __init__(self, kafka_hosts, kafka_topic, es_hosts, es_index, redis_host, redis_port):
        self.kafka_topic = kafka_topic
        self.producer = KafkaProducer(bootstrap_servers=kafka_hosts)
        self.consumer = KafkaConsumer(kafka_topic, bootstrap_servers=kafka_hosts)
        self.es = Elasticsearch(es_hosts)
        self.es_index = es_index
        self.redis = Redis(host=redis_host, port=redis_port)

    @classmethod
    def from_crawler(cls, crawler):
        return cls(
            kafka_hosts=crawler.settings.get('KAFKA_HOSTS'),
            kafka_topic=crawler.settings.get('KAFKA_TOPIC'),
            es_hosts=crawler.settings.get('ES_HOSTS'),
            es_index=crawler.settings.get('ES_INDEX'),
            redis_host=crawler.settings.get('REDIS_HOST'),
            redis_port=crawler.settings.getint('REDIS_PORT')
        )

    def process_request(self, request, spider):
        url = request.url
        if self.redis.sismember(f'crawled_urls:{spider.name}', url):
            spider.logger.debug(f"URL {url} has already been crawled, skipping.")
            # Raising IgnoreRequest actually drops the duplicate request;
            # returning None would let it continue through the downloader.
            raise IgnoreRequest(f"Duplicate URL: {url}")
        self.producer.send(self.kafka_topic, url.encode())
        self.redis.sadd(f'crawled_urls:{spider.name}', url)
        # Returning None lets Scrapy keep processing the request normally.
        return None

    def process_item(self, item, spider):
        # process_item is an item-pipeline hook, so this class must also be
        # enabled under ITEM_PIPELINES for this method to be called.
        try:
            self.es.index(index=self.es_index, document=dict(item))
        except Exception as e:
            spider.logger.error(f"Failed to index item in Elasticsearch: {e}")
        return item

# spiders/my_spider.py
import time

import scrapy
from kafka import KafkaConsumer
from redis import Redis

from ..items import CrawledPage

class MyCrawler(scrapy.Spider):
    name = 'my_crawler'

    @classmethod
    def from_crawler(cls, crawler, *args, **kwargs):
        spider = super().from_crawler(crawler, *args, **kwargs)
        settings = crawler.settings
        # The spider opens its own Kafka consumer and Redis connection;
        # these are separate from the middleware's instances.
        spider.consumer = KafkaConsumer(settings.get('KAFKA_TOPIC'),
                                        bootstrap_servers=settings.get('KAFKA_HOSTS'))
        spider.redis = Redis(host=settings.get('REDIS_HOST'),
                             port=settings.getint('REDIS_PORT'))
        return spider

    def start_requests(self):
        while True:
            try:
                # poll() returns a dict of {TopicPartition: [ConsumerRecord, ...]}
                for records in self.consumer.poll(timeout_ms=1000).values():
                    for msg in records:
                        url = msg.value.decode()
                        if self.redis.sismember(f'crawled_urls:{self.name}', url):
                            self.logger.debug(f"URL {url} has already been crawled, skipping.")
                            continue
                        yield scrapy.Request(url=url, callback=self.parse)
            except Exception as e:
                self.logger.error(f"Error consuming from Kafka: {e}")
                time.sleep(5)

    def parse(self, response):
        item = CrawledPage()
        item['url'] = response.url
        item['title'] = response.css('title::text').get()
        # 'body ::text' gathers text from all descendants, not just direct children
        item['content'] = ' '.join(response.css('body ::text').getall())
        yield item

The middleware introduces Redis as a shared de-duplication store: every crawler node adds the URLs it has already fetched to a Redis set, so the same page is not crawled twice.
process_request first checks whether the URL is already recorded in Redis and drops the request if it is; otherwise it pushes the URL onto the Kafka queue and records it in the Redis set.
In start_requests, each crawler node consumes tasks from Kafka and checks Redis once more before scheduling the request, again to avoid duplicate crawling.
Throughout all of this, the logic for writing results into Elasticsearch stays the same.
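For reference, here is a minimal sketch of the pieces the code above relies on but does not show: the CrawledPage item and the settings the middleware reads. The setting names mirror those used in from_crawler; the myproject package path and the concrete host, topic, index, and priority values are placeholders, not part of the original project.

# items.py -- the CrawledPage item imported by the spider
import scrapy

class CrawledPage(scrapy.Item):
    url = scrapy.Field()
    title = scrapy.Field()
    content = scrapy.Field()

# settings.py -- connection settings read by from_crawler, plus registration
KAFKA_HOSTS = ['localhost:9092']        # placeholder broker address
KAFKA_TOPIC = 'crawl_tasks'             # placeholder topic name
ES_HOSTS = ['http://localhost:9200']    # placeholder Elasticsearch address
ES_INDEX = 'crawled_pages'              # placeholder index name
REDIS_HOST = 'localhost'
REDIS_PORT = 6379

DOWNLOADER_MIDDLEWARES = {
    'myproject.middlewares.DistributedCrawlerMiddleware': 543,
}
# process_item is only invoked if the same class is also enabled as a pipeline
# (Scrapy creates a separate instance for each role).
ITEM_PIPELINES = {
    'myproject.middlewares.DistributedCrawlerMiddleware': 300,
}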

Parallel Scrapy

# app.py -- run as a module (e.g. `python -m <package>.app`) so the relative import resolves
from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings

from .spiders.main_spider import MainSpider

# The Kafka/Redis/Elasticsearch middlewares are enabled through
# DOWNLOADER_MIDDLEWARES and ITEM_PIPELINES in settings.py, so they are not imported here.

if __name__ == '__main__':
    settings = get_project_settings()
    process = CrawlerProcess(settings)

    process.crawl(MainSpider,
                  kafka_hosts=settings['KAFKA_HOSTS'],
                  kafka_topic=settings['KAFKA_TOPIC'],
                  redis_host=settings['REDIS_HOST'],
                  redis_port=settings['REDIS_PORT'],
                  es_hosts=settings['ES_HOSTS'],
                  es_index=settings['ES_INDEX'])

    process.start()

# middlewares/kafka_middleware.py
from scrapy import signals
from kafka import KafkaProducer, KafkaConsumer

class KafkaMiddleware:
    @classmethod
    def from_crawler(cls, crawler):
        middleware = cls(
            kafka_hosts=crawler.settings.get('KAFKA_HOSTS'),
            kafka_topic=crawler.settings.get('KAFKA_TOPIC')
        )
        crawler.signals.connect(middleware.spider_opened, signal=signals.spider_opened)
        crawler.signals.connect(middleware.spider_closed, signal=signals.spider_closed)
        return middleware

    def __init__(self, kafka_hosts, kafka_topic):
        self.kafka_topic = kafka_topic
        self.producer = KafkaProducer(bootstrap_servers=kafka_hosts)
        self.consumer = KafkaConsumer(kafka_topic, bootstrap_servers=kafka_hosts)

    def spider_opened(self, spider):
        spider.logger.info('Kafka middleware opened')

    def spider_closed(self, spider):
        # Close the Kafka connections so resources are actually released.
        self.producer.close()
        self.consumer.close()
        spider.logger.info('Kafka middleware closed')

    def process_request(self, request, spider):
        # Publish the URL for other nodes, then return None so Scrapy
        # continues downloading the request on this node.
        self.producer.send(self.kafka_topic, request.url.encode())
        return None

# middlewares/redis_middleware.py
from scrapy import signals
from scrapy.exceptions import IgnoreRequest
from redis import Redis

class RedisMiddleware:
    @classmethod
    def from_crawler(cls, crawler):
        middleware = cls(
            redis_host=crawler.settings.get('REDIS_HOST'),
            redis_port=crawler.settings.getint('REDIS_PORT')
        )
        crawler.signals.connect(middleware.spider_opened, signal=signals.spider_opened)
        crawler.signals.connect(middleware.spider_closed, signal=signals.spider_closed)
        return middleware

    def __init__(self, redis_host, redis_port):
        self.redis = Redis(host=redis_host, port=redis_port)

    def spider_opened(self, spider):
        spider.logger.info('Redis middleware opened')

    def spider_closed(self, spider):
        # Release the Redis connection when the spider finishes.
        self.redis.close()
        spider.logger.info('Redis middleware closed')

    def process_request(self, request, spider):
        if self.redis.sismember(f'crawled_urls:{spider.name}', request.url):
            spider.logger.debug(f"URL {request.url} has already been crawled, skipping.")
            # Raising IgnoreRequest actually drops the duplicate request.
            raise IgnoreRequest(f"Duplicate URL: {request.url}")
        self.redis.sadd(f'crawled_urls:{spider.name}', request.url)
        # Returning None lets Scrapy continue processing the request.
        return None

# middlewares/elasticsearch_middleware.py
from scrapy import signals
from elasticsearch import Elasticsearch

class ElasticsearchMiddleware:
    # process_item is an item-pipeline hook, so this class should be
    # enabled under ITEM_PIPELINES rather than DOWNLOADER_MIDDLEWARES.
    @classmethod
    def from_crawler(cls, crawler):
        middleware = cls(
            es_hosts=crawler.settings.get('ES_HOSTS'),
            es_index=crawler.settings.get('ES_INDEX')
        )
        crawler.signals.connect(middleware.spider_opened, signal=signals.spider_opened)
        crawler.signals.connect(middleware.spider_closed, signal=signals.spider_closed)
        return middleware

    def __init__(self, es_hosts, es_index):
        self.es = Elasticsearch(es_hosts)
        self.es_index = es_index

    def spider_opened(self, spider):
        spider.logger.info('Elasticsearch middleware opened')

    def spider_closed(self, spider):
        # Release the Elasticsearch client's connections.
        self.es.close()
        spider.logger.info('Elasticsearch middleware closed')

    def process_item(self, item, spider):
        try:
            self.es.index(index=self.es_index, document=dict(item))
        except Exception as e:
            spider.logger.error(f"Failed to index item in Elasticsearch: {e}")
        return item

# spiders/main_spider.py
import time

import scrapy
from kafka import KafkaConsumer
from redis import Redis

from ..items import CrawledPage

class MainSpider(scrapy.Spider):
    name = 'main_crawler'

    def __init__(self, kafka_hosts, kafka_topic, redis_host, redis_port, es_hosts, es_index, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.kafka_hosts = kafka_hosts
        self.kafka_topic = kafka_topic
        self.redis_host = redis_host
        self.redis_port = int(redis_port)
        self.es_hosts = es_hosts
        self.es_index = es_index
        # The spider needs its own Kafka consumer and Redis connection;
        # the middleware instances are not reachable from here.
        self.consumer = KafkaConsumer(kafka_topic, bootstrap_servers=kafka_hosts)
        self.redis = Redis(host=redis_host, port=self.redis_port)

    def start_requests(self):
        while True:
            try:
                # poll() returns a dict of {TopicPartition: [ConsumerRecord, ...]}
                for records in self.consumer.poll(timeout_ms=1000).values():
                    for msg in records:
                        url = msg.value.decode()
                        if self.redis.sismember(f'crawled_urls:{self.name}', url):
                            self.logger.debug(f"URL {url} has already been crawled, skipping.")
                            continue
                        yield scrapy.Request(url=url, callback=self.parse)
            except Exception as e:
                self.logger.error(f"Error consuming from Kafka: {e}")
                time.sleep(5)

    def parse(self, response):
        item = CrawledPage()
        item['url'] = response.url
        item['title'] = response.css('title::text').get()
        # 'body ::text' gathers text from all descendants, not just direct children
        item['content'] = ' '.join(response.css('body ::text').getall())
        yield item

This example consists of the following main components:

app.py: the entry point that starts the crawler process.
KafkaMiddleware: talks to the Kafka queue and pushes URLs to be crawled onto it.
RedisMiddleware: handles distributed de-duplication with Redis, so each URL is fetched only once.
ElasticsearchMiddleware: writes the scraped results into Elasticsearch.
MainSpider: the main spider class; it consumes tasks from Kafka and performs the corresponding Redis and Elasticsearch operations.

The advantages of this design include:

Using Kafka as the task queue enables highly parallel, distributed crawling.
Redis-based de-duplication prevents the same URL from being fetched more than once.
Splitting the responsibilities into separate middlewares improves maintainability and extensibility.
Each middleware obtains its configuration from the Scrapy settings via from_crawler, which keeps configuration in one place (see the settings sketch after this list).
The middlewares initialize and clean up in the spider_opened and spider_closed handlers, ensuring resources are released properly.
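As a rough illustration of how these middlewares could be wired together, here is a minimal settings.py sketch. The myproject package path, the priority numbers, and the concrete connection values are placeholders rather than part of the original project.

# settings.py -- registration sketch for the three middlewares
KAFKA_HOSTS = ['localhost:9092']
KAFKA_TOPIC = 'crawl_tasks'
REDIS_HOST = 'localhost'
REDIS_PORT = 6379
ES_HOSTS = ['http://localhost:9200']
ES_INDEX = 'crawled_pages'

DOWNLOADER_MIDDLEWARES = {
    # Lower priority runs first in process_request: de-duplicate, then publish to Kafka.
    'myproject.middlewares.redis_middleware.RedisMiddleware': 542,
    'myproject.middlewares.kafka_middleware.KafkaMiddleware': 543,
}
# ElasticsearchMiddleware implements process_item, so it is registered as an item pipeline.
ITEM_PIPELINES = {
    'myproject.middlewares.elasticsearch_middleware.ElasticsearchMiddleware': 300,
}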

Scrapy is a powerful Python web-crawling framework that also supports distributed crawling. A distributed crawler spreads the work across multiple machines or nodes, which improves crawling throughput and speed and avoids a single point of failure.

A distributed Scrapy deployment typically consists of one master node and several worker (slave) nodes. The master is responsible for task distribution and scheduling, while each worker performs the actual page fetching and data extraction; master and workers cooperate over the network.

Implementing a distributed Scrapy crawler involves the following steps:

1. Configure the distribution-related settings in settings.py, such as REDIS_URL, SCHEDULER_PERSIST, DUPEFILTER_CLASS, SCHEDULER, and ITEM_PIPELINES.
2. Create one master node and several worker nodes, which can run on different machines. Start the master with `scrapy crawl [spider_name] -s JOBDIR=[job_directory]` to specify the directory where job state is saved.
3. Start the spider on each worker node, again specifying the job directory and a node identifier: `scrapy runspider [spider_name] -s JOBDIR=[job_directory] -s NODE_ID=[node_id]`.
4. After a worker fetches a page, it sends the extracted data to the master. The master can de-duplicate and store the data, and can also hand tasks to other available workers.
5. The master can expose a web interface for monitoring task status and progress in real time, and can generate a report and send an email notification when the job finishes.

A distributed Scrapy crawler speeds up data fetching, processing, and storage, improving both efficiency and reliability. Note, however, that it needs a fast network and capable machines; otherwise performance bottlenecks and task backlogs can occur.
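The settings named in step 1 (REDIS_URL, SCHEDULER, DUPEFILTER_CLASS, SCHEDULER_PERSIST, ITEM_PIPELINES) match what the scrapy-redis extension expects. Assuming that is the intended setup, a minimal configuration sketch could look like the following; the Redis address is a placeholder.

# settings.py -- scrapy-redis style configuration sketch
REDIS_URL = 'redis://localhost:6379'

# Use the Redis-backed scheduler and duplicate filter so that all nodes
# share one request queue and one seen-request set.
SCHEDULER = 'scrapy_redis.scheduler.Scheduler'
DUPEFILTER_CLASS = 'scrapy_redis.dupefilter.RFPDupeFilter'

# Keep the queue and dupefilter in Redis between runs instead of clearing them.
SCHEDULER_PERSIST = True

# Optionally push scraped items into Redis so a separate process can store them.
ITEM_PIPELINES = {
    'scrapy_redis.pipelines.RedisPipeline': 300,
}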
