Concurrent Scrapy
# middlewares.py
from kafka import KafkaProducer
from elasticsearch import Elasticsearch
from redis import Redis
from scrapy.exceptions import IgnoreRequest


class DistributedCrawlerMiddleware:
    def __init__(self, kafka_hosts, kafka_topic, es_hosts, es_index, redis_host, redis_port):
        self.kafka_topic = kafka_topic
        # Only the producer lives here; the spider owns its own KafkaConsumer.
        self.producer = KafkaProducer(bootstrap_servers=kafka_hosts)
        self.es = Elasticsearch(es_hosts)
        self.es_index = es_index
        self.redis = Redis(host=redis_host, port=redis_port)

    @classmethod
    def from_crawler(cls, crawler):
        return cls(
            kafka_hosts=crawler.settings.get('KAFKA_HOSTS'),
            kafka_topic=crawler.settings.get('KAFKA_TOPIC'),
            es_hosts=crawler.settings.get('ES_HOSTS'),
            es_index=crawler.settings.get('ES_INDEX'),
            redis_host=crawler.settings.get('REDIS_HOST'),
            redis_port=crawler.settings.get('REDIS_PORT'),
        )

    def process_request(self, request, spider):
        url = request.url
        # Drop URLs that this or another node has already crawled.
        if self.redis.sismember(f'crawled_urls:{spider.name}', url):
            spider.logger.debug(f"URL {url} has already been crawled, skipping.")
            raise IgnoreRequest(f"duplicate URL: {url}")
        # Publish the new URL to Kafka and mark it as seen in Redis.
        self.producer.send(self.kafka_topic, url.encode())
        self.redis.sadd(f'crawled_urls:{spider.name}', url)
        # Returning None lets Scrapy continue downloading this request locally.
        return None

    def process_item(self, item, spider):
        # Only called when this class is also registered under ITEM_PIPELINES.
        try:
            self.es.index(index=self.es_index, document=dict(item))
        except Exception as e:
            spider.logger.error(f"Failed to index item in Elasticsearch: {e}")
        return item
# spiders/my_spider.py
import time

import scrapy
from kafka import KafkaConsumer
from redis import Redis

from ..items import CrawledPage


class MyCrawler(scrapy.Spider):
    name = 'my_crawler'

    def start_requests(self):
        # Build the Kafka consumer and Redis client from the project settings.
        self.consumer = KafkaConsumer(self.settings.get('KAFKA_TOPIC'),
                                      bootstrap_servers=self.settings.get('KAFKA_HOSTS'))
        self.redis = Redis(host=self.settings.get('REDIS_HOST'),
                           port=self.settings.get('REDIS_PORT'))
        while True:
            try:
                # poll() returns {TopicPartition: [records]}, so walk the record lists.
                for records in self.consumer.poll(timeout_ms=1000).values():
                    for msg in records:
                        url = msg.value.decode()
                        if self.redis.sismember(f'crawled_urls:{self.name}', url):
                            self.logger.debug(f"URL {url} has already been crawled, skipping.")
                            continue
                        yield scrapy.Request(url=url, callback=self.parse)
            except Exception as e:
                self.logger.error(f"Error consuming from Kafka: {e}")
                time.sleep(5)

    def parse(self, response):
        item = CrawledPage()
        item['url'] = response.url
        item['title'] = response.css('title::text').get()
        item['content'] = response.css('body::text').get()
        yield item
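
Both spiders import CrawledPage from items.py, which is not shown above. A minimal sketch of that item definition, assuming only the three fields that parse() populates:

# items.py (minimal sketch matching the fields used in parse)
import scrapy


class CrawledPage(scrapy.Item):
    url = scrapy.Field()
    title = scrapy.Field()
    content = scrapy.Field()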
The middleware brings in Redis as a distributed deduplication store: every crawler node adds the URLs it has already crawled to a shared Redis set, so the same page is not fetched twice.
process_request first checks whether the URL is already in that Redis set and drops the request if it is; otherwise it pushes the URL onto the Kafka queue and records it as seen.
In start_requests, each crawler node consumes tasks from Kafka and checks Redis once more before scheduling a request, again avoiding duplicate crawls.
Throughout, the logic for writing data to Elasticsearch stays unchanged. The settings the middleware reads via from_crawler are sketched below.
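
A possible settings.py for this setup follows. The setting names match what from_crawler reads, but the host values, the myproject module path, and the priority numbers are placeholder assumptions to adapt. Note that process_item only runs if the same class is also registered as an item pipeline (Scrapy then creates a second instance of it).

# settings.py (sketch; hosts, 'myproject', and priorities are placeholders)
KAFKA_HOSTS = ['localhost:9092']
KAFKA_TOPIC = 'crawl_tasks'
ES_HOSTS = ['http://localhost:9200']
ES_INDEX = 'crawled_pages'
REDIS_HOST = 'localhost'
REDIS_PORT = 6379

DOWNLOADER_MIDDLEWARES = {
    'myproject.middlewares.DistributedCrawlerMiddleware': 543,
}
ITEM_PIPELINES = {
    'myproject.middlewares.DistributedCrawlerMiddleware': 300,
}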
Parallel Scrapy
# app.py
from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings

from .spiders.main_spider import MainSpider

# The middlewares are enabled through DOWNLOADER_MIDDLEWARES / ITEM_PIPELINES in
# settings.py rather than imported here. Because of the relative import above,
# run this as a module from inside the project package, e.g. `python -m <project>.app`.
if __name__ == '__main__':
    settings = get_project_settings()
    process = CrawlerProcess(settings)
    process.crawl(
        MainSpider,
        kafka_hosts=settings['KAFKA_HOSTS'],
        kafka_topic=settings['KAFKA_TOPIC'],
        redis_host=settings['REDIS_HOST'],
        redis_port=settings['REDIS_PORT'],
        es_hosts=settings['ES_HOSTS'],
        es_index=settings['ES_INDEX'],
    )
    process.start()
# middlewares/kafka_middleware.py
from scrapy import signals
from kafka import KafkaProducer


class KafkaMiddleware:
    """Downloader middleware that publishes outgoing request URLs to the Kafka topic."""

    @classmethod
    def from_crawler(cls, crawler):
        middleware = cls(
            kafka_hosts=crawler.settings.get('KAFKA_HOSTS'),
            kafka_topic=crawler.settings.get('KAFKA_TOPIC')
        )
        crawler.signals.connect(middleware.spider_opened, signal=signals.spider_opened)
        crawler.signals.connect(middleware.spider_closed, signal=signals.spider_closed)
        return middleware

    def __init__(self, kafka_hosts, kafka_topic):
        self.kafka_topic = kafka_topic
        # Only the producer lives here; the spider owns its own KafkaConsumer.
        self.producer = KafkaProducer(bootstrap_servers=kafka_hosts)

    def spider_opened(self, spider):
        spider.logger.info('Kafka middleware opened')

    def spider_closed(self, spider):
        # Flush buffered messages and release the connection.
        self.producer.close()
        spider.logger.info('Kafka middleware closed')

    def process_request(self, request, spider):
        self.producer.send(self.kafka_topic, request.url.encode())
        # Returning None lets the request continue through the download chain.
        return None
# middlewares/redis_middleware.py
from scrapy import signals
from scrapy.exceptions import IgnoreRequest
from redis import Redis


class RedisMiddleware:
    """Downloader middleware that deduplicates URLs across nodes via a Redis set."""

    @classmethod
    def from_crawler(cls, crawler):
        middleware = cls(
            redis_host=crawler.settings.get('REDIS_HOST'),
            redis_port=crawler.settings.get('REDIS_PORT')
        )
        crawler.signals.connect(middleware.spider_opened, signal=signals.spider_opened)
        crawler.signals.connect(middleware.spider_closed, signal=signals.spider_closed)
        return middleware

    def __init__(self, redis_host, redis_port):
        self.redis = Redis(host=redis_host, port=redis_port)

    def spider_opened(self, spider):
        spider.logger.info('Redis middleware opened')

    def spider_closed(self, spider):
        spider.logger.info('Redis middleware closed')

    def process_request(self, request, spider):
        if self.redis.sismember(f'crawled_urls:{spider.name}', request.url):
            spider.logger.debug(f"URL {request.url} has already been crawled, skipping.")
            # Drop duplicates instead of downloading them again.
            raise IgnoreRequest(f"duplicate URL: {request.url}")
        self.redis.sadd(f'crawled_urls:{spider.name}', request.url)
        return None
# middlewares/elasticsearch_middleware.py
from scrapy import signals
from elasticsearch import Elasticsearch


class ElasticsearchMiddleware:
    """Indexes scraped items into Elasticsearch.

    Register it under ITEM_PIPELINES so that process_item is actually called.
    """

    @classmethod
    def from_crawler(cls, crawler):
        middleware = cls(
            es_hosts=crawler.settings.get('ES_HOSTS'),
            es_index=crawler.settings.get('ES_INDEX')
        )
        crawler.signals.connect(middleware.spider_opened, signal=signals.spider_opened)
        crawler.signals.connect(middleware.spider_closed, signal=signals.spider_closed)
        return middleware

    def __init__(self, es_hosts, es_index):
        self.es = Elasticsearch(es_hosts)
        self.es_index = es_index

    def spider_opened(self, spider):
        spider.logger.info('Elasticsearch middleware opened')

    def spider_closed(self, spider):
        spider.logger.info('Elasticsearch middleware closed')

    def process_item(self, item, spider):
        try:
            self.es.index(index=self.es_index, document=dict(item))
        except Exception as e:
            spider.logger.error(f"Failed to index item in Elasticsearch: {e}")
        return item
# spiders/main_spider.py
import time

import scrapy
from kafka import KafkaConsumer
from redis import Redis

from ..items import CrawledPage


class MainSpider(scrapy.Spider):
    name = 'main_crawler'

    def __init__(self, kafka_hosts, kafka_topic, redis_host, redis_port, es_hosts, es_index, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.kafka_hosts = kafka_hosts
        self.kafka_topic = kafka_topic
        self.redis_host = redis_host
        self.redis_port = redis_port
        self.es_hosts = es_hosts
        self.es_index = es_index
        # Clients used by start_requests to pull tasks and deduplicate.
        self.consumer = KafkaConsumer(kafka_topic, bootstrap_servers=kafka_hosts)
        self.redis = Redis(host=redis_host, port=redis_port)

    def start_requests(self):
        while True:
            try:
                # poll() returns {TopicPartition: [records]}, so walk the record lists.
                for records in self.consumer.poll(timeout_ms=1000).values():
                    for msg in records:
                        url = msg.value.decode()
                        if self.redis.sismember(f'crawled_urls:{self.name}', url):
                            self.logger.debug(f"URL {url} has already been crawled, skipping.")
                            continue
                        yield scrapy.Request(url=url, callback=self.parse)
            except Exception as e:
                self.logger.error(f"Error consuming from Kafka: {e}")
                time.sleep(5)

    def parse(self, response):
        item = CrawledPage()
        item['url'] = response.url
        item['title'] = response.css('title::text').get()
        item['content'] = response.css('body::text').get()
        yield item
This example consists of the following main components (a sample settings wiring is sketched after this list):
app.py: the entry point that starts the crawler process.
KafkaMiddleware: talks to the Kafka queue, pushing URLs to be crawled onto it.
RedisMiddleware: performs distributed deduplication with Redis, ensuring each URL is crawled only once.
ElasticsearchMiddleware: writes the crawl results into Elasticsearch.
MainSpider: the main spider class; it consumes tasks from Kafka, while the middlewares handle the corresponding Redis and Elasticsearch operations.
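
One possible way to wire these components together in settings.py is sketched below; the myproject module path and the priority numbers are placeholders, and ElasticsearchMiddleware goes under ITEM_PIPELINES because it implements process_item. The KAFKA_*, ES_*, and REDIS_* settings are the same as in the previous example.

# settings.py (sketch; 'myproject' and the priorities are placeholders)
DOWNLOADER_MIDDLEWARES = {
    'myproject.middlewares.redis_middleware.RedisMiddleware': 542,
    'myproject.middlewares.kafka_middleware.KafkaMiddleware': 543,
}
ITEM_PIPELINES = {
    'myproject.middlewares.elasticsearch_middleware.ElasticsearchMiddleware': 300,
}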
The advantages of this design include:
Kafka serves as the task queue, enabling highly parallel distributed crawling (a seed-URL sketch follows this list).
Redis provides distributed deduplication, so no page is crawled twice.
Splitting the different responsibilities into separate middlewares makes the code easier to maintain and extend.
Each middleware pulls its configuration from the Scrapy settings via from_crawler, which keeps configuration management in one place.
The middlewares hook into the spider_opened and spider_closed signals for setup and cleanup, ensuring resources are released properly.
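
Since MainSpider only consumes URLs from Kafka, something has to seed the topic before the first node can start working. A hypothetical helper script for that (the broker address, topic name, and URLs are example values) might look like this:

# seed_urls.py (hypothetical helper; broker, topic, and URLs are examples)
from kafka import KafkaProducer

producer = KafkaProducer(bootstrap_servers=['localhost:9092'])
for url in ['https://example.com/', 'https://example.org/']:
    producer.send('crawl_tasks', url.encode())
producer.flush()

Each worker node then runs app.py, pulls URLs from the topic, and publishes newly scheduled URLs back through KafkaMiddleware, which keeps the shared queue fed.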