JD distributed crawler
# -*- coding: utf-8 -*-
import json

import scrapy
from JD.items import JdItem
# 1. Import the distributed spider class
from scrapy_redis.spiders import RedisSpider


# 2. Inherit from the distributed spider
class BookSpider(RedisSpider):
    name = 'book'
    # 3. Comment out start_urls and allowed_domains
    # # allowed domains
    # allowed_domains = ['jd.com', 'p.3.cn']
    # # start url
    # start_urls = ['https://book.jd.com/booksort.html']

    # 4. Set the redis_key the workers will pop start URLs from
    redis_key = 'jd_book'

    # 5. Add an __init__ so the allowed domains can be passed in at launch
    #    (e.g. scrapy crawl book -a domain=jd.com,p.3.cn)
    def __init__(self, *args, **kwargs):
        domain = kwargs.pop('domain', '')
        self.allowed_domains = list(filter(None, domain.split(',')))
        super(BookSpider, self).__init__(*args, **kwargs)
    def parse(self, response):
        # Get every top-level book category node
        big_node_list = response.xpath('//*[@id="booksort"]/div[2]/dl/dt/a')
        for big_node in big_node_list[:5]:
            big_category = big_node.xpath('./text()').extract_first()
            big_category_link = response.urljoin(big_node.xpath('./@href').extract_first())
            # Get the sub-category nodes that belong to this category
            small_node_list = big_node.xpath('../following-sibling::dd[1]/em/a')
            for small_node in small_node_list[:5]:
                temp = {}
                temp['big_category'] = big_category
                temp['big_category_link'] = big_category_link
                temp['small_category'] = small_node.xpath('./text()').extract_first()
                temp['small_category_link'] = response.urljoin(small_node.xpath('./@href').extract_first())
                # Follow the sub-category link
                yield scrapy.Request(
                    url=temp['small_category_link'],
                    callback=self.parse_book_list,
                    meta={'jd_wtt': temp},
                )
    def parse_book_list(self, response):
        temp = response.meta['jd_wtt']
        book_list = response.xpath('//*[@id="plist"]/ul/li/div')
        for book in book_list:
            item = JdItem()
            item['big_category'] = temp['big_category']
            item['big_category_link'] = temp['big_category_link']
            item['small_category'] = temp['small_category']
            item['small_category_link'] = temp['small_category_link']
            book_name = book.xpath('./div[3]/a/em/text()|./div/div[2]/div[1]/div[3]/a/em/text()').extract_first()
            item['bookname'] = str(book_name).strip()
            item['author'] = book.xpath('./div[4]/span[1]/span/a/text()|./div/div[2]/div[1]/div[4]/span[1]/span/a/text()').extract_first()
            # Note: the first alternative must be relative (./div[1]);
            # an absolute //div[1] would match against the whole document
            item['link'] = book.xpath('./div[1]/a/@href|./div/div[2]/div[1]/div[3]/a/@href').extract_first()
            # Get the book SKU id
            skuid = book.xpath('.//@data-sku').extract_first()
            # Build the price API URL from the SKU id
            pri_url = 'https://p.3.cn/prices/mgets?skuIds=J_' + skuid
            yield scrapy.Request(
                url=pri_url,
                callback=self.parse_price,
                meta={'jd_wtt_1': item},
            )
    def parse_price(self, response):
        item = response.meta['jd_wtt_1']
        # The price endpoint returns a JSON array with one entry per SKU
        dict_data = json.loads(response.body.decode())
        item['price'] = dict_data[0]['p']
        yield item
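The items.py that defines JdItem is not shown in the original post. For reference, a minimal sketch that declares exactly the fields the spider assigns above:

# JD/items.py -- minimal sketch, fields inferred from the spider above
import scrapy

class JdItem(scrapy.Item):
    big_category = scrapy.Field()
    big_category_link = scrapy.Field()
    small_category = scrapy.Field()
    small_category_link = scrapy.Field()
    bookname = scrapy.Field()
    author = scrapy.Field()
    link = scrapy.Field()
    price = scrapy.Field()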
Copy the settings.py from the scrapy_redis example project into JD's settings.py and adjust the following parts:
SPIDER_MODULES = ['JD.spiders']
NEWSPIDER_MODULE = 'JD.spiders'
USER_AGENT = 'scrapy-redis (+https://github.com/rolando/scrapy-redis)'
DUPEFILTER_CLASS = "scrapy_redis.dupefilter.RFPDupeFilter"
SCHEDULER = "scrapy_redis.scheduler.Scheduler"
SCHEDULER_PERSIST = True
#SCHEDULER_QUEUE_CLASS = "scrapy_redis.queue.SpiderPriorityQueue"
#SCHEDULER_QUEUE_CLASS = "scrapy_redis.queue.SpiderQueue"
#SCHEDULER_QUEUE_CLASS = "scrapy_redis.queue.SpiderStack"
ITEM_PIPELINES = {
    # 'JD.pipelines.ExamplePipeline': 300,
    'scrapy_redis.pipelines.RedisPipeline': 400,
}
# Configure the Redis database shared by all nodes
REDIS_URL = "redis://127.0.0.1:6379"
# LOG_LEVEL = 'DEBUG'
# Introduce an artificial per-node delay; parallelism across nodes
# keeps the overall crawl fast.
DOWNLOAD_DELAY = 1
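Because the spider is a RedisSpider with no start_urls, every worker starts idle and blocks on the 'jd_book' list until a start URL is pushed into Redis. A minimal seeding sketch with the redis-py client, assuming the connection matches REDIS_URL above:

# seed.py -- hypothetical helper script to push the start URL
# into the shared request queue
import redis

r = redis.Redis(host='127.0.0.1', port=6379)
# 'jd_book' is the redis_key declared in the spider
r.lpush('jd_book', 'https://book.jd.com/booksort.html')

Each node then runs the same command, e.g. scrapy crawl book -a domain=jd.com,p.3.cn, and all nodes share the request queue and the dedupe fingerprint set through the one Redis instance.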
Distributed crawling summary
- When to use it
  - The amount of data is very large
  - The data has to be collected on a tight schedule
- How distribution is implemented
  - scrapy_redis provides it out of the box
  - An ordinary crawler becomes distributed by sharing its dedupe set and task queue (a minimal sketch follows this list)
- How it is deployed
  - Several ordinary laptops
  - One server virtualizing several machines
  - Dedicated machines: some for collection, some for management, some for storage
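To make the second point concrete, here is a minimal hand-rolled sketch (not the scrapy_redis internals) of sharing a dedupe set and a task queue through Redis; the 'demo:seen' and 'demo:queue' key names are made up for illustration:

# distributed_demo.py -- illustrative only; key names are made up
import redis

r = redis.Redis(host='127.0.0.1', port=6379)

def push_if_new(url):
    # SADD returns 1 only when the member was not already in the set,
    # so the shared set doubles as the dedupe filter
    if r.sadd('demo:seen', url):
        r.lpush('demo:queue', url)

def worker():
    while True:
        # BRPOP blocks until any node pushes a URL, so any number of
        # workers can consume the same task queue
        _, url = r.brpop('demo:queue')
        url = url.decode()
        # download url, parse it, then call push_if_new() on each new link
        print('got', url)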