Scrapy-Redis distributed crawler: scraping book information from Dangdang

Preparation
  • Redis, running on Ubuntu in a virtual machine: URL deduplication and persistence (a quick connectivity check for Redis and MongoDB is sketched after this list)
  • MongoDB: stores the scraped data
  • PyCharm: for writing the code
  • Chrome: for inspecting the pages and the data to extract
  • Goal: for every top-level category and each of its sub-categories, scrape the book information (category title, sub-category title, book title, author, description, price, e-book price, publisher, cover, book link)
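
Before running anything, it is worth confirming that both services are reachable from the machine that will run the spiders. A minimal sketch using the redis-py and pymongo packages, with the addresses that appear later in settings.py and pipelines.py (adjust them to your own setup):

import redis
from pymongo import MongoClient

# Redis instance that scrapy-redis will use for the request queue and dupefilter
r = redis.Redis(host="192.168.1.101", port=6379)
print(r.ping())                           # True if Redis is reachable

# MongoDB instance the pipeline writes to
client = MongoClient(host="127.0.0.1", port=27017)
print(client.server_info()["version"])    # raises an error if MongoDB is unreachable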

Approach: group by top-level category first, then by sub-category, then by each book, and finally extract the fields.

The code is below.

Spider code

# -*- coding: utf-8 -*-
import scrapy
# Extra imports for scrapy-redis
from scrapy_redis.spiders import RedisSpider
from copy import deepcopy

# Inherit from the imported RedisSpider instead of scrapy.Spider
class DdBookSpider(RedisSpider):
    name = 'dd_book'
    allowed_domains = ['dangdang.com']
    redis_key = "dd_book"   # seed Redis with: lpush dd_book http://category.dangdang.com/?ref=www-0-C

    def parse(self, response):
        """图书大类"""
        # 先分组
        div_list = response.xpath('//div[@class="classify_books"]/div[@class="classify_kind"]')
        for div in div_list:
            item = {}
            item["大标题"] = div.xpath('.//a/text()').extract_first()
            li_list = div.xpath('.//ul[@class="classify_kind_detail"]/li')
            for li in li_list:
                item["小标题"] = li.xpath('./a/text()').extract_first()
                sm_url = li.xpath('./a/@href').extract_first()
                #print(sm_url, item["小标题"])

                # Request the sub-category listing page; deepcopy(item) gives each
                # request its own copy so parallel requests don't share one mutated dict
                if sm_url != "javascript:void(0);":
                    yield scrapy.Request(sm_url, callback=self.book_details, meta={"item": deepcopy(item)})

    def book_details(self, response):
        """提取图书数据"""
        item = response.meta["item"]
        # Group by each book on the page
        li_list = response.xpath('//ul[@class="bigimg"]/li')
        for li in li_list:
            item["图书标题"] = li.xpath('./a/@title').extract_first()
            item["作者"] = li.xpath('./p[@class="search_book_author"]/span[1]/a/@title').extract_first()
            item["图书简介"] = li.xpath('./p[@class="detail"]/text()').extract_first()
            item["价格"] = li.xpath('./p[@class="price"]/span[@class="search_now_price"]/text()').extract_first()
            item["电子书价格"] = li.xpath('./p[@class="price"]/a[@class="search_e_price"]/i/text()').extract_first()
            item["日期"] = li.xpath('./p[@class="search_book_author"]/span[2]/text()').extract_first()
            item["出版社"] = li.xpath('./p[@class="search_book_author"]/span[3]/a/@title').extract_first()
            item["图片"] = li.xpath('./a/img/@src').extract_first()
            item["图书链接"] = li.xpath('./a/@href').extract_first()

            yield item

        # Pagination: follow the "下一页" (next page) link if there is one
        next_url = response.xpath('//a[text()="下一页"]/@href').extract_first()
        if next_url is not None:
            next_url = "http://category.dangdang.com" + next_url
            yield scrapy.Request(next_url, callback=self.book_details, meta={"item": deepcopy(item)})
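
To start a crawl, run scrapy crawl dd_book on each machine (every instance will sit idle and wait), then push the start URL under the dd_book key into Redis. The lpush shown in the comment above does that from redis-cli; the same thing from Python with the redis-py package, reusing the Redis address from settings.py, would look roughly like this:

import redis

# Seed the scrapy-redis start queue; every waiting dd_book spider picks requests from here
r = redis.Redis(host="192.168.1.101", port=6379)
r.lpush("dd_book", "http://category.dangdang.com/?ref=www-0-C")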


Code in settings.py

# Deduplication class: filters duplicate requests through Redis
DUPEFILTER_CLASS = "scrapy_redis.dupefilter.RFPDupeFilter"
# Scheduler that keeps the request queue in Redis
SCHEDULER = "scrapy_redis.scheduler.Scheduler"
# Keep the Redis queue and dupefilter between runs (persistence)
SCHEDULER_PERSIST = True
# Redis address
REDIS_URL = "redis://192.168.1.101:6379"
# User-Agent pool used by the random-UA downloader middleware
UA_LIST = [
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1",
    "Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6",
    "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6",
    "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1",
    "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5",
    "Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
    "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; SE 2.X MetaSr 1.0; SE 2.X MetaSr 1.0; .NET CLR 2.0.50727; SE 2.X MetaSr 1.0)",
    "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; 360SE)",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
    "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
    "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3",
    "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24",
    "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24"
]

# Obey robots.txt rules
ROBOTSTXT_OBEY = False

# Download delay (seconds)
DOWNLOAD_DELAY = 1

# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
DOWNLOADER_MIDDLEWARES = {
    'dangdang_book.middlewares.DangdangBookDownloaderMiddleware': 543,
}

# Configure item pipelines
ITEM_PIPELINES = {
    'dangdang_book.pipelines.DangdangBookPipeline': 300,
}
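
scrapy-redis also ships its own item pipeline, scrapy_redis.pipelines.RedisPipeline, which serializes every yielded item into a Redis list (by default under a key named after the spider). If you want the items in Redis as well as in MongoDB, it can be enabled next to the project pipeline; a sketch (the priority 400 is arbitrary):

ITEM_PIPELINES = {
    'dangdang_book.pipelines.DangdangBookPipeline': 300,
    # Optional: also push serialized items into Redis
    'scrapy_redis.pipelines.RedisPipeline': 400,
}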

middlewares.py: add a random User-Agent

import random


class DangdangBookDownloaderMiddleware:

    def process_request(self, request, spider):
        """添加随机UA跟代理IP"""
        ua = random.choice(spider.settings.get("UA_LIST"))
        request.headers["User-Agent"] = ua
        #request.meta["proxy"] = "https://125.115.126.114:888"

    def process_response(self, request, response, spider):
        """查看UA有没有设置成功"""
        print(request.headers["User-Agent"])
        return response
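
The commented-out proxy line above hints at proxy rotation; the same random-choice pattern can live in its own middleware. A minimal sketch, assuming a custom PROXY_LIST setting that you define yourself (the class would also need its own entry in DOWNLOADER_MIDDLEWARES):

import random


class RandomProxyMiddleware:

    def process_request(self, request, spider):
        """Pick a proxy at random, mirroring the random-UA logic above"""
        # PROXY_LIST is a hypothetical custom setting, e.g. ["http://1.2.3.4:8080", ...]
        proxy_list = spider.settings.get("PROXY_LIST")
        if proxy_list:
            request.meta["proxy"] = random.choice(proxy_list)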

pipelines.py: save the data

from pymongo import MongoClient
client = MongoClient(host="127.0.0.1", port=27017)
db = client["dangdang_db"]


class DangdangBookPipeline:
    def process_item(self, item, spider):
        """保存数据到mongodb"""
        print(item)
        db.book.insert_one(dict(item))
        return item
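
Creating the MongoClient at import time works, but a slightly tidier variant opens and closes the connection together with the spider and reads the address from settings instead of hard-coding it. A sketch, assuming you add MONGO_HOST and MONGO_PORT as custom settings (the defaults below mirror the code above):

from pymongo import MongoClient


class DangdangBookPipeline:

    def open_spider(self, spider):
        # MONGO_HOST / MONGO_PORT are assumed custom settings, not Scrapy built-ins
        host = spider.settings.get("MONGO_HOST", "127.0.0.1")
        port = spider.settings.get("MONGO_PORT", 27017)
        self.client = MongoClient(host=host, port=port)
        self.collection = self.client["dangdang_db"]["book"]

    def close_spider(self, spider):
        self.client.close()

    def process_item(self, item, spider):
        self.collection.insert_one(dict(item))
        return item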

Run screenshots

[screenshot: spider running]

MongoDB
[screenshot: documents stored in MongoDB]

Redis
[screenshot: keys in Redis]

Project structure
[screenshot: project layout]

Any suggestions for improvement are welcome.
