Scraping Qidian ranking list data with Python + Scrapy

spider
import scrapy

from qiDianTop.items import QidiantopItem

"""
初步逻辑
1.获取所有榜单类型列表 遍历列表 请求每个列表
2.获取每个列表下书籍的列表信息 
3.请求书籍详细信息获取信息
"""


class QiDianTopSpider(scrapy.Spider):
    name = 'qi_dian_top'
    allowed_domains = ['qidian.com']

    # The ranking pages to crawl
    start_urls = ['https://www.qidian.com/rank/yuepiao?style=1',
                  "https://www.qidian.com/rank/fengyun?style=1",
                  "https://www.qidian.com/rank/hotsales?style=1",
                  "https://www.qidian.com/rank/readIndex?style=1",
                  "https://www.qidian.com/rank/newFans?style=1",
                  "https://www.qidian.com/rank/recom?style=1",
                  "https://www.qidian.com/rank/collect?style=1",
                  "https://www.qidian.com/rank/vipup?style=1",
                  "https://www.qidian.com/rank/vipcollect?style=1",
                  "https://www.qidian.com/rank/vipreward?style=1"
                  ]

    def parse(self, response):
        book_list = response.xpath("//div[@id='rank-view-list']//ul/li")

        # Pagination: "temp" in request.meta carries the paging state for the
        # current ranking category (page = page number of this response,
        # request_url = the first-page URL of that category).
        if "temp" in response.request.meta \
                and "page" in response.request.meta["temp"] \
                and "request_url" in response.request.meta["temp"]:
            temp = response.request.meta["temp"]
            # Crawl at most 10 pages per category; beyond that the category is finished.
            if temp["page"] < 10:
                print("1" * 100)
                temp["page"] += 1
                # Build the URL of the next page.
                query_url = temp["request_url"] + "&page=" + str(temp["page"])
                # Yield the follow-up request. Scrapy filters duplicate request URLs by
                # default; pass dont_filter=True if the same URL must be requested again.
                yield scrapy.Request(query_url, callback=self.parse, meta={"temp": temp})
            else:
                print("==" * 100)
                print("Finished paging this ranking list -> {}".format(temp["request_url"]))
        # Otherwise this is the first response for the ranking list: initialise the
        # paging state and request page 2 (this response already covers page 1).
        else:
            print("2" * 100)
            temp_info = {"request_url": response.request.url, "page": 2}
            next_request_url = temp_info["request_url"] + "&page=" + str(temp_info["page"])
            yield scrapy.Request(next_request_url, callback=self.parse, meta={"temp": temp_info})

        # Extract every book entry on this ranking page.
        for i in book_list:
            book = QidiantopItem()
            book["bid"] = i.xpath(".//div[@class='book-mid-info']/h4/a/@data-bid").get()
            book["name"] = i.xpath(".//div[@class='book-mid-info']/h4/a/text()").get()
            book["author"] = i.xpath(".//div[@class='book-mid-info']/p[@class='author']/a[@class='name']/text()").get()
            book["author_url"] = "https:" + i.xpath(
                ".//div[@class='book-mid-info']/p[@class='author']/a[@class='name']/@href").get()
            book["type"] = i.xpath(
                ".//div[@class='book-mid-info']/p[@class='author']/a[@data-eid='qd_C42']/text()").get()
            book["status"] = i.xpath(".//div[@class='book-mid-info']/p[@class='author']/span/text()").get()
            book["new_chapter"] = i.xpath(".//div[@class='book-mid-info']/p[@class='update']/a/text()").get()
            book["new_chapter_date"] = i.xpath(".//div[@class='book-mid-info']/p[@class='update']/span/text()").get()
            book["detail_url"] = "https:" + i.xpath(".//div[@class='book-img-box']/a/@href").get()

            yield scrapy.Request(book["detail_url"], callback=self.parse_detail, meta={"item": book})

    def parse_detail(self, response):
        print("3" * 100)
        book = response.meta["item"]
        book["img_url"] = "https:" + response.xpath("//a[@id='bookImg']/img/@src").get()
        book["info"] = response.xpath("//div[@class='book-intro']/p/text()").get()
        yield book
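The spider above is normally started with "scrapy crawl qi_dian_top" from the project directory. As an alternative, the minimal sketch below launches it from a plain Python script via Scrapy's CrawlerProcess; the import path qiDianTop.spiders.qi_dian_top is an assumption about where the spider file lives and may need adjusting.

# run.py -- sketch only: launches the spider without the scrapy CLI.
from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings

# Assumed module path for the spider file; adjust to the actual project layout.
from qiDianTop.spiders.qi_dian_top import QiDianTopSpider

if __name__ == "__main__":
    # Load the qiDianTop project's settings.py and run the crawl to completion.
    process = CrawlerProcess(get_project_settings())
    process.crawl(QiDianTopSpider)
    process.start()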

items
# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html

import scrapy


class QidiantopItem(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    bid = scrapy.Field()
    name = scrapy.Field()
    author = scrapy.Field()
    type = scrapy.Field()
    status = scrapy.Field()
    new_chapter = scrapy.Field()
    new_chapter_date = scrapy.Field()
    detail_url = scrapy.Field()
    img_url = scrapy.Field()
    info = scrapy.Field()
    author_url = scrapy.Field()

pipelines
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html


# useful for handling different item types with a single interface
from itemadapter import ItemAdapter
from pymongo import MongoClient

from qiDianTop.items import QidiantopItem


class QidiantopPipeline:

    # Called when the spider is opened.
    def open_spider(self, spider):
        print("Creating MongoDB connection")
        # Host, port and credentials are placeholders. Authentication is passed to
        # MongoClient directly (Database.authenticate was removed in pymongo 4.x).
        self.mongo_client = MongoClient(host="xxxxx", port=123,
                                        username="xx", password="xx",
                                        authSource="test")
        # Select the database the account has access to.
        self.mongo_data_base = self.mongo_client["test"]
        # This is only a test run, so drop the target collection on every start.
        self.mongo_data_base.drop_collection("qiDian")

    # Called when the spider is closed.
    def close_spider(self, spider):
        self.mongo_client.close()

    def process_item(self, item, spider):
        if isinstance(item, QidiantopItem):
            self.mongo_data_base["qiDian"].insert_one(dict(item))
        return item
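
The pipeline only runs if it is registered in the project's settings.py. The excerpt below is a minimal sketch; the priority value, user agent and download delay are assumptions rather than part of the original project.

# settings.py (excerpt) -- sketch; concrete values are assumptions, tune as needed.
BOT_NAME = "qiDianTop"

# A browser-like user agent plus a small delay keeps the crawl polite.
USER_AGENT = "Mozilla/5.0 (Windows NT 10.0; Win64; x64)"
ROBOTSTXT_OBEY = False
DOWNLOAD_DELAY = 1

# Register the MongoDB pipeline defined above (lower number = higher priority).
ITEM_PIPELINES = {
    "qiDianTop.pipelines.QidiantopPipeline": 300,
}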
