Scraping the novel 盗墓笔记 (Daomu Biji) with Scrapy

The spider (e.g. spiders/daomuspider.py):

# -*- coding: utf-8 -*-
import scrapy
from daomu.items import DaomuItem

class DaomuspiderSpider(scrapy.Spider):
    name = "daomuspider"
    # allowed_domains = ["www.daomubiji.com"]
    start_urls = ['http://www.daomubiji.com/']
    index_url = 'http://www.daomubiji.com/'

    def start_requests(self):
        yield scrapy.Request(url=self.index_url, callback=self.parse_book)

    def parse_book(self, response):
        # Each link on the index page leads to one book's chapter list
        for book_url in response.css('.article-content a::attr(href)').extract():
            yield scrapy.Request(url=book_url, callback=self.parse_chapter)

    def parse_chapter(self, response):
        book_title = response.css('.focusbox .container h1::text').extract_first()
        book_info = response.css('.focusbox .container .focusbox-text::text').extract_first()
        book_url = response.url

        for chapter in response.css('.excerpts-wrapper .excerpts .excerpt'):
            # Build the chapter title from the space-separated parts of the link text
            title_parts = chapter.css('a::text').extract_first().split(' ')
            chapter_title = title_parts[1] + ':' + title_parts[-1]
            chapter_url = chapter.css('a::attr(href)').extract_first()

            # Create a fresh item for every chapter; reusing a single instance
            # across the loop would leave all requests sharing the last chapter's data
            item = DaomuItem()
            item['book_title'] = book_title
            item['book_info'] = book_info
            item['book_url'] = book_url
            item['chapter_title'] = chapter_title
            item['chapter_url'] = chapter_url

            # The key point: pass the partially filled item on to the next callback via meta;
            # the completed item (with the chapter body) is yielded from parse_detail
            yield scrapy.Request(url=chapter_url, callback=self.parse_detail, meta={'item': item})


    def parse_detail(self, response):
        # Retrieve the item passed in via meta and attach the chapter body paragraphs
        item = response.meta['item']
        content = response.css('.article-content p::text').extract()
        item['content'] = content
        yield item
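
The spider imports DaomuItem from daomu.items, but the item class itself is not shown in the original post. A minimal items.py sketch, assuming only the fields the spider actually fills in, could look like this:

# items.py -- minimal sketch covering only the fields used by the spider above
import scrapy

class DaomuItem(scrapy.Item):
    book_title = scrapy.Field()
    book_info = scrapy.Field()
    book_url = scrapy.Field()
    chapter_title = scrapy.Field()
    chapter_url = scrapy.Field()
    content = scrapy.Field()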
The pipeline below (pipelines.py) writes each item into MongoDB:

import pymongo

class DaomuPipeline(object):

    def __init__(self):
        # Connection settings are hard-coded here; to read them from settings.py instead,
        # uncomment from_crawler below and accept the values as __init__ arguments
        self.mongo_uri = 'localhost'
        self.mongo_db = 'daomu'

    # @classmethod
    # def from_crawler(cls, crawler):
    #     return cls(
    #         mongo_uri=crawler.settings.get('MONGO_URI'),
    #         mongo_db=crawler.settings.get('MONGO_DB')
    #     )

    def open_spider(self,spider):
        self.client = pymongo.MongoClient(self.mongo_uri)
        self.db = self.client[self.mongo_db]

    def process_item(self, item, spider):
        # Use the item class name as the MongoDB collection name
        name = item.__class__.__name__
        # Note: the item must be converted to a dict before inserting
        self.db[name].insert_one(dict(item))
        return item

    def close_spider(self, spider):
        self.client.close()
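
For the pipeline to run, it has to be enabled in the project's settings.py. The module path below assumes the Scrapy project is named daomu, matching the imports above; the priority 300 is just a conventional value:

# settings.py -- assumed project name "daomu"
ITEM_PIPELINES = {
    'daomu.pipelines.DaomuPipeline': 300,
}

With that in place, the crawl is started from the project root with: scrapy crawl daomuspider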

 

Reposted from: https://www.cnblogs.com/themost/p/7093116.html
