Python Scrapy: Multiple Pipelines

1. Crawling logic

import scrapy
# Note the import path here: it must be scary_test.items, not
# scary_test.scary_test.items, or the module will not be found.
from scary_test.items import ScaryTestItem


class DangdangSpider(scrapy.Spider):
    name = "dangdang"
    allowed_domains = ["category.dangdang.com"]
    start_urls = ["http://category.dangdang.com/cp01.01.02.00.00.00.html"]

    base_url = 'http://category.dangdang.com/pg'
    page = 1

    def parse(self, response):
        li_list = response.xpath("//ul[@id='component_59']/li")
        for li in li_list:
            describe = li.xpath(".//a[@name='itemlist-title']/text()").extract_first()
            author = li.xpath(".//a[@name='itemlist-author']/text()").extract_first()
            # the list lazy-loads covers: the real URL is in data-original,
            # only the first image still uses src
            url = li.xpath(".//img/@data-original").extract_first()
            if not url:
                url = li.xpath(".//img/@src").extract_first()
            price = li.xpath(".//span[@class='search_now_price']/text()").extract_first()
            print("Description:", describe)
            print("Author:", author)
            print("Image:", url)
            print("Price:", price)
            book = ScaryTestItem(url=url, describe=describe, price=price, author=author)
            yield book

        # paginate once per response, outside the item loop
        if self.page < 10:
            self.page = self.page + 1
            next_url = self.base_url + str(self.page) + '-cp01.01.02.00.00.00.html'
            yield scrapy.Request(url=next_url, callback=self.parse)
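
Two details worth noting: Dangdang lazy-loads the cover images, so the real URL lives in each img tag's data-original attribute and only the first image falls back to src; and pagination works by yielding a new scrapy.Request with parse as its own callback, so the same parsing logic runs on every page until the counter reaches 10.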

2. Defining the item

# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html

import scrapy


class ScaryTestItem(scrapy.Item):
    # define the fields for your item here like:
    # image URL
    url = scrapy.Field()
    # description
    describe = scrapy.Field()
    # price
    price = scrapy.Field()
    # author
    author = scrapy.Field()
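
A quick usage note: Item instances behave like dicts, which is why the spider can build them with keyword arguments and why pipelines can read fields by key. A minimal sketch (the values are placeholders):

book = ScaryTestItem(url="//example.com/cover.jpg", describe="...", price="...", author="...")
print(book["price"])   # dict-style field access
print(dict(book))      # convert to a plain dict, e.g. for json.dumps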


3. Defining the pipelines

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html


# useful for handling different item types with a single interface
import json
import os
import urllib.request

from itemadapter import ItemAdapter


class ScaryTestPipeline:
    def open_spider(self, spider):
        self.fp = open("book.json", "w", encoding="utf-8")

    def process_item(self, item, spider):
        # serialize as one JSON object per line; str(item) would not be valid JSON
        self.fp.write(json.dumps(ItemAdapter(item).asdict(), ensure_ascii=False) + "\n")
        return item

    def close_spider(self, spider):
        self.fp.close()


class DownloadImgPipeline:
    def open_spider(self, spider):
        # make sure the target directory exists, or urlretrieve will fail
        os.makedirs("./img", exist_ok=True)

    def process_item(self, item, spider):
        # the scraped URLs are protocol-relative ("//img3m...")
        url = "http:" + item["url"]
        filename = "./img/" + url.split("/")[-1]
        urllib.request.urlretrieve(url=url, filename=filename)
        return item
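
As an aside, Scrapy also ships a built-in scrapy.pipelines.images.ImagesPipeline that handles downloading, deduplication and storage for you; it expects an image_urls list field on the item (so it does not match this project's single url field as-is) and requires Pillow. Enabling it is just configuration:

ITEM_PIPELINES = {"scrapy.pipelines.images.ImagesPipeline": 1}
IMAGES_STORE = "./img"   # where ImagesPipeline stores downloaded files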


Configuring the pipelines

You can register any number of pipelines, and each one is given a priority between 1 and 1000: the smaller the value, the higher the priority (i.e. the earlier it runs). Both pipelines are enabled in settings.py, as shown below.
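
A sketch of the corresponding settings.py entry, assuming the project module is named scary_test (matching the imports above); 300/301 are just typical priority values:

ITEM_PIPELINES = {
    # lower value = higher priority, runs first
    "scary_test.pipelines.ScaryTestPipeline": 300,
    "scary_test.pipelines.DownloadImgPipeline": 301,
}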

After running the project, you will find both the downloaded images and the JSON file saved locally.
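
To run it, invoke the spider by the name defined in the spider class, from the project root:

scrapy crawl dangdang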

How it works

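In short: every item the spider yields travels through the enabled pipelines in ascending priority order. Each pipeline's process_item must either return the item, so the next pipeline receives it, or raise DropItem to stop processing it. A minimal sketch of a filtering pipeline (the class and the price check are purely illustrative):

from scrapy.exceptions import DropItem


class RequirePricePipeline:
    def process_item(self, item, spider):
        # items without a price are dropped here and never reach
        # the pipelines that come later in ITEM_PIPELINES order
        if not item.get("price"):
            raise DropItem("missing price: %r" % item)
        return item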

Configuring MySQL

https://blog.csdn.net/weixin_43205308/article/details/130866241?csdn_share_tail=%7B%22type%22%3A%22blog%22%2C%22rType%22%3A%22article%22%2C%22rId%22%3A%22130866241%22%2C%22source%22%3A%22weixin_43205308%22%7D
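
The linked post covers the MySQL setup in detail. For reference, here is a hedged sketch of what a pymysql-backed pipeline could look like, reading its connection settings via get_project_settings; the DB_* setting names, the book table and its columns are assumptions for illustration, not taken from the original project:

import pymysql
from scrapy.utils.project import get_project_settings


class MysqlPipeline:
    def open_spider(self, spider):
        settings = get_project_settings()
        self.conn = pymysql.connect(
            host=settings.get("DB_HOST", "localhost"),
            port=settings.getint("DB_PORT", 3306),
            user=settings.get("DB_USER", "root"),
            password=settings.get("DB_PASSWORD", ""),
            database=settings.get("DB_NAME", "spider"),
            charset="utf8mb4",
        )
        self.cursor = self.conn.cursor()

    def process_item(self, item, spider):
        # `describe` is a MySQL keyword, hence the backticks
        sql = "INSERT INTO book (`describe`, author, url, price) VALUES (%s, %s, %s, %s)"
        self.cursor.execute(sql, (item["describe"], item["author"], item["url"], item["price"]))
        self.conn.commit()
        return item

    def close_spider(self, spider):
        self.cursor.close()
        self.conn.close()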
