Scrapy crawling Autohome images: the pipeline method, a step further

Part One: not using Scrapy's built-in image download

spider:
# -*- coding: utf-8 -*-
import scrapy
from car_spi.items import CarSpiItem


class CarSpider(scrapy.Spider):
    name = 'car'
    allowed_domains = ['car.autohome.com.cn']
    start_urls = ['https://car.autohome.com.cn/pic/series/5146.html#pvareaid=2042214']

    def parse(self, response):
        ui_boxs = response.xpath('//div[@class="uibox"]')
        for ui_box in ui_boxs:
            title = ui_box.xpath('.//div[@class="uibox-title"]/a/text()').get()
            urls = ui_box.xpath('.//ul/li/a/img/@src').getall()
            # The src values are relative; run each through response.urljoin to
            # build absolute URLs (map returns an iterator, so wrap it in list).
            urls = list(map(lambda url: response.urljoin(url), urls))
            item = CarSpiItem(title=title, urls=urls)
            yield item
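
The spider only yields items; saving the files is the pipeline's job below. To start a crawl from the project root (the project name car_spi comes from the import above):

scrapy crawl car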

pipelines:
# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
import os
from urllib import request

class CarSpiPipeline(object):
    def __init__(self):
        # Create an images/ directory next to the project package; every
        # download ends up under this root.
        self.path = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'images')
        if not os.path.exists(self.path):
            os.makedirs(self.path)

    def process_item(self, item, spider):
        title = item['title']
        urls = item['urls']

        # One subdirectory per image category, named after the uibox title.
        title_path = os.path.join(self.path, title)
        if not os.path.exists(title_path):
            os.makedirs(title_path)
        for url in urls:
            # Autohome image URLs end in '_<name>.jpg', so the tail after the
            # last underscore makes a usable filename.
            image_name = url.split('_')[-1]
            # urlretrieve is a blocking call: the crawl pauses for each file.
            request.urlretrieve(url, os.path.join(title_path, image_name))
        return item
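
Because urlretrieve downloads synchronously inside the pipeline, the crawl stalls while each file transfers. Scrapy's built-in ImagesPipeline hands the same URLs back to the engine and downloads them concurrently. A minimal sketch under stated assumptions: the class name CarImagesPipeline is mine, the file_path signature is the Scrapy 2.4+ form, and Pillow must be installed with IMAGES_STORE set.

from scrapy import Request
from scrapy.pipelines.images import ImagesPipeline


class CarImagesPipeline(ImagesPipeline):
    def get_media_requests(self, item, info):
        # Feed every URL to the downloader; the engine fetches them concurrently.
        for url in item['urls']:
            yield Request(url, meta={'title': item['title']})

    def file_path(self, request, response=None, info=None, *, item=None):
        # Keep the same <category>/<tail-of-url> layout as the manual pipeline.
        image_name = request.url.split('_')[-1]
        return '{}/{}'.format(request.meta['title'], image_name)

# settings.py additions for this variant:
# ITEM_PIPELINES = {'car_spi.pipelines.CarImagesPipeline': 1}
# IMAGES_STORE = './images'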

items:
# -*- coding: utf-8 -*-

# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html

import scrapy


class CarSpiItem(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    title = scrapy.Field()
    urls = scrapy.Field()

settings:
# -*- coding: utf-8 -*-

# Scrapy settings for car_spi project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     https://docs.scrapy.org/en/latest/topics/settings.html
#     https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#     https://docs.scrapy.org/en/latest/topics/spider-middleware.html
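
# The rest of the original listing was cut off; below is a minimal
# reconstruction, not the author's exact values. The one entry this project
# strictly needs is ITEM_PIPELINES (as the pipelines.py comment reminds);
# the User-Agent string is an assumption.

BOT_NAME = 'car_spi'

SPIDER_MODULES = ['car_spi.spiders']
NEWSPIDER_MODULE = 'car_spi.spiders'

# Skip robots.txt and present a browser User-Agent so the image pages respond.
ROBOTSTXT_OBEY = False
DEFAULT_REQUEST_HEADERS = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)',
}

# Register the custom pipeline; 300 is Scrapy's conventional default priority.
ITEM_PIPELINES = {
    'car_spi.pipelines.CarSpiPipeline': 300,
}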