Here is a code example for storing images in per-category folders:
import re

from scrapy.pipelines.images import ImagesPipeline
from scrapy import Request

class ImagesrenamePipeline(ImagesPipeline):

    def get_media_requests(self, item, info):
        # Issue one download request per image URL. If the field holds a
        # single URL rather than a list, yield it directly without the loop.
        for image_url in item['imgurl']:
            # Pass the image name, taken from the spider's item, down to
            # file_path() via the request's meta dict.
            yield Request(image_url, meta={'name': item['imgname']})

    # Rename the files. Without overriding this method, each image is
    # saved under its hash, i.e. an unreadable jumble of characters.
    def file_path(self, request, response=None, info=None):
        # Use the last segment of the URL as the image file name.
        image_guid = request.url.split('/')[-1]
        # Retrieve the image name passed through meta above.
        name = request.meta['name']
        # Strip characters that are illegal in Windows file names;
        # skipping this step leads to garbled names or failed downloads.
        name = re.sub(r'[\\/:*?"<>|]', '', name)
        # The key to per-folder storage: {0} is the folder name,
        # {1} is the file name.
        filename = u'{0}/{1}'.format(name, image_guid)
        return filename
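For context, the pipeline above assumes the spider yields items with an imgurl field (a list of image URLs) and an imgname field (the per-category folder name). A minimal sketch of such an item class, with the hypothetical name ImgItem:

import scrapy

class ImgItem(scrapy.Item):
    # Hypothetical item carrying the two fields the pipeline reads.
    imgurl = scrapy.Field()   # list of image URLs to download
    imgname = scrapy.Field()  # category name, used as the folder name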
# Set the image storage root directory directly in settings.py
IMAGES_STORE = 'path/to/images'
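The custom pipeline must also be enabled in settings.py before Scrapy will use it. A minimal sketch, where the module path myproject.pipelines is a placeholder for your own project layout:

# settings.py: register the custom pipeline (the priority 300 is arbitrary).
ITEM_PIPELINES = {
    'myproject.pipelines.ImagesrenamePipeline': 300,
}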
Here is the official example:
import scrapy
from scrapy.pipelines.images import ImagesPipeline
from scrapy.exceptions import DropItem

class MyImagesPipeline(ImagesPipeline):

    def get_media_requests(self, item, info):
        # Request every URL listed in the item's image_urls field.
        for image_url in item['image_urls']:
            yield scrapy.Request(image_url)

    def item_completed(self, results, item, info):
        # Keep only the paths of the images that downloaded successfully.
        image_paths = [x['path'] for ok, x in results if ok]
        if not image_paths:
            raise DropItem("Item contains no images")
        item['image_paths'] = image_paths
        return item
You can also restrict the allowed size range of downloaded images; see Scrapy's official documentation on downloading images for details.
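For example, a minimal sketch of the relevant settings (IMAGES_MIN_HEIGHT, IMAGES_MIN_WIDTH, and IMAGES_THUMBS are standard Scrapy settings; the values here are illustrative):

# settings.py: drop any image smaller than the given dimensions.
# Both settings default to 0, which disables size filtering.
IMAGES_MIN_HEIGHT = 110
IMAGES_MIN_WIDTH = 110

# Optionally generate thumbnails alongside the originals.
IMAGES_THUMBS = {
    'small': (50, 50),
    'big': (270, 270),
}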