一.将图片下载到同一个文件夹中
1.novel.py
# 需要下载的图片地址,需要是一个列表
# 如果不下载,只是将地址保存在数据库中,不需要设置列表
novel['img_url'] = [img_url]
2.在pipelines.py中自定义自己的pipeline
from scrapy.pipelines.images import ImagesPipeline
from scrapy.http import Request
class CustomImagesPipeline(ImagesPipeline):
    """Download the image listed in item['img_url'] into IMAGES_STORE."""

    def get_media_requests(self, item, info):
        # Build a download Request from the item's URL, stashing the item
        # in meta so file_path() can read it back later.
        yield Request(item['img_url'][0], meta={'item': item})

    def file_path(self, request, response=None, info=None):
        # Name the saved file after the second-to-last URL path segment.
        segment = request.meta['item']['img_url'][0].split('/')[-2]
        return '%s.jpg' % segment

    def item_completed(self, results, item, info):
        # results is the per-request download outcome; print it and pass
        # the item on down the pipeline unchanged.
        print(results)
        return item
3.在settings中开启自己的pipeline
ITEM_PIPELINES = {
    # Disable Scrapy's built-in ImagesPipeline (None = never run)...
    'scrapy.pipelines.images.ImagesPipeline': None,
    # ...because the custom CustomImagesPipeline replaces it; when using a
    # custom pipeline the stock ImagesPipeline must be set to None.
    'NovelSpider.pipelines.CustomImagesPipeline': 1,
}
# Directory where downloaded images are saved.
IMAGES_STORE = 'imgs'
# Item field that the images pipeline reads the image URL(s) from.
IMAGES_URLS_FIELD = 'img_url'
二.将图片分类下载(以奇书网为例)
1.在pipelines.py中自定义自己的pipeline
from scrapy.pipelines.images import ImagesPipeline
from scrapy.http import Request
class QishuImagePipeline(ImagesPipeline):
    """Download each item's image into a per-category subdirectory."""

    def get_media_requests(self, item, info):
        # item['img_url'] is a single URL string here (not a list);
        # carry the item along in meta so file_path() can use its fields.
        image_url = item['img_url']
        yield Request(image_url, meta={'item': item})

    def file_path(self, request, response=None, info=None):
        item = request.meta['item']
        # BUG FIX: these two assignments had been fused into comments,
        # leaving category_name / img_name undefined at the return below
        # (a NameError on every download).
        category_name = item['category_name']        # subdirectory per category
        img_name = item['img_url'].split('/')[-1]    # keep the original filename
        path = category_name + '/' + img_name
        return path

    def item_completed(self, results, item, info):
        # results: per-request (success, file-info-or-failure) outcomes.
        print(results)
        return item
2.在settings中开启自己的pipeline
ITEM_PIPELINES = {
    # Custom image pipeline, enabled at priority 1.
    'Qishu.pipelines.QishuImagePipeline': 1,
    # Disable the stock ImagesPipeline so images are not downloaded twice.
    'scrapy.pipelines.images.ImagesPipeline': None,
}
# Directory where downloaded images are saved.
IMAGES_STORE = 'images'
# Item field that the images pipeline reads the image URL from.
IMAGES_URLS_FIELD = 'img_url'