- Writing the configuration file
The following needs to be configured in settings.py:
1. Set ROBOTSTXT_OBEY = False so the crawler does not obey robots.txt
2. Add a browser user-agent to DEFAULT_REQUEST_HEADERS
DEFAULT_REQUEST_HEADERS = {
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    'Accept-Language': 'en',
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.69 Safari/537.36',
}
3. Set the path where downloaded images are saved (an images directory at the project root; see the layout sketch after this list)
import os
IMAGES_STORE = os.path.join(os.path.dirname(os.path.dirname(__file__)),'images')
4. Register the custom pipeline in ITEM_PIPELINES
ITEM_PIPELINES = {
    'download_image.pipelines.DownloadImagePipeline': 300,
    # 'scrapy.pipelines.images.ImagesPipeline': 300,  # Scrapy's built-in image-download pipeline class
}
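For orientation, the snippets below assume a standard layout as generated by scrapy startproject download_image, roughly as sketched here (the spider file name is an assumption); with the IMAGES_STORE expression above, the images directory ends up next to scrapy.cfg at the project root:
download_image/
├── scrapy.cfg
├── images/                  <- IMAGES_STORE, created when the first image is saved
└── download_image/
    ├── settings.py
    ├── items.py
    ├── pipelines.py
    └── spiders/
        └── image_spider.py  <- hypothetical file name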
- Writing pipelines.py
from scrapy.pipelines.images import ImagesPipeline
from download_image import settings
import re
import os

class DownloadImagePipeline(ImagesPipeline):
    # Called before the image files are downloaded
    def get_media_requests(self, item, info):
        media_requests = super(DownloadImagePipeline, self).get_media_requests(item, info)
        for media_request in media_requests:
            media_request.item = item  # attach the item so file_path() can read its title
        return media_requests

    # Override file_path(), which is called when a downloaded image is written to disk
    def file_path(self, request, response=None, info=None, *, item=None):
        origin_path = super(DownloadImagePipeline, self).file_path(request, response, info)  # default path, e.g. full/<sha1>.jpg
        title = request.item['title']  # title attached in get_media_requests()
        title = re.sub(r'[\\/:\*\?"<>\|]', "", title)  # strip characters that are illegal in file names
        save_path = os.path.join(settings.IMAGES_STORE, title)  # directory for this item's images
        # create the directory (and any missing parents) if it does not exist yet
        os.makedirs(save_path, exist_ok=True)
        image_name = origin_path.replace("full/", '')  # keep only the file name
        return os.path.join(save_path, image_name)
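Note that the file_path() signature used above (with the keyword-only item argument) exists since Scrapy 2.4, and from that version the item is already passed in directly, so attaching it to the request in get_media_requests() is optional. A minimal sketch of the same pipeline relying on that, returning a path relative to IMAGES_STORE and letting Scrapy create the directories itself (an alternative, not the version used in this project):
import os
import re
from scrapy.pipelines.images import ImagesPipeline

class DownloadImagePipeline(ImagesPipeline):
    def file_path(self, request, response=None, info=None, *, item=None):
        # default path produced by ImagesPipeline, e.g. "full/<sha1>.jpg"
        origin_path = super().file_path(request, response, info, item=item)
        # strip characters that are not allowed in directory names
        title = re.sub(r'[\\/:\*\?"<>\|]', "", item['title'])
        # a path relative to IMAGES_STORE; Scrapy joins it with the storage root
        return os.path.join(title, os.path.basename(origin_path))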
- Writing items.py
import scrapy

class DownloadImageItem(scrapy.Item):
    # define the fields for your item here like:
    title = scrapy.Field()        # title of the detail page
    image_urls = scrapy.Field()   # URLs of the images to download
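By default the ImagesPipeline reads the download URLs from a field named image_urls (configurable through the IMAGES_URLS_FIELD setting). If you also want the pipeline to record what it downloaded, you can additionally declare the results field it writes to; this extra field is optional:
    images = scrapy.Field()       # optional: ImagesPipeline stores download results (url, path, checksum) here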
- Writing the core spider code
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from ..items import DownloadImageItem

# When using the CrawlSpider class, the parse() method must not be overridden.
class ImageSpider(CrawlSpider):
    name = 'image_spider'
    allowed_domains = ['xxxx.com']
    start_urls = ['https://www.xxxx.com/home?p=1']
    rules = (
        # pagination URLs
        Rule(LinkExtractor(allow=r".+?p=\d+"), follow=True),
        # detail-page links
        Rule(LinkExtractor(allow=r".+/work/.+html"), callback="parse_detail", follow=False),
    )

    def parse_detail(self, response):
        # title
        title_list = response.xpath("//div[@class='details-contitle-box']/h2/text()").getall()
        title = ''.join(title_list).strip()
        # image URLs
        image_urls = response.xpath("//div[@class='work-show-box mt-40 js-work-content']//img/@src").getall()
        yield DownloadImageItem(title=title, image_urls=image_urls)
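If the src attributes on the detail page turn out to be relative paths (this depends on the site, so treat it as a defensive variant), convert them to absolute URLs before yielding, because the ImagesPipeline needs complete URLs to download from:
        # inside parse_detail(), before the yield:
        image_urls = [response.urljoin(url) for url in image_urls]
        yield DownloadImageItem(title=title, image_urls=image_urls)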
- Run the spider
scrapy crawl image_spider
- Download result: under the images directory, a subdirectory is created for each title, holding that item's images.
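With the file_path() override above, the downloaded files should end up organised roughly like this (titles and hash-based file names are placeholders):
images/
├── Title of work A/
│   ├── 0a1b2c9d....jpg
│   └── 3d4e5f10....jpg
└── Title of work B/
    └── 6a7b8c2e....jpg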