Scrapy has two kinds of middleware: spider middleware and downloader middleware. This section focuses on the downloader middleware; after modification, middlewares.py looks as follows.
Method descriptions
First enable the middleware in settings.py: DOWNLOADER_MIDDLEWARES = {'loadImg.middlewares.LoadimgDownloaderMiddleware': 543,}
Current file: D:\python_test\scrapyProject\loadImg\loadImg\middlewares.py
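For reference, a minimal sketch of how that entry sits in settings.py (only the DOWNLOADER_MIDDLEWARES line comes from this project; the comment on priorities states Scrapy's general rule):

# settings.py (sketch)
DOWNLOADER_MIDDLEWARES = {
    # The number is the priority: lower values run closer to the engine,
    # higher values closer to the downloader; 543 is the template default.
    'loadImg.middlewares.LoadimgDownloaderMiddleware': 543,
}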
from scrapy import signals


class LoadimgDownloaderMiddleware:
    # Class method: returns an instance of this middleware (can be ignored)
    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    # Intercepts and processes every request object
    # Parameters: request is the intercepted request; spider is the instance of the spider class from the spider file
    # Read the spider's class attributes via spider.<attribute name>
    def process_request(self, request, spider):
        print(spider.name)  # name is a class attribute of the spider
        return None

    # Intercepts and processes every response object
    # Parameters: response is the intercepted response; request is the unique request that produced this response
    def process_response(self, request, response, spider):
        return response

    # Intercepts and processes requests that raised an exception
    # Parameter: request is the intercepted request that raised the exception
    def process_exception(self, request, exception, spider):
        return request  # after fixing the request (e.g. swapping the proxy), return it to be re-sent

    # Logging hook (can be ignored)
    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
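To illustrate the spider.<attribute> access described above, a minimal spider sketch (the spider name, class attribute, and start URL below are placeholders, not taken from the original project):

# spiders/img.py (sketch; name, custom_tag and the URL are placeholders)
import scrapy

class ImgSpider(scrapy.Spider):
    name = 'img'                    # read in the middleware as spider.name
    custom_tag = 'load-images'      # hypothetical class attribute, read as spider.custom_tag
    start_urls = ['https://example.com/']

    def parse(self, response):
        # Every request and response of this spider passes through the downloader middleware.
        pass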
Example
First enable the middleware in settings.py: DOWNLOADER_MIDDLEWARES = {'loadImg.middlewares.LoadimgDownloaderMiddleware': 543,}
Current file: D:\python_test\scrapyProject\loadImg\loadImg\middlewares.py
from scrapy import signals


class LoadimgDownloaderMiddleware:
    def process_request(self, request, spider):
        # request.meta['proxy'] = 'http://ip:port'
        # request.headers['User-Agent'] = 'UA string'
        # request.headers['cookie'] = 'xxx'
        print(request.url + ': request intercepted!')
        return None

    def process_response(self, request, response, spider):
        print(request.url + ': response intercepted!')
        return response

    def process_exception(self, request, exception, spider):
        print(request.url + ': request that raised an exception intercepted!')
        # request.meta['proxy'] = 'https://ip:port'
        return request  # re-send the request
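A common concrete use of process_request is rotating the User-Agent (or proxy) per request. A minimal sketch, assuming a hand-written list of UA strings (the class name and list contents are made up, and this middleware would need its own DOWNLOADER_MIDDLEWARES entry to take effect):

import random

class RotateUADownloaderMiddleware:   # hypothetical middleware, not part of the original project
    user_agents = [
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64)',
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7)',
    ]

    def process_request(self, request, spider):
        # Attach a randomly chosen User-Agent to every outgoing request.
        request.headers['User-Agent'] = random.choice(self.user_agents)
        # request.meta['proxy'] = 'http://ip:port'  # enable when a proxy pool is available
        return None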