Using Selenium together with Scrapy: a Taobao crawler

 middlewares.py

# -*- coding: utf-8 -*-

# Define here the models for your spider middleware
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/spider-middleware.html

from scrapy import signals


class TaobaospiderSpiderMiddleware(object):
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the spider middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_spider_input(self, response, spider):
        # Called for each response that goes through the spider
        # middleware and into the spider.

        # Should return None or raise an exception.
        return None

    def process_spider_output(self, response, result, spider):
        # Called with the results returned from the Spider, after
        # it has processed the response.

        # Must return an iterable of Request, dict or Item objects.
        for i in result:
            yield i

    def process_spider_exception(self, response, exception, spider):
        # Called when a spider or process_spider_input() method
        # (from other spider middleware) raises an exception.

        # Should return either None or an iterable of Response, dict
        # or Item objects.
        pass

    def process_start_requests(self, start_requests, spider):
        # Called with the start requests of the spider, and works
        # similarly to the process_spider_output() method, except
        # that it doesn’t have a response associated.

        # Must return only requests (not items).
        for r in start_requests:
            yield r

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)

# Import the IgnoreRequest exception (raised from process_request to drop a request)
from scrapy.exceptions import IgnoreRequest
# Import the HtmlResponse class used to wrap the Selenium-rendered page
from scrapy.http.response.html import HtmlResponse

# Import selenium
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import time
# Custom downloader middleware
class CustomMiddleware(object):
    def process_request(self, request, spider):
        '''
        Possible return values:
        1. None: processing continues with the remaining middlewares.
        2. A Request object: the request is rescheduled and sent again, until
           something other than a Request is returned.
        3. Raise IgnoreRequest: by default process_exception() is then called.
        4. A Response object: no later process_request() is called; instead the
           middlewares' process_response() methods run on that response.
        '''
        url = request.url
        opt = Options()
        opt.add_argument('--headless')
        # Create a headless Chrome browser
        driver = webdriver.Chrome(options=opt)
        driver.get(url)

        # Scroll the page down in steps so lazily loaded content is rendered;
        # the last step (j = 1.0) reaches the very bottom
        for x in range(2, 12, 2):
            j = x / 10
            js = 'document.documentElement.scrollTop = document.documentElement.scrollHeight*%f' % j
            driver.execute_script(js)
            # Wait 0.5 s after each scroll
            time.sleep(0.5)

        # driver.save_screenshot('123.png')
        # Grab the rendered page source
        page_source = driver.page_source
        # Quit the browser
        driver.quit()

        # Build an HtmlResponse from the rendered source; the body is plain
        # text, so the character encoding must be given explicitly
        response = HtmlResponse(url=url, body=page_source, encoding='utf-8', request=request)

        return response

    def process_response(self, request, response, spider):
        '''
        Must return one of the following:
        1. A Response: it is handed to the next middleware's process_response().
        2. A Request: the chain stops and the request is rescheduled through
           process_request().
        3. Raise an exception.
        '''
        print('process_response was called')
        return response

    def process_exception(self, request, exception, spider):
        # Called after an exception is raised; handle the failed request here
        print('process_exception was called')
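
# Note: the class below is an added sketch, not part of the original post.
# Starting a fresh headless Chrome for every request is slow; one common
# refinement is to share a single driver and quit it when the spider closes.
# The class name ReusableDriverMiddleware is hypothetical.
class ReusableDriverMiddleware(object):

    def __init__(self):
        opt = Options()
        opt.add_argument('--headless')
        # One browser instance shared by every request
        self.driver = webdriver.Chrome(options=opt)

    @classmethod
    def from_crawler(cls, crawler):
        s = cls()
        # Quit the browser when the spider finishes
        crawler.signals.connect(s.spider_closed, signal=signals.spider_closed)
        return s

    def process_request(self, request, spider):
        self.driver.get(request.url)
        return HtmlResponse(url=request.url, body=self.driver.page_source,
                            encoding='utf-8', request=request)

    def spider_closed(self, spider):
        self.driver.quit()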

# A second downloader middleware, used only to show the call order
class NextMiddleware(object):

    def process_request(self, request, spider):
        print('NextMiddleware--=---request')

    def process_response(self, request, response, spider):
        print('///NextMiddleware----->response')
        return response
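
Neither middleware does anything until it is registered in the project settings. A minimal sketch, assuming the project module is named taobaospider (the priority numbers 543 and 544 are illustrative):

 settings.py

# process_request runs in ascending order of these numbers,
# process_response in descending order
DOWNLOADER_MIDDLEWARES = {
    'taobaospider.middlewares.CustomMiddleware': 543,
    'taobaospider.middlewares.NextMiddleware': 544,
}

With this ordering, CustomMiddleware.process_request runs first and returns an HtmlResponse, so NextMiddleware.process_request is skipped entirely; both process_response methods still run, NextMiddleware's first.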

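For completeness, a hypothetical minimal spider to drive the pipeline; the spider name and search URL below are illustrative placeholders, and since the Taobao page structure is not shown in the post, the parse step only confirms that the response carries the Selenium-rendered HTML:

 spiders/taobao.py

# -*- coding: utf-8 -*-
import scrapy


class TaobaoSpider(scrapy.Spider):
    # Name and start URL are assumptions for illustration
    name = 'taobao'
    start_urls = ['https://s.taobao.com/search?q=python']

    def parse(self, response):
        # response is the HtmlResponse built in CustomMiddleware, so
        # response.text is the page after scrolling in headless Chrome
        self.logger.info('rendered page length: %d', len(response.text))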

Reposted from: https://my.oschina.net/u/3771014/blog/1648837
