Integrating Scrapy with Selenium (using a downloader middleware), plus a collection of pyppeteer integration approaches
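
Before the code: none of the middlewares below do anything until they are registered in the project's settings.py. A minimal sketch, assuming the Scrapy project module is called Aliexpress and this file lives at Aliexpress/middlewares.py (the dotted paths and priority numbers are assumptions, adjust them to your own layout):

# settings.py (sketch -- module paths are assumptions)
DOWNLOADER_MIDDLEWARES = {
    # Selenium-based page renderer defined in this file
    'Aliexpress.middlewares.DownloaderMiddleware': 543,
    # custom retry middleware that re-queues robot-check pages
    'Aliexpress.middlewares.GetFailedUrl': 550,
    # disable the stock retry middleware so the custom one replaces it
    'scrapy.downloadermiddlewares.retry.RetryMiddleware': None,
}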

from scrapy import signals
import pyppeteer
import asyncio
import os
import time
import json
from scrapy.http import HtmlResponse
from pyppeteer.dialog import Dialog
from Aliexpress.ConfigDB import RedisDB, RedisPool
from scrapy.downloadermiddlewares.retry import RetryMiddleware
import logging

# Quiet down the very chatty pyppeteer / websockets loggers
pyppeteer_level = logging.WARNING
logging.getLogger('pyppeteer').setLevel(pyppeteer_level)
logging.getLogger('websockets.protocol').setLevel(pyppeteer_level)

# redisconn = RedisDB(db=0)
redisconn = RedisPool(db=0)
pyppeteer.DEBUG = False


# def _patch_pyppeteer():
#     from typing import Any
#     from pyppeteer import connection, launcher
#     import websockets.client

#     class PatchedConnection(connection.Connection):  # type: ignore
#         def __init__(self, *args: Any, **kwargs: Any) -> None:
#             super().__init__(*args, **kwargs)
#             # the _ws argument is not yet connected, can simply be replaced with another
#             # with better defaults.
#             self._ws = websockets.client.connect(
#                 self._url,
#                 loop=self._loop,
#                 # the following parameters are all passed to WebSocketCommonProtocol
#                 # which marks all three as Optional, but connect() doesn't, hence the liberal
#                 # use of type: ignore on these lines.
#                 # fixed upstream but not yet released, see aaugustin/websockets#93ad88
#                 max_size=None,  # type: ignore
#                 ping_interval=None,  # type: ignore
#                 ping_timeout=None,  # type: ignore
#             )

#     connection.Connection = PatchedConnection
#     # also imported as a global in pyppeteer.launcher
#     launcher.Connection = PatchedConnection

# class DownloaderMiddleware(object):
#     # Not all methods need to be defined. If a method is not defined,
#     # scrapy acts as if the downloader middleware does not modify the
#     # passed objects.

#     def __init__(self):
#         # print("Init downloaderMiddleware use pypputeer.")
#         # os.environ['PYPPETEER_CHROMIUM_REVISION'] = '588429'
#         # pyppeteer.DEBUG = False
#         print(os.environ.get('PYPPETEER_CHROMIUM_REVISION'))
#         loop = asyncio.get_event_loop()
#         task = asyncio.ensure_future(self.getbrowser())
#         loop.run_until_complete(task)

#         # self.browser = task.result()
#         # print(self.browser)
#         # print(self.page)
#         # self.page = await browser.newPage()

#     async def getbrowser(self):
#         ua=redisconn.sget("user-agent")
#         redisconn.rpush('user-agent',ua)
#         # proxies=redis_ua.sget("daxiangIP")
#         # redisconn.lpush('proxies:ipx',proxies)
#         # Proxies=str(proxies,encoding='utf-8')
#         # print(ua,Proxies,">"*30)
#         self.browser = await pyppeteer.launch({'headless': False,'timeout':0, 
#                                           'args': [
#                                               '--window-size=1300,600',
#                                               '--disable-extensions',
#                                               '--hide-scrollbars',
#                                               '--disable-bundled-ppapi-flash',
#                                               '--mute-audio',
#                                               '--no-sandbox',
#                                               '--disable-setuid-sandbox',
#                                               '--disable-gpu',
#                                               '--disable-infobars',
#                                               '--proxy-server=http://http-dyn.abuyun.com:9020',
#                                           ],
#                                           'dumpio': True
#                                           })
#         # self.browser = await pyppeteer.connect({'browserWSEndpoint': 'ws://172.20.3.221:3001?--proxy-server=http://http-dyn.abuyun.com:9020',
#         # #                                 #     'headless': False,'timeout':0,
#         # #                                 #     'args': [
#         # #                                 #       '--window-size={1300},{600}',
#         # #                                 #       '--disable-extensions',
#         # #                                 #       '--hide-scrollbars',
#         # #                                 #       '--disable-bundled-ppapi-flash',
#         # #                                 #       '--mute-audio',
#         # #                                 #       '--no-sandbox',
#         # #                                 #       '--disable-setuid-sandbox',
#         # #                                 #       '--disable-gpu',
#         # #                                 #       '--disable-infobars',
#         # #                                 #       '--proxy-server=http://http-dyn.abuyun.com:9020',
#         # #                                 #   ],
#         # #                                 #   'dumpio': True
#                                             # })
#         # self.browser = await pyppeteer.connect({'browserWSEndpoint': 'ws://172.20.3.221:3001'})
#         self.page = await self.browser.newPage()
#         # await self.page.setExtraHTTPHeaders({'Proxy-Authorization': 'Basic H6587BH09900664D:20C4314AF6C62E0F'})
#         await self.page.authenticate({"username": "H6587BH09900664D", "password": "20C4314AF6C62E0F"})          
#         # await self.page.setExtraHTTPHeaders("http://http-dyn.abuyun.com:9020") 
#         # await self.page.setCookie(cookie)
#         # await self.page.setViewport(viewport={'width': 1366, 'height': 768})
#         await self.page.setUserAgent(str(ua,encoding='utf-8'))
#         # await self.page.setUserAgent("Chrome (AppleWebKit/537.1; Chrome50.0; Windows NT 6.3) AppleWebKit/537.36 (KHTML like Gecko) Chrome/51.0.2704.79 Safari/537.36 Edge/14.14393")
#         await self.page.setJavaScriptEnabled(enabled=True)
#         await self.page.evaluate(
#             '''() =>{ Object.defineProperties(navigator,{ webdriver:{ get: () => false } }) }''')  
#         await self.page.evaluate('''() =>{ window.navigator.chrome = { runtime: {},  }; }''')
#         await self.page.evaluate(
#             '''() =>{ Object.defineProperty(navigator, 'languages', { get: () => ['en-US', 'en'] }); }''')
#         await self.page.evaluate(
#             '''() =>{ Object.defineProperty(navigator, 'plugins', { get: () => [1, 2, 3, 4, 5,6], }); }''')
#         await self.page.waitFor(10000)
#         # cooki=redis_ua.get("tbcookie")
#         # redis_ua.rpush('tbcookie',cooki)
#         # cookieStr=str(cooki,encoding='utf-8')
#         # for cook in json.loads(cookieStr):
#         #     await self.page.setCookie(cook)
#         return self.page


#     @classmethod
#     def from_crawler(cls, crawler):
#         # This method is used by Scrapy to create your spiders.
#         s = cls()
#         crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
#         return s

#     def process_request(self, request, spider):
#         # Called for each request that goes through the downloader
#         # middleware.

#         # Must either:
#         # - return None: continue processing this request
#         # - or return a Response object
#         # - or return a Request object
#         # - or raise IgnoreRequest: process_exception() methods of
#         #   installed downloader middleware will be called
#         loop = asyncio.get_event_loop()
#         task = asyncio.ensure_future(self.usePypuppeteer(request))
#         loop.run_until_complete(task)
#         # return task.result()
#         return HtmlResponse(url=request.url, body=task.result(), encoding="utf-8", request=request)

#     async def intercept_request(self,req):
#         if req.resourceType in ['image', 'media', 'eventsource', 'websocket']:
#             await req.abort()
#         else:
#             await req.continue_()

#     async def intercept_response(self,res):
#         resourceType = res.request.resourceType
#         # if resourceType in ['xhr', 'fetch']:
#         #     resp = await res.text()

#     async def usePypuppeteer(self, request):
#         ua=redisconn.sget("user-agent")
#         redisconn.rpush('user-agent',ua)
#         await self.page.setUserAgent(str(ua,encoding='utf-8'))
#         # redisconn=RedisDB(db=0)
#         # cooki=redisconn.get("tbcookie")
#         # redisconn.lpush('tbcookie',cooki)
#         # cookieStr=str(cooki,encoding='utf-8')
#         # for cook in json.loads(cookieStr):
#         #     await self.page.setCookie(cook)
#         await self.page.goto(request.url, {'timeout': 0})
#         # self.page.on('dialog', lambda dialog: asyncio.ensure_future(self.handle_dialog(page,dialog)))
#         await asyncio.sleep(10)
#         # try:
#         #     # self.dialog_close= self.page.querySelector('body > div.next-overlay-wrapper.opened > div.next-overlay-inner.next-dialog-container > div')
#         #     self.dialog_close=self.page.querySelector('.next-dialog next-closeable ui-newuser-layer-dialog > a')
#         #     # print(self.dialog_close.content)
#         #     # self.dialog_close.click()
#         # except Exception as e:
#         #     print("error>>%s"%e)
#         try:
            
#             if R'/store' in request.url or R"SearchText=" in request.url:
#                 # await self.page.setRequestInterception(True)
#                 # self.page.on('request', self.intercept_request)
#                 # self.page.on('response', self.intercept_response)
#                 # self.ck= await self.page.querySelector('.next-btn next-medium next-btn-normal next-pagination-item next-current')
#                 # await self.page.goto(request.url,{'timeout':0})
#                 await self.page.evaluate('window.scrollBy(0, document.body.scrollHeight*5/6)')
#                 await asyncio.sleep(3)
#                 await self.page.evaluate('window.scrollBy(0, document.body.scrollHeight)')
#                 await asyncio.sleep(6)
#                 # await self.page.evaluate('window.scrollBy(document.body.scrollHeight,document.body.scrollHeight-500)')
#                 # await asyncio.sleep(8)
#             else:
#                 # self.ck= await self.page.querySelector('#product-detail   ul > li:nth-child(3) > div > span')
#                 # await self.ck.click()
#                 for i in range(0,6):
#                     await self.page.evaluate('window.scrollBy(0, {})'.format(800*i))
#                     await asyncio.sleep(5)
#             # await self.page.evaluate('window.scrollBy(0, document.body.scrollHeight)')       
#             content = await self.page.content()
#         except Exception as e:
#             print("error >>",e)
#             content="error"
#         return content

#     async def handle_dialog(self,page,dialog: Dialog):
#         await page.waitFor(5000)
#         await dialog.dismiss()

#     def process_response(self, request, response, spider):
#         # Called with the response returned from the downloader.

#         # Must either;
#         # - return a Response object
#         # - return a Request object
#         # - or raise IgnoreRequest
#         return response

#     def process_exception(self, request, exception, spider):
#         # Called when a download handler or a process_request()
#         # (from other downloader middleware) raises an exception.

#         # Must either:
#         # - return None: continue processing this exception
#         # - return a Response object: stops process_exception() chain
#         # - return a Request object: stops process_exception() chain
#         pass

#     def spider_opened(self, spider):
#         spider.logger.info('Spider opened: %s' % spider.name)

# _patch_pyppeteer()
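
# Note: the commented-out pyppeteer middleware above launches Chromium but never
# closes it. A minimal sketch of the missing cleanup, if the class were re-enabled
# (the spider_closed handler below is an addition, not part of the original code):
#
#     @classmethod
#     def from_crawler(cls, crawler):
#         s = cls()
#         crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
#         crawler.signals.connect(s.spider_closed, signal=signals.spider_closed)
#         return s
#
#     def spider_closed(self, spider):
#         loop = asyncio.get_event_loop()
#         loop.run_until_complete(self.browser.close())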

class GetFailedUrl(RetryMiddleware):
    def __init__(self, settings):
        self.max_retry_times = settings.getint('RETRY_TIMES')
        self.retry_http_codes = set(int(x) for x in settings.getlist('RETRY_HTTP_CODES'))
        self.priority_adjust = settings.getint('RETRY_PRIORITY_ADJUST')
 
    def process_response(self, request, response, spider):
        # print(response.text,)
        # request_url=str(request)[5:-1]
        # Retry non-200 responses and known anti-bot pages. The last pattern is the
        # Taobao/AliExpress block page ("the assistant is busy, slide to verify and come right back").
        blocked = (
            response.status != 200
            or R"robot check" in response.text
            or R"Sorry, we have detected unusual traffic from your network" in response.text
            or R"亲,小二正忙,滑动一下马上回来" in response.text
        )
        if blocked:
            # print("*."*20)
            # with open(str(spider.name) + ".txt", "a") as f:
            #     f.write(response.url + "\n")
            # # print(response.url,"*"*100)
            # if R"www.amazon.com/s?k=" in request_url:
            #     db = MongoDB("AddTask","Add_List")
            #     items={}
            #     items["cid"]=GetAmazonUrlCID(request_url)
            #     items['kwURL']=[]
            #     items['Finished_time']=parse(str(datetime.now())[0:19])
            #     items['link_url']=request_url
            #     items['status']="failed"
            #     # items['error']=str(exception)
            #     if db.getInfo({'id':int(items["cid"])}):
            #         db.updateDict({'id':int(items['cid'])},{'$set':items})
            #     # print("*"*100,str(exception))
            # else:
            #     db_ = MongoDB("AmazonDB","AmazonContent")
            #     items={}
            #     items['id']=db_.getID("amazonUrl")
            #     items["cid"]=GetAmazonUrlCID(request_url)
            #     items['Finished_time']=parse(str(datetime.now())[0:19])
            #     items['link_url']=request_url
            #     items['status']="failed"
            #     # items['error']=str(exception)
            #     db_.insertDict(items)
            # Re-queue through RetryMiddleware so RETRY_TIMES is honoured; once the
            # retry budget is exhausted, fall back to returning the blocked response.
            reason = 'blocked or non-200 response (status %s)' % response.status
            return self._retry(request, reason, spider) or response
        else:
            # print("r*"*20)
            return response
 
#     def process_exception(self, request, exception, spider):
#         if isinstance(exception, self.EXCEPTIONS_TO_RETRY):
#             request_url=str(request)[5:-1]
#             with open(str(spider.name) + ".txt", "a") as f:
#                 f.write(request_url + "\n")
#                 print("*"*20,request_url)
#             if R"www.amazon.com/s?k=" in request_url:
#                 db = MongoDB("AddTask","Add_List")
#                 items={}
#                 items["cid"]=GetAmazonUrlCID(request_url)
#                 items['kwURL']=[]
#                 items['Finished_time']=parse(str(datetime.now())[0:19])
#                 items['link_url']=request_url
#                 items['status']="failed"
#                 items['error']=str(exception)
#                 if db.getInfo({'id':int(items["cid"])}):
#                     db.updateDict({'id':int(items['cid'])},{'$set':items})
#                 # print("*"*100,str(exception))
#             else:
#                 db_ = MongoDB("AmazonDB","AmazonContent")
#                 items={}
#                 items['id']=db_.getID("amazonUrl")
#                 items["cid"]=GetAmazonUrlCID(request_url)
#                 items['Finished_time']=parse(str(datetime.now())[0:19])
#                 items['link_url']=request_url
#                 items['status']="failed"
#                 items['error']=str(exception)
#                 db_.insertDict(items)
#             return None
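
GetFailedUrl takes its thresholds from the standard Scrapy retry settings, so they need to be present in settings.py. A small sketch (the numbers are only examples, the setting names are Scrapy's own):

# settings.py (sketch -- values are illustrative)
RETRY_ENABLED = True
RETRY_TIMES = 5                                              # read by GetFailedUrl.__init__
RETRY_HTTP_CODES = [500, 502, 503, 504, 522, 524, 408, 429]
RETRY_PRIORITY_ADJUST = -1                                   # retried requests get slightly lower priority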



# -*- coding: utf-8 -*-
from selenium import webdriver
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.keys import Keys
from scrapy.http import HtmlResponse
from logging import getLogger
import time
from selenium.webdriver.chrome.options import Options

class DownloaderMiddleware():
    # Settings are often needed inside pipelines or middlewares; they can be read
    # through the scrapy.crawler.Crawler.settings attribute.
    @classmethod
    def from_crawler(cls, crawler):
        # Pull the Selenium parameters out of settings.py and build the instance
        return cls(timeout=crawler.settings.get('SELENIUM_TIMEOUT'),
                   isLoadImage=crawler.settings.get('LOAD_IMAGE'),
                   windowHeight=crawler.settings.get('WINDOW_HEIGHT'),
                   windowWidth=crawler.settings.get('WINDOW_WIDTH')
                   )
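    # Sketch of the settings.py entries the call above expects (the names come from
    # this from_crawler, the values are only examples):
    #
    #     SELENIUM_TIMEOUT = 30      # page-load timeout in seconds
    #     LOAD_IMAGE = True          # whether the browser should load images
    #     WINDOW_WIDTH = 1366
    #     WINDOW_HEIGHT = 768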

    def __init__(self, timeout=30, isLoadImage=True, windowHeight=None, windowWidth=None):
        self.logger = getLogger(__name__)
        self.timeout = timeout
        self.isLoadImage = isLoadImage
        # Keep one browser on the instance so every request handled by this middleware
        # reuses it, instead of launching a fresh Chrome for each page.
        opts = Options()
        opts.add_argument("user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.121 Safari/537.36")
        opts.add_experimental_option('excludeSwitches', ['enable-automation'])
        opts.add_experimental_option('useAutomationExtension', False)
        self.browser = webdriver.Chrome(options=opts, executable_path="/opt/google/chrome/chromedriver")
        if windowHeight and windowWidth:
            self.browser.set_window_size(windowWidth, windowHeight)
        self.browser.set_page_load_timeout(self.timeout)        # page-load timeout

        # Hide navigator.webdriver: execute_script only patches the current page,
        # the CDP command below re-injects the override on every new document.
        script = 'Object.defineProperty(navigator,"webdriver",{get:() => false,});'
        self.browser.execute_script(script)
        self.browser.execute_cdp_cmd("Page.addScriptToEvaluateOnNewDocument", {
            "source": """
                Object.defineProperty(navigator, 'webdriver', {
                get: () => undefined
                })
            """
            })
        self.browser.maximize_window()
        # self.browser = close_win(browser)
        # pros = get_products(browser)
        # a second browser, intended for product detail pages
        self.browser2 = webdriver.Chrome(options=opts, executable_path="/opt/google/chrome/chromedriver")
        self.browser2.execute_script(script)
        self.browser2.execute_cdp_cmd("Page.addScriptToEvaluateOnNewDocument", {
            "source": """
                Object.defineProperty(navigator, 'webdriver', {
                get: () => undefined
                })
            """
            })

        self.wait = WebDriverWait(self.browser, 25)             # explicit-wait timeout for element lookups

    def process_request(self, request, spider):
        # Load the page with the shared browser; get() returns None, the HTML is
        # read back afterwards from page_source.
        self.browser.get(request.url)
        self.logger.debug('chrome is getting %s', request.url)
        if R'/store' in request.url or R"SearchText=" in request.url:
            # Store / search listing pages: scroll down in two steps so the
            # lazy-loaded items render before the HTML is captured.
            self.browser.execute_script("window.scrollTo(0,document.body.scrollHeight*4/6);")
            time.sleep(3)
            self.browser.execute_script("window.scrollTo(0,document.body.scrollHeight*4/5);")
            # self.wait.until(EC.presence_of_element_located((By.XPATH, '//*[@class="next-btn next-medium next-btn-normal next-pagination-item next-current"]')))
            time.sleep(6)
        else:
            # Product detail pages: scroll down 800px at a time
            for i in range(0, 6):
                self.browser.execute_script("window.scrollTo(0,{});".format(800 * i))
                time.sleep(5)
        return HtmlResponse(url=request.url,
                            body=self.browser.page_source,
                            request=request,
                            # ideally this should match the page's real encoding
                            encoding='utf-8',
                            status=200)
    # self.logger.debug('chrome is getting page')
    # Use a flag in request.meta to decide whether this request needs Selenium
    # usedSelenium = request.meta.get('usedSelenium', False)
    # if usedSelenium:
    #     try:
    #         self.browser.get(request.url)
    #         # wait until the search box shows up
    #         input = self.wait.until(
    #             EC.presence_of_element_located((By.XPATH, "//div[@class='nav-search-field ']/input"))
    #         )
    #         time.sleep(2)
    #         input.clear()
    #         input.send_keys("iphone 7s")
    #         # press Enter to run the search
    #         input.send_keys(Keys.RETURN)
    #         # check that the search results appeared
    #         searchRes = self.wait.until(
    #             EC.presence_of_element_located((By.XPATH, "//div[@id='resultsCol']"))
    #         )
    #     except Exception as e:
    #         # self.logger.debug(f'chrome getting page error, Exception = {e}')
    #         print(f"chrome getting page error, Exception = {e}")
    #         return HtmlResponse(url=request.url, status=500, request=request)
    #     else:
    #         time.sleep(3)
    #         return HtmlResponse(url=request.url,
    #                             body=self.browser.page_source,
    #                             request=request,
    #                             # ideally this should match the page's real encoding
    #                             encoding='utf-8',
    #                             status=200)
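
The commented-out variant above keys off request.meta['usedSelenium'], so only some requests go through the browser. A minimal spider-side sketch of how that flag would be set (the spider name and URL are placeholders, not part of the original project):

import scrapy

class ProductSpider(scrapy.Spider):
    name = 'product_example'      # placeholder name

    def start_requests(self):
        # Only requests carrying usedSelenium=True are rendered by the browser;
        # everything else falls through to Scrapy's normal downloader.
        yield scrapy.Request(
            'https://www.aliexpress.com/store/example',   # placeholder URL
            meta={'usedSelenium': True},
            callback=self.parse,
        )

    def parse(self, response):
        self.logger.info('got %d bytes from %s', len(response.body), response.url)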


# from selenium import webdriver
# from scrapy.http import HtmlResponse
# from scrapy.exceptions import IgnoreRequest
# from queue import Queue
# from scrapy.utils.project import get_project_settings
# import time

# class SeleniumMiddleware(object):
#     def __init__(self):
#         # Initialize a headless Firefox (the geckodriver path is machine-specific)
#         options = webdriver.FirefoxOptions()
#         options.add_argument('-headless')
#         self.browser = webdriver.Firefox(executable_path=r'C:\Users\su\Desktop\geckodriver-v0.19.1-win64\geckodriver.exe', firefox_options=options)

#         # get project settings
#         settings=get_project_settings()
#         concurrent_requests=settings.get('CONCURRENT_REQUESTS')

#         # Initialize tabs
#         while len(self.browser.window_handles) < concurrent_requests:
#             self.browser.execute_script('''window.open("","_blank");''')

#         # Initialize window handles queue
#         self.handle_queue=Queue(maxsize=concurrent_requests)
#         for handle in self.browser.window_handles:
#             self.handle_queue.put(handle)

#         # Initialize requests dict
#         self.requests={}

#     def process_request(self, request, spider):
#         result=self.requests.get(request.url)
#         if result is None:
#             # get a free window_handle from queue
#             if self.handle_queue.empty():
#                 return HtmlResponse(url=request.url,request=request, encoding='utf-8', status=202)
#             handle = self.handle_queue.get()

#             # open url by js
#             self.browser.switch_to.window(handle)
#             js = r"location.href='%s';" % request.url
#             self.browser.execute_script(js)

#             # wait for 1s to avoid some bug ("document.readyState" will return a "complete" at the first)
#             time.sleep(1)

#             # mark url
#             self.requests[request.url]={'status':'waiting','handle':handle}

#             return HtmlResponse(url=request.url,request=request, encoding='utf-8', status=202)

#         elif result['status']=='waiting':

#             # switch to the tab to check page status using javascript
#             handle = result['handle']
#             self.browser.switch_to.window(handle)
#             document_status=self.browser.execute_script("return document.readyState;")

#             if document_status=='complete':
#                 self.requests[request.url]['status'] = 'done'
#                 self.handle_queue.put(handle)
#                 return HtmlResponse(url=request.url, body=self.browser.page_source, request=request, encoding='utf-8',
#                                     status=200)
#             else:
#                 return HtmlResponse(url=request.url, request=request, encoding='utf-8', status=202)

#         elif result['status']=="done":
#             # Filter repeat URL
#             raise IgnoreRequest

#     def __del__(self):
#         self.browser.quit()
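
The tab-pool middleware above reports "tab still loading" by returning an empty response with HTTP status 202 and expects the same URL to be requested again until a 200 comes back. A minimal spider-side sketch of that polling loop (the spider name and start URL are placeholders, not part of the original code):

import scrapy

class PollingSpider(scrapy.Spider):
    name = 'polling_example'                 # placeholder name
    start_urls = ['https://example.com/']    # placeholder URL

    def parse(self, response):
        if response.status == 202:
            # Tab not ready yet: re-issue the same request and poll again;
            # dont_filter=True keeps the dupefilter from dropping the repeat.
            yield response.request.replace(dont_filter=True)
            return
        # status 200: page_source captured from the browser tab
        self.logger.info('page ready: %s (%d bytes)', response.url, len(response.body))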




 
