# NOTE: Wiring Selenium into Scrapy itself is not complicated. The hard part is
# that page requests frequently time out and a quick fix is hard to find. Two
# timeouts (in seconds) are configured below; tune them for your own use case.
import sys
import time
import random
import traceback
from selenium.common.exceptions import TimeoutException
reload(sys)
sys.setdefaultencoding('utf8')
from pyquery import PyQuery as pq
import scrapy
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
class EListSpider2(scrapy.Spider):
    """Scrapy spider that opens the Weibo search page in a Selenium Firefox.

    Generous timeouts (360 s) are configured both on the Firefox profile and
    on the page load, because this site frequently triggers Selenium
    timeouts.  ``parse`` only drives the browser; it yields no items.
    """

    name = "e_list_spider2"

    def __init__(self, *args, **kwargs):
        super(EListSpider2, self).__init__(*args, **kwargs)
        self.start_urls = ['http://s.weibo.com/weibo/']
        # Raise the network response timeout (seconds) on the profile so
        # slow responses do not abort the session.
        fp = webdriver.FirefoxProfile()
        fp.set_preference('http.response.timeout', 360)
        self.driver = webdriver.Firefox(firefox_profile=fp)

    def parse(self, response):
        """Open the Weibo search page in the browser and maximize the window.

        Exceptions from Selenium (typically ``TimeoutException``) are logged
        with a traceback and swallowed so the crawl continues.
        """
        try:
            # Give up on the page load after 360 s instead of hanging forever.
            self.driver.set_page_load_timeout(360)
            self.driver.get("http://s.weibo.com/weibo/")
        except Exception as e:
            print(u'出现异常')
            print(type(e))
            # Bug fix: print_exc() returns None — call it directly instead of
            # printing its return value (the original printed "None" too).
            traceback.print_exc()
        print(u'正在浏览器中打开页面')
        try:
            self.driver.maximize_window()
        except Exception as e:
            print(u'出现异常')
            print(type(e))
            traceback.print_exc()
        print(u'浏览器最大化')
        time.sleep(random.uniform(2, 3))
        print('-------------------------------------over---------------------------')

    def closed(self, reason):
        """Scrapy calls this when the spider finishes.

        Quit Firefox here so the browser process is not leaked (the original
        code never released the driver).
        """
        try:
            self.driver.quit()
        except Exception:
            # Best effort: a failed quit must not mask the crawl result.
            traceback.print_exc()