# -*- coding: utf-8 -*-
import scrapy
from testspider.items import TestspiderItem


class TestbaiduSpider(scrapy.Spider):
    name = 'testbaidu'
    allowed_domains = ['baidu.com']
    start_urls = ['http://tieba.baidu.com/f?kw=%E7%BD%91%E7%BB%9C%E7%88%AC%E8%99%AB&ie=utf-8', ]

    def parse(self, response):
        # Each thread in the forum list is an <li class="j_thread_list clearfix"> node
        data_list = response.xpath('//li[@class="j_thread_list clearfix"]')
        print(len(data_list))
        for line in data_list:
            item = TestspiderItem()
            item['title'] = line.xpath('.//div[contains(@class,"threadlist_title pull_left j_th_tit ")]/a/text()').extract()
            item['author'] = line.xpath('.//div[contains(@class,"threadlist_author pull_right")]//span[contains(@class,"frs-author-name-wrap")]/a/text()').extract()
            item['reply'] = line.xpath('.//div[contains(@class,"col2_left j_threadlist_li_left")]/span/text()').extract()
            yield item
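For reference (the post does not show items.py), a minimal sketch of TestspiderItem with the three fields the spider fills in; the field names are taken from the spider above:

import scrapy


class TestspiderItem(scrapy.Item):
    title = scrapy.Field()   # thread title
    author = scrapy.Field()  # thread author name
    reply = scrapy.Field()   # reply count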
Does anyone know why print(len(data_list)) prints 0??
I simulated a visit to the Baidu Tieba page and saved the response, and found that the returned HTML is malformed. Could that be why XPath fails to parse it?
import requests
import time

url = 'http://tieba.baidu.com/f?kw=%E7%BD%91%E7%BB%9C%E7%88%AC%E8%99%AB&ie=utf-8'
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.140 Safari/537.36 Edge/18.17763'
}
r = requests.get(url, headers=headers)
time.sleep(3)
# Save the raw response so the returned HTML can be inspected offline
with open('baidu_tieba.html', 'wb') as fp:
    fp.write(r.content)
print(r.url)
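To confirm the suspicion, you can run the spider's XPath against the saved file; if the thread list is rendered by JavaScript, it should match zero nodes. A small check sketch (the file name baidu_tieba.html comes from the snippet above):

from lxml import etree

with open('baidu_tieba.html', 'rb') as fp:
    tree = etree.HTML(fp.read())

# Same XPath the spider uses; expect 0 matches if the list is injected by JS
print(len(tree.xpath('//li[@class="j_thread_list clearfix"]')))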
The problem has been found. The page is loaded dynamically (the thread list is filled in by JavaScript), so parsing the initial HTML directly with XPath cannot read the data.
That is why data_list = response.xpath('//li[@class="j_thread_list clearfix"]') always has length 0.
Solution:
Enable the project's downloader middleware in settings.py (Scrapy's project template names it TestspiderDownloaderMiddleware; see the sketch below), then rewrite process_request in middlewares.py as shown after it:
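A minimal settings.py sketch; the dotted path and class name are assumed from Scrapy's default project template for a project called testspider:

DOWNLOADER_MIDDLEWARES = {
    'testspider.middlewares.TestspiderDownloaderMiddleware': 543,
}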
# Imports needed at the top of middlewares.py:
from time import sleep
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import scrapy

    def process_request(self, request, spider):
        # Create an options object so Chrome runs headless (no visible window)
        chrome_options = Options()
        chrome_options.add_argument('--headless')
        chrome_options.add_argument('--disable-gpu')
        self.driver = webdriver.Chrome(r'D:\chromedriver_win32\chromedriver.exe', options=chrome_options)
        print("##middleware####URL=%s" % request.url)
        self.driver.get(request.url)  # the original getrequest() is not a WebDriver method
        sleep(2)  # give the page's JavaScript time to render
        # Grab the fully rendered page source
        browser_html = self.driver.page_source
        self.driver.quit()
        return scrapy.http.HtmlResponse(url=request.url, body=browser_html.encode('utf-8'), encoding='utf-8', request=request)
The point of doing this: when the spider issues the request for the Tieba page, the request travels engine -> scheduler -> engine, and just before the engine hands it to the downloader it passes through the downloader middleware, so the process_request function intercepts it. Selenium with webdriver.Chrome then simulates a browser visit to the page, and the rendered HTML is wrapped in a scrapy.http.HtmlResponse, which is passed back through the engine to the spider. In def parse(self, response):, XPath can then extract the data normally.
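One possible refinement, not from the original post: launching and quitting a fresh Chrome instance for every request is slow. A common pattern is to create the driver once and close it when the spider closes; a minimal sketch, assuming the template class name TestspiderDownloaderMiddleware and the same chromedriver path:

from time import sleep

from scrapy import signals
from scrapy.http import HtmlResponse
from selenium import webdriver
from selenium.webdriver.chrome.options import Options


class TestspiderDownloaderMiddleware(object):
    def __init__(self):
        # Start one headless Chrome for the whole crawl instead of one per request
        chrome_options = Options()
        chrome_options.add_argument('--headless')
        chrome_options.add_argument('--disable-gpu')
        self.driver = webdriver.Chrome(r'D:\chromedriver_win32\chromedriver.exe', options=chrome_options)

    @classmethod
    def from_crawler(cls, crawler):
        mw = cls()
        # Close the browser when the spider finishes
        crawler.signals.connect(mw.spider_closed, signal=signals.spider_closed)
        return mw

    def process_request(self, request, spider):
        self.driver.get(request.url)
        sleep(2)  # wait for JavaScript to render the thread list
        return HtmlResponse(url=request.url, body=self.driver.page_source.encode('utf-8'),
                            encoding='utf-8', request=request)

    def spider_closed(self, spider):
        self.driver.quit()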