Crawling JD (京东) with Selenium and Scrapy
spider
import scrapy
from JD.items import JdItem
from selenium import webdriver
from selenium.webdriver.firefox.options import Options


class JdSpider(scrapy.Spider):
    name = 'Jd'
    # Domains the spider is allowed to crawl
    allowed_domains = ['jd.com']
    # Start URL
    start_urls = ['https://list.jd.com/list.html?cat=670,671,672']

    def __init__(self):
        """
        Initialize Selenium inside the spider so the browser is
        started and shut down only once per crawl.
        """
        super().__init__()
        # Headless mode
        # self.firefox = Options()
        # self.firefox.add_argument('--headless')
        # self.browser = webdriver.Firefox(options=self.firefox)
        # With a visible browser window
        self.browser = webdriver.Firefox()
        # Page-load timeout
        self.browser.set_page_load_timeout(15)

    def closed(self, reason):
        """Shut down Selenium automatically when the spider finishes."""
        self.browser.quit()

    def parse(self, response):
        li_list = response.xpath('//ul[@class="gl-warp clearfix"]/li')
        for li in li_list:
            # Create a fresh item for every product in the list
            item = JdItem()
            url = li.xpath('.//div[@class="p-name"]/a/@href').extract()[0]
            item['url'] = 'https:' + url
            item['name'] = li.xpath('.//div[@class="p-name"]//em/text()').extract()[0].strip()
            item['price'] = li.xpath('.//div[@class="p-price"]/strong[@class="J_price"]/i/text()').extract()[0]
            item['comment'] = li.xpath('.//div[@class="p-commit p-commit-n"]//a[@class="comment"]//text()').extract()[0]
            yield item
        # Follow the "next page" link until there is none
        next_page = response.xpath('.//a[@class="pn-next"]/@href').extract()
        if next_page:
            next_page = 'https://list.jd.com' + next_page[0]
            yield scrapy.Request(next_page, callback=self.parse)
Middlewares
Modify the process_response() method of JdDownloaderMiddleware:
from scrapy.http import HtmlResponse


class JdDownloaderMiddleware(object):
    def process_response(self, request, response, spider):
        if spider.name == 'Jd':
            # Open the URL with Selenium and return the rendered page
            spider.browser.get(url=request.url)
            # Scroll to the bottom of the page to trigger lazy-loaded content
            js = "window.scrollTo(0, document.body.scrollHeight)"
            spider.browser.execute_script(js)
            raw_response = spider.browser.page_source
            return HtmlResponse(url=spider.browser.current_url, body=raw_response,
                                encoding="utf8", request=request)
        else:
            return response
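The scroll triggers JD's lazy loading, but page_source may still be grabbed before the extra products finish rendering. A minimal sketch of an explicit wait that could be called between the scroll and page_source, assuming the lazy-loaded products are li.gl-item elements (a hypothetical selector, adjust it to the real page):

import time

from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC


def wait_for_products(browser, timeout=10):
    """Block until at least one product <li> is present, then give the
    page a short grace period to finish injecting content."""
    WebDriverWait(browser, timeout).until(
        EC.presence_of_element_located((By.CSS_SELECTOR, 'li.gl-item'))  # hypothetical selector
    )
    time.sleep(1)  # small buffer for prices/comments loaded by JS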
items
import scrapy


class JdItem(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    url = scrapy.Field()
    name = scrapy.Field()
    price = scrapy.Field()
    comment = scrapy.Field()
settings
Enable DOWNLOADER_MIDDLEWARES and ITEM_PIPELINES in settings.py; running the spider from the terminal will then show the scraped items.
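For reference, the two dicts are already present (commented out) in the generated settings.py; the priorities below are the template defaults, and the paths assume the project package is named JD:

# settings.py
DOWNLOADER_MIDDLEWARES = {
    'JD.middlewares.JdDownloaderMiddleware': 543,
}

ITEM_PIPELINES = {
    'JD.pipelines.JdPipeline': 300,
}

The spider can then be started from the project directory with:

scrapy crawl Jd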
pipelines
With ITEM_PIPELINES enabled in settings, the pipeline can save the scraped data to a database.
MongoDB is schemaless and automatically creates databases and collections that do not exist yet, so the items can simply be inserted.
from pymongo import MongoClient

client = MongoClient(host='IP', port=27017)
collection = client['database']['collection']


class JdPipeline(object):
    def process_item(self, item, spider):
        # Items must be converted to plain dicts before being stored
        collection.insert_one(dict(item))
        return item
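Creating the MongoClient at import time works, but a common alternative is to open and close the connection together with the spider. A sketch of that pattern, using illustrative MONGO_URI / MONGO_DB setting names that would have to be added to settings.py:

from pymongo import MongoClient


class JdMongoPipeline(object):
    def open_spider(self, spider):
        # MONGO_URI / MONGO_DB are hypothetical setting names
        uri = spider.settings.get('MONGO_URI', 'mongodb://localhost:27017')
        db_name = spider.settings.get('MONGO_DB', 'jd')
        self.client = MongoClient(uri)
        self.collection = self.client[db_name]['products']

    def close_spider(self, spider):
        self.client.close()

    def process_item(self, item, spider):
        # Convert the item to a plain dict before insertion
        self.collection.insert_one(dict(item))
        return item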