Scrapy + Selenium + PhantomJS Explained, with a Beginner Example (Full Code Included)

When Scrapy has to handle pages that are rendered dynamically with JavaScript, there are two common approaches:
one is to analyze the AJAX requests, find the underlying API, and scrape that directly;
the other is to hook in Selenium (or Splash) and let a browser render the page before scraping it.

This exercise uses Selenium + Scrapy + PhantomJS to crawl Taobao product listings.

First install the PhantomJS binary plus the scrapy, selenium and pymongo libraries (e.g. pip install scrapy selenium pymongo; PhantomJS is a separate download and must be on your PATH).
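
Before wiring anything into Scrapy it is worth confirming that Selenium can actually drive PhantomJS. A standalone sanity check (this assumes a Selenium 3.x release, which still ships the PhantomJS driver, and that the phantomjs binary is on your PATH):

# sanity check: can Selenium start PhantomJS and render a page?
from selenium import webdriver

browser = webdriver.PhantomJS()        # needs phantomjs on PATH and Selenium 3.x
browser.get('https://www.taobao.com')
print(browser.title)                   # a printed title means rendering works
browser.quit()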

Then create the Scrapy project (here it is called scrapyseleniumPro) and generate a spider inside it (seleniumTest).

Start with the item definition, to pin down exactly which fields will be scraped:

import scrapy


class ScrapyseleniumproItem(scrapy.Item):
    # name of the MongoDB collection the pipeline writes to
    collection = 'products'
    image = scrapy.Field()  # product image URL
    price = scrapy.Field()  # price text
    deal = scrapy.Field()   # sales/deal count text
    title = scrapy.Field()  # product title

In the spider we first build the URLs to crawl (the keyword search page, up to a configured number of pages) and yield them as Requests; on their way to being downloaded these requests pass through our downloader middleware.
First half of the spider:

import scrapy
from urllib.parse import quote
from scrapyseleniumPro.items import ScrapyseleniumproItem


class SeleniumtestSpider(scrapy.Spider):
    name = 'seleniumTest'
    # allowed_domains = ['www.taobao.com']
    start_urls = ['https://uland.taobao.com/sem/tbsearch?refpid=mm_26632258_3504122_32538762&keyword=']


    def start_requests(self):
        # one request per (keyword, page) combination
        for keyword in self.settings.get('KEYWORDS'):
            for page in range(1, self.settings.get('MAX_PAGE') + 1):
                url = self.start_urls[0] + quote(keyword)
                yield scrapy.Request(
                    url=url,
                    callback=self.parse,
                    meta={'page': page},  # the middleware reads the page number from meta
                    dont_filter=True,     # the same URL is yielded for every page
                )
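
Note that quote() only percent-encodes the keyword for use in the URL: an ASCII keyword such as 'iPad' passes through unchanged, while a Chinese keyword (shown here purely as an illustration) is UTF-8 percent-encoded:

# what quote() does to the keyword (illustration only)
from urllib.parse import quote

print(quote('iPad'))       # -> iPad (unchanged)
print(quote('平板电脑'))    # -> %E5%B9%B3%E6%9D%BF%E7%94%B5%E8%84%91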

Once the engine schedules these requests, each one is intercepted by the downloader middleware's process_request. That is where we plug in Selenium: load the page in PhantomJS so the dynamically rendered content is complete, grab the page source, and return it wrapped in an HtmlResponse (which short-circuits Scrapy's own downloader).
Full middlewares code:

from selenium import webdriver
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from scrapy.http import HtmlResponse
from logging import getLogger

class SeleniumMiddleware:
    def __init__(self, timeout=None, service_args=None):
        # logger for this middleware
        self.logger = getLogger(__name__)
        self.timeout = timeout
        # initialize the PhantomJS webdriver
        self.browser = webdriver.PhantomJS(service_args=service_args or [])
        # set the window size
        self.browser.set_window_size(1400, 700)
        # page-load timeout
        self.browser.set_page_load_timeout(self.timeout)
        self.wait = WebDriverWait(self.browser, self.timeout)

    def __del__(self):
        # quit() shuts down the PhantomJS process, not just the window
        self.browser.quit()

    def process_request(self, request, spider):
        self.logger.debug('PhantomJS is Starting')
        # page number of the page being crawled (passed via request.meta)
        page = request.meta.get('page')
        try:
            self.browser.get(request.url + str(page))
            self.wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, '#J_pc-search-page-nav > '
                                                                             'span.pc-search-page-item.pc-search-page'
                                                                             '-item-after.J_page-nav-item')))
            return HtmlResponse(url=request.url, body=self.browser.page_source, request=request, encoding='utf-8',
                                status=200)

        except TimeoutException:
            return HtmlResponse(url=request.url,
                                status=500,
                                request=request)

    @classmethod
    def from_crawler(cls, crawler):
        return cls(timeout=crawler.settings.get('SELENIUM_TIMEOUT'),
                   service_args=crawler.settings.get('PHANTOMJS_SERVICE_ARGS'))
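
The service_args handed over in from_crawler come straight from the PHANTOMJS_SERVICE_ARGS setting. An empty list is fine, but standard PhantomJS command-line switches can be passed through it, for example (purely optional, illustrative values):

# settings.py — optional PhantomJS switches (the empty list used later also works)
PHANTOMJS_SERVICE_ARGS = ['--load-images=false',   # skip images to speed up rendering
                          '--disk-cache=true']     # enable the on-disk cache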

The response then goes back to the spider, where the rendered HTML is parsed and the fields we need are extracted.
Second half of the spider:

    def parse(self, response):
        products = response.xpath('//*[@id="mx_5"]/ul/li')
        for product in products:
            item = ScrapyseleniumproItem()
            item['price'] = "".join(product.xpath('.//a/div[2]/text()').extract()).strip()
            item['title'] = "".join(product.xpath('.//a/div[1]/span/text()').extract()).strip()
            item['deal'] = "".join(product.xpath('.//a/div[4]/div[2]/text()').extract()).strip()
            # remove the trailing '_.webp' suffix (str.strip('_.webp') would strip characters, not the suffix)
            image = product.xpath('.//a/img/@src').extract_first() or ''
            item['image'] = image[:-len('_.webp')] if image.endswith('_.webp') else image

            yield item

Finally, the items are stored in MongoDB by a pipeline.
pipelines:


import pymongo

class ScrapyseleniumproPipeline:
    def process_item(self, item, spider):
        return item

class MongoPipeline(object):
    def __init__(self, mongo_url, mongo_db):
        self.mongo_url = mongo_url
        self.mongo_db = mongo_db

    @classmethod
    def from_crawler(cls, crawler):
        return cls(mongo_url=crawler.settings.get('MONGO_URL'),
                   mongo_db=crawler.settings.get('MONGO_DB'))

    def open_spider(self, spider):
        self.client = pymongo.MongoClient(self.mongo_url)
        self.db = self.client[self.mongo_db]

    def process_item(self, item, spider):
        # insert_one replaces the long-deprecated Collection.insert
        self.db[item.collection].insert_one(dict(item))
        return item

    def close_spider(self, spider):
        self.client.close()
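
After a run, the stored documents can be spot-checked with a few lines of pymongo; this is an ad-hoc script (not part of the project) that reuses the MONGO_URL, MONGO_DB and collection values defined above:

# quick ad-hoc check of what landed in MongoDB
import pymongo

client = pymongo.MongoClient('localhost')                     # MONGO_URL
for doc in client['taobao']['products'].find().limit(3):      # MONGO_DB / item collection
    print(doc)
client.close()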

Pay particular attention to the various options in settings.py.
The full file:

# Scrapy settings for scrapyseleniumPro project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     https://docs.scrapy.org/en/latest/topics/settings.html
#     https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#     https://docs.scrapy.org/en/latest/topics/spider-middleware.html

KEYWORDS = ['iPad']
MAX_PAGE = 10

SELENIUM_TIMEOUT = 2
PHANTOMJS_SERVICE_ARGS = []

MONGO_URL = 'localhost'
MONGO_DB = 'taobao'

BOT_NAME = 'scrapyseleniumPro'

SPIDER_MODULES = ['scrapyseleniumPro.spiders']
NEWSPIDER_MODULE = 'scrapyseleniumPro.spiders'

LOG_LEVEL = 'ERROR'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'scrapyseleniumPro (+http://www.yourdomain.com)'

# Obey robots.txt rules
ROBOTSTXT_OBEY = False

# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
#COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False

# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#   'Accept-Language': 'en',
#}

# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
#    'scrapyseleniumPro.middlewares.ScrapyseleniumproSpiderMiddleware': 543,
#}

# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
DOWNLOADER_MIDDLEWARES = {
   'scrapyseleniumPro.middlewares.SeleniumMiddleware': 543,
}

# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}

# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
   'scrapyseleniumPro.pipelines.MongoPipeline': 300,
}

# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
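
The crawl is started with scrapy crawl seleniumTest from the project directory. If you prefer launching it from a script, a minimal sketch (a hypothetical run.py placed next to scrapy.cfg so the project settings can be found) could look like this:

# run.py — minimal launcher sketch; 'seleniumTest' is the spider's name attribute
from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings

process = CrawlerProcess(get_project_settings())
process.crawl('seleniumTest')
process.start()   # blocks until the crawl finishes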

MongoDB result (screenshot omitted).
