Scraping every product in a JD.com keyword listing with Scrapy + Selenium

Today we'll scrape a JD.com product listing. The site doesn't require you to stay logged in, but each listing page holds 60 products and only shows them all after you scroll down. Using the keyword "手机" (mobile phones) as an example, we'll grab every product in the listing.

If we try the XPath in the browser, we'll see that only 30 products have been rendered; the other 30 have not. Scroll down and the 30 becomes 60, so the remaining 30 are loaded dynamically.
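If you want to check this quickly outside of Scrapy, here is a minimal standalone sketch (not part of the project) that counts the product nodes before and after scrolling. It uses the same Selenium 3-style API as the rest of this article, and the chromedriver path is a placeholder you need to replace:

import time
from selenium import webdriver

option = webdriver.ChromeOptions()
option.add_argument("--headless")
driver = webdriver.Chrome(executable_path=r'C:\path\to\chromedriver.exe', options=option)

driver.get('https://search.jd.com/Search?keyword=手机')
driver.implicitly_wait(10)

# count the product nodes before scrolling
before = len(driver.find_elements_by_xpath('//div[@class="gl-i-wrap"]'))

# scroll to the bottom and give the lazily loaded half time to render
driver.execute_script('window.scrollTo(0, document.body.scrollHeight)')
time.sleep(3)
after = len(driver.find_elements_by_xpath('//div[@class="gl-i-wrap"]'))

print(before, after)   # expect roughly 30 before and 60 after
driver.quit()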
To deal with this we'll use Selenium to scroll the page down so everything loads, and wire it into Scrapy, mostly for practice since there is more than one way to do this. First create a Scrapy project with scrapy startproject JD, cd into the directory, create a spider with scrapy genspider jd jd.com, and open the project in PyCharm.

Start with items.py and define the fields we want to scrape:

import scrapy


class JdItem(scrapy.Item):
    # phone name
    name = scrapy.Field()
    # product link
    link = scrapy.Field()
    # price
    price = scrapy.Field()
    # number of comments
    comment_num = scrapy.Field()
    # shop name
    shop_name = scrapy.Field()
    # shop link
    shop_link = scrapy.Field()

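A quick aside on how the item is used: a scrapy.Item behaves like a dict restricted to the declared fields, so the spider below can assign values by key and the pipelines can call dict(item). For example:

from JD.items import JdItem

item = JdItem()
item["name"] = "some phone"
item["price"] = "1999元"
print(dict(item))            # {'name': 'some phone', 'price': '1999元'}
# item["color"] = "black"    # would raise KeyError: 'color' is not a declared field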
Next comes the downloader middleware, where Selenium scrolls to the bottom of the page so all products load. In middlewares.py we use HtmlResponse from scrapy.http to hand the rendered page source back to the spider for parsing. Remember to configure your own webdriver and point executable_path at your chromedriver.

import time
from selenium import webdriver
from scrapy.http import HtmlResponse


class SeleniumWare(object):
    def process_request(self, request, spider):
        self.option = webdriver.ChromeOptions()
        self.option.add_argument("--headless")
        # adjust executable_path to your own chromedriver location
        self.driver = webdriver.Chrome(
            executable_path=r'C:\Program Files\Google\Chrome\Application\chromedriver.exe',
            options=self.option)
        self.driver.get(request.url)
        self.driver.implicitly_wait(10)
        # scroll to the bottom so the lazily loaded second half of the list renders
        self.driver.execute_script('var p = document.documentElement.scrollTop=100000')
        time.sleep(3)
        data = self.driver.page_source
        # quit() shuts down chromedriver as well, not just the current window
        self.driver.quit()
        # hand the rendered HTML back to Scrapy so the spider can parse it
        res = HtmlResponse(body=data, encoding="utf-8", request=request, url=request.url)
        return res

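One caveat about this middleware: it starts and tears down a fresh Chrome instance for every request, which is slow. A possible variant (just a sketch, not something wired into this project) creates the driver once via from_crawler and quits it on the spider_closed signal:

import time
from selenium import webdriver
from scrapy import signals
from scrapy.http import HtmlResponse


class SeleniumWare(object):
    @classmethod
    def from_crawler(cls, crawler):
        middleware = cls()
        # quit the shared driver when the spider finishes
        crawler.signals.connect(middleware.spider_closed, signal=signals.spider_closed)
        return middleware

    def __init__(self):
        option = webdriver.ChromeOptions()
        option.add_argument("--headless")
        # adjust executable_path to your own chromedriver location
        self.driver = webdriver.Chrome(
            executable_path=r'C:\Program Files\Google\Chrome\Application\chromedriver.exe',
            options=option)

    def process_request(self, request, spider):
        self.driver.get(request.url)
        self.driver.implicitly_wait(10)
        self.driver.execute_script('window.scrollTo(0, document.body.scrollHeight)')
        time.sleep(3)
        return HtmlResponse(body=self.driver.page_source, encoding="utf-8",
                            request=request, url=request.url)

    def spider_closed(self, spider):
        self.driver.quit()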
Now write the spider:

# -*- coding: utf-8 -*-
import scrapy
from JD.items import JdItem


class JdSpider(scrapy.Spider):
    name = 'jd'
    allowed_domains = ['jd.com']
    start_urls = ['https://search.jd.com/Search?keyword=手机']
    page = 2

    def parse(self, response):
        # grab one node per product
        node_list = response.xpath('//div[@class="gl-i-wrap"]')
        # print the count (should be 60 once the page has been scrolled)
        print(len(node_list))
        # pull the fields out of each node
        for node in node_list:
            item = JdItem()
            # some products are missing fields, so wrap each extraction in
            # try/except and fall back to None instead of raising
            try:
                item["name"] = node.xpath('./div[4]/a/em/text()').extract_first().strip()
            except:
                item["name"] = None
            try:
                item["link"] = response.urljoin(node.xpath('./div[4]/a/@href').extract_first())
            except:
                item["link"] = None

            try:
                item["price"] = node.xpath('./div[3]/strong/i/text()').extract_first() + '元'
            except:
                item["price"] = None

            try:
                item["comment_num"] = node.xpath('./div[5]/strong/a/text()').extract_first()
            except:
                item["comment_num"] = None

            try:
                item["shop_name"] = node.xpath('./div[7]/span/a/text()').extract_first().strip()
            except:
                item["shop_name"] = None

            try:
                item["shop_link"] = "https:" + node.xpath('./div[7]/span/a/@href').extract_first()
            except:
                item["shop_link"] = None
            print(item)
            # hand the item to the pipelines
            yield item
        # build the next-page URL by string formatting
        if self.page < 74:
            next_url = 'https://search.jd.com/Search?keyword=%E6%89%8B%E6%9C%BA&page={}'.format(self.page)
            self.page += 1
            print(next_url)
            yield scrapy.Request(next_url, callback=self.parse)

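To run it, execute scrapy crawl jd from the project directory. If you prefer starting the crawl from a script (for example inside PyCharm), something like the following run.py should work; the file name and location are just a suggestion:

# run.py - a minimal runner, placed next to scrapy.cfg in the project root
from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings

if __name__ == '__main__':
    # load settings.py so the Selenium middleware and the pipelines are active
    process = CrawlerProcess(get_project_settings())
    process.crawl('jd')     # spider name defined in JdSpider.name
    process.start()         # blocks until the crawl finishes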

Next adjust settings.py. The key changes: don't obey robots.txt, set a User-Agent, register the downloader middleware, and register the two pipelines, one saving to MongoDB and one to CSV.

# -*- coding: utf-8 -*-

# Scrapy settings for JD project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     https://docs.scrapy.org/en/latest/topics/settings.html
#     https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#     https://docs.scrapy.org/en/latest/topics/spider-middleware.html

BOT_NAME = 'JD'

SPIDER_MODULES = ['JD.spiders']
NEWSPIDER_MODULE = 'JD.spiders'

# Crawl responsibly by identifying yourself (and your website) on the user-agent
USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 ' \
             'Safari/537.36 '

# Obey robots.txt rules
ROBOTSTXT_OBEY = False

# Configure maximum concurrent requests performed by Scrapy (default: 16)
# CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
# DOWNLOAD_DELAY = 0.5
# The download delay setting will honor only one of:
# CONCURRENT_REQUESTS_PER_DOMAIN = 1000000
# CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
# COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
# TELNETCONSOLE_ENABLED = False

# Override the default request headers:
# DEFAULT_REQUEST_HEADERS = {
#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#   'Accept-Language': 'en',
# }

# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
# SPIDER_MIDDLEWARES = {
#    'JD.middlewares.JdSpiderMiddleware': 543,
# }

# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
DOWNLOADER_MIDDLEWARES = {
    'JD.middlewares.SeleniumWare': 543,
}

# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
# EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
# }

# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
    'JD.pipelines.SavePipeline': 300,
    'JD.pipelines.MongoPipline': 301,

}

# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
# AUTOTHROTTLE_ENABLED = True
# The initial download delay
# AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
# AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
# AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
# AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
# HTTPCACHE_ENABLED = True
# HTTPCACHE_EXPIRATION_SECS = 0
# HTTPCACHE_DIR = 'httpcache'
# HTTPCACHE_IGNORE_HTTP_CODES = []

The pipeline code is as follows:

# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
import csv
from pymongo import MongoClient


class SavePipeline(object):
    def open_spider(self, spider):
        # gb18030 so Excel on a Chinese locale opens the CSV correctly
        self.file = open("JD.csv", 'a', newline="", encoding="gb18030")
        self.csv_writer = csv.writer(self.file)
        self.csv_writer.writerow(["标题", "链接", "价格", "评论数", "店铺", "店铺链接"])

    def process_item(self, item, spider):
        self.csv_writer.writerow(
            [item["name"], item["link"], item["price"],
             item["comment_num"], item["shop_name"], item["shop_link"]]
        )
        return item

    def close_spider(self, spider):
        self.file.close()


class MongoPipline(object):
    def open_spider(self, spider):
        self.client = MongoClient('127.0.0.1', 27017)
        self.db = self.client['JD']
        self.col = self.db['Phone']

    def process_item(self, item, spider):
        data = dict(item)
        # insert_one replaces the deprecated Collection.insert
        self.col.insert_one(data)
        return item

    def close_spider(self, spider):
        self.client.close()

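After a crawl you can sanity-check the MongoDB side with a few lines of pymongo (a small sketch assuming the same local server and the JD / Phone database and collection used above):

from pymongo import MongoClient

client = MongoClient('127.0.0.1', 27017)
col = client['JD']['Phone']

print(col.count_documents({}))   # total number of scraped items
print(col.find_one())            # inspect one stored document
client.close()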
Let's look at the results.

The MongoDB collection: (screenshot omitted)

The CSV file: (screenshot omitted)
That's it for the code. If anything is unclear, leave a comment or send me a message. If you found this useful, give it a like.