Scraping JD.com product data with Scrapy

items.py defines the fields each scraped product carries:

import scrapy


class JdItem(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    title = scrapy.Field()        # product title
    price = scrapy.Field()        # price
    comment_num = scrapy.Field()  # number of reviews
    url = scrapy.Field()          # product page URL
    info = scrapy.Field()         # detailed specifications (nested dict)

The spider (jd.py). JD's search shows 60 products per results page: the first 30 are rendered in the initial HTML, and the last 30 are fetched from s_new.php using the ids of the first 30.

# -*- coding: utf-8 -*-

import scrapy

from JD.items import JdItem


class JingdongSpider(scrapy.Spider):
    name = 'jd'
    allowed_domains = ['jd.com']  # use 'jd.com' here; 'www.jd.com' can cause requests to search.jd.com to be filtered as offsite
    keyword = "手机"  # search keyword ("mobile phone")
    page = 1
    # first 30 products of a results page (rendered in the initial HTML)
    url = 'https://search.jd.com/Search?keyword=%s&enc=utf-8&qrst=1&rt=1&stop=1&vt=2&wq=%s&cid2=653&cid3=655&page=%d&click=0'
    # last 30 products of a results page (loaded via AJAX from s_new.php)
    next_url = 'https://search.jd.com/s_new.php?keyword=%s&enc=utf-8&qrst=1&rt=1&stop=1&vt=2&wq=%s&cid2=653&cid3=655&page=%d&scrolling=y&show_items=%s'

    def start_requests(self):
        yield scrapy.Request(self.url % (self.keyword, self.keyword, self.page), callback=self.parse)

    def parse(self, response):
        """
        爬取每页的前三十个商品,数据直接展示在原网页中
        :param response:
        :return:
        """
        ids = []
        for li in response.xpath('//*[@id="J_goodsList"]/ul/li'):
            item = JdItem()

            title = li.xpath('div/div/a/em/text()').extract()  # title
            price = li.xpath('div/div/strong/i/text()').extract()  # price
            comment_num = li.xpath('div/div/strong/a/text()').extract()  # review count
            pid = li.xpath('@data-pid').extract()  # product id, needed for the s_new.php request
            ids.append(''.join(pid))

            url = li.xpath('div/div[@class="p-name p-name-type-2"]/a/@href').extract()  # detail-page link to follow

            item['title'] = ''.join(title)
            item['price'] = ''.join(price)
            item['comment_num'] = ''.join(comment_num)
            item['url'] = ''.join(url)

            if item['url'].startswith('//'):
                # protocol-relative link, e.g. //item.jd.com/...
                item['url'] = 'https:' + item['url']
            elif not item['url'].startswith('https:'):
                # not a regular product link; keep what we have without following it
                item['info'] = None
                yield item
                continue

            yield scrapy.Request(item['url'], callback=self.info_parse, meta={"item": item})

        headers = {'referer': response.url}
        # the request for the last 30 products checks the Referer header; it must be
        # the URL of the current results page, otherwise JD redirects to https://www.jd.com/?se=deny
        self.page += 1
        yield scrapy.Request(self.next_url % (self.keyword, self.keyword, self.page, ','.join(ids)),
                             callback=self.next_parse, headers=headers)

    def next_parse(self, response):
        """
        爬取每页的后三十个商品,数据展示在一个特殊链接中:url+id(这个id是前三十个商品的id)
        :param response:
        :return:
        """
        for li in response.xpath('//li[@class="gl-item"]'):
            item = JdItem()
            title = li.xpath('div/div/a/em/text()').extract()  # title
            price = li.xpath('div/div/strong/i/text()').extract()  # price
            comment_num = li.xpath('div/div/strong/a/text()').extract()  # review count
            url = li.xpath('div/div[@class="p-name p-name-type-2"]/a/@href').extract()  # detail-page link to follow

            item['title'] = ''.join(title)
            item['price'] = ''.join(price)
            item['comment_num'] = ''.join(comment_num)
            item['url'] = ''.join(url)

            if item['url'].startswith('//'):
                # protocol-relative link, e.g. //item.jd.com/...
                item['url'] = 'https:' + item['url']
            elif not item['url'].startswith('https:'):
                # not a regular product link; keep what we have without following it
                item['info'] = None
                yield item
                continue

            yield scrapy.Request(item['url'], callback=self.info_parse, meta={"item": item})

        if self.page < 200:
            # the counter advances twice per results page (once in parse, once here),
            # so 200 corresponds to roughly 100 search result pages
            self.page += 1
            yield scrapy.Request(self.url % (self.keyword, self.keyword, self.page), callback=self.parse)

    def info_parse(self, response):
        """
        链接跟进,爬取每件商品的详细信息,所有的信息都保存在item的一个子字段info中
        :param response:
        :return:
        """
        item = response.meta['item']
        item['info'] = {}
        type_ = response.xpath('//div[@class="inner border"]/div[@class="head"]/a/text()').extract()
        name = response.xpath('//div[@class="item ellipsis"]/text()').extract()
        item['info']['type'] = ''.join(type_)
        item['info']['name'] = ''.join(name)

        # each Ptable-item is one specification group: h3 is the group name,
        # dt/dd pairs are the individual spec name/value entries
        for div in response.xpath('//div[@class="Ptable"]/div[@class="Ptable-item"]'):
            h3 = ''.join(div.xpath('h3/text()').extract())
            if h3 == '':
                h3 = "未知"  # "unknown" group name
            dt = div.xpath('dl/dt/text()').extract()
            dd = div.xpath('dl/dd[not(@class)]/text()').extract()
            item['info'][h3] = {}
            for t, d in zip(dt, dd):
                item['info'][h3][t] = d
        yield item
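
For reference, a quick standalone sketch of how the two URL templates above expand. The keyword, page number, and product ids are made-up values, only meant to show the shape of the two requests:

# sketch: how the two search URLs expand (illustrative values only)
keyword = "手机"
page = 1
ids = ["100012043978", "100009077475"]  # hypothetical data-pid values taken from the first 30 items

url = ('https://search.jd.com/Search?keyword=%s&enc=utf-8&qrst=1&rt=1&stop=1'
       '&vt=2&wq=%s&cid2=653&cid3=655&page=%d&click=0') % (keyword, keyword, page)

next_url = ('https://search.jd.com/s_new.php?keyword=%s&enc=utf-8&qrst=1&rt=1&stop=1'
            '&vt=2&wq=%s&cid2=653&cid3=655&page=%d&scrolling=y&show_items=%s'
            ) % (keyword, keyword, page + 1, ','.join(ids))

print(url)       # normal results page (first 30 products)
print(next_url)  # AJAX endpoint (last 30 products); must be requested with a matching Referer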

pipelines.py stores each scraped item in MongoDB:

# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
from pymongo import MongoClient


class JdPipeline(object):
    def __init__(self):
        # Host, port, database and collection are hard-coded here for simplicity;
        # see the from_crawler sketch after this class for reading them from settings.py.

        # create a MongoDB client
        client = MongoClient(host="127.0.0.1", port=27017)

        # select the database
        db = client["JingDong"]

        # select the collection
        self.col = db["JingDongPhone"]

    def process_item(self, item, spider):
        data = dict(item)
        self.col.insert_one(data)  # insert() is deprecated in pymongo 3 and removed in 4
        return item
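
The commented-out lines in __init__ hint at reading the connection parameters from settings.py instead of hard-coding them. A minimal sketch of that, using Scrapy's from_crawler hook and the MONGODB_* keys defined in settings.py below (the key names follow this project; adapt as needed):

from pymongo import MongoClient


class JdPipeline(object):
    def __init__(self, host, port, dbname, col):
        self.host, self.port, self.dbname, self.colname = host, port, dbname, col

    @classmethod
    def from_crawler(cls, crawler):
        # pull the connection parameters from settings.py, with sensible defaults
        s = crawler.settings
        return cls(
            host=s.get('MONGODB_HOST', '127.0.0.1'),
            port=s.getint('MONGODB_PORT', 27017),
            dbname=s.get('MONGODB_DBNAME', 'JingDong'),
            col=s.get('MONGODB_COL', 'JingDongPhone'),
        )

    def open_spider(self, spider):
        # connect once when the spider starts
        self.client = MongoClient(host=self.host, port=self.port)
        self.col = self.client[self.dbname][self.colname]

    def close_spider(self, spider):
        self.client.close()

    def process_item(self, item, spider):
        self.col.insert_one(dict(item))
        return item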

settings.py (only the non-default parts matter here):

# -*- coding: utf-8 -*-

# Scrapy settings for JD project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     https://docs.scrapy.org/en/latest/topics/settings.html
#     https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#     https://docs.scrapy.org/en/latest/topics/spider-middleware.html

BOT_NAME = 'JD'

SPIDER_MODULES = ['JD.spiders']
NEWSPIDER_MODULE = 'JD.spiders'

# Crawl responsibly by identifying yourself (and your website) on the user-agent
USER_AGENT = 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.62 Safari/537.36'

# Obey robots.txt rules
ROBOTSTXT_OBEY = False

# MongoDB host (loopback address)
MONGODB_HOST = '127.0.0.1'
# MongoDB port (default 27017)
MONGODB_PORT = 27017
# database name
MONGODB_DBNAME = 'JingDong'
# collection name
MONGODB_COL = 'JingDongPhone'
# Configure maximum concurrent requests performed by Scrapy (default: 16)
# CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
# DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
# CONCURRENT_REQUESTS_PER_DOMAIN = 16
# CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
# COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
# TELNETCONSOLE_ENABLED = False

# Override the default request headers:
# DEFAULT_REQUEST_HEADERS = {
#     "authority": "search.jd.com",
#     "method": "GET",
#     "path": "/s_new.php?keyword=iphone&enc=utf-8&qrst=1&rt=1&stop=1&vt=2&suggest=1.his.0.0",
#     "scheme": "https",
#     "cookie": "shshshfpa=817b0820-cebe-1aa5-5eaa-8f8d3b5945b3-1564123066; shshshfpb=nyq8Po%20NsVQIn1ih2zomJlw%3D%3D; TrackID=167xINX9YcKrGWU-JQ0EQWhwYstFi3gNOxFq6Em4_l6J6OECY5-pwyzHxRFr6TTZkLHI1m_3orstgEzPhWGk1pkbfG_ASOMSSscDY_oEz4XQ; pinId=DRR64H7p6D2CxuR9knABB7V9-x-f3wj7; qrsc=3; __jdu=502413746; areaId=2; PCSYCityID=CN_310000_310100_310112; xtest=3925.cf6b6759; ipLoc-djd=2-2825-51931-0; rkv=V0600; user-key=88fa50f5-ec38-48ff-9efe-beecaa5ffc96; cn=0; unpl=V2_ZzNtbRdfQEF8DRMDeR9ZDGIHFAhKUhcRd1tBVnNLXAcwB0FbclRCFX0UR1xnGlgUZwMZWEpcRxVFCEdkeBBVAWMDE1VGZxBFLV0CFSNGF1wjU00zQwBBQHcJFF0uSgwDYgcaDhFTQEJ2XBVQL0oMDDdRFAhyZ0AVRQhHZH8YXwBnARRYRWdzEkU4dlB8G1oEVwIiXHIVF0l1CkJRfxkRAWYAF11AUUYSRQl2Vw%3d%3d; __jdv=76161171|baidu-pinzhuan|t_288551095_baidupinzhuan|cpc|0f3d30c8dba7459bb52f2eb5eba8ac7d_0_d92e84df3748457d94e53b639c13f5b7|1569383391707; __jda=122270672.502413746.1564123065.1569380256.1569383392.14; __jdc=122270672; shshshfp=f5c7274df7eb1773b86a72faa494fed4; 3AB9D23F7A4B3C9B=SHZYIPK2KZYMKRXEKXFSLWSYQOVLA745EZ4NXQTNMFLQMVFRJKZT7VYDEIY6L2USE2KDEOHO2IPNACME4W7GJ2LKTM",
#     "referer": "https://search.jd.com/Search?keyword=iphone&enc=utf-8&suggest=1.his.0.0&wq=&pvid=ec4030074500424391b371d06a8a62fd",
# }

# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
# SPIDER_MIDDLEWARES = {
#    'JD.middlewares.JdSpiderMiddleware': 543,
# }

# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# DOWNLOADER_MIDDLEWARES = {
#    'JD.middlewares.JdDownloaderMiddleware': 543,
# }

# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
# EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
# }

# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
    'JD.pipelines.JdPipeline': 300,
}

# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
# AUTOTHROTTLE_ENABLED = True
# The initial download delay
# AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
# AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
# AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
# AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
# HTTPCACHE_ENABLED = True
# HTTPCACHE_EXPIRATION_SECS = 0
# HTTPCACHE_DIR = 'httpcache'
# HTTPCACHE_IGNORE_HTTP_CODES = []
# HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
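
With everything in place, the spider is started from the project root with "scrapy crawl jd". A quick way to check what ended up in MongoDB, assuming the local instance configured above:

from pymongo import MongoClient

client = MongoClient("127.0.0.1", 27017)
col = client["JingDong"]["JingDongPhone"]

print(col.count_documents({}))          # number of products stored so far
print(col.find_one({}, {"_id": 0,       # peek at one document without the ObjectId
                        "title": 1, "price": 1, "comment_num": 1}))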

 
