The basic crawler workflow:
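In code form the loop is: take a URL from the queue, download it, extract data plus new URLs, store the data, and queue the new URLs until nothing is left. A minimal sketch with the requests library; the parse() and save() helpers are placeholders you would write yourself:

import requests

def crawl(start_urls):
    todo = list(start_urls)   # URL queue
    seen = set()              # dedup filter
    while todo:
        url = todo.pop()
        if url in seen:
            continue
        seen.add(url)
        response = requests.get(url)            # 1. send the request
        data, new_urls = parse(response.text)   # 2. extract data and follow-up URLs (parse() is yours to write)
        save(data)                              # 3. store the data (save() likewise)
        todo.extend(new_urls)                   # 4. queue the new URLs and repeat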
The Scrapy framework's crawl workflow:
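Scrapy splits that same loop across components: the Spider yields Requests, the Engine hands them to the Scheduler (which dedups and queues them), the Downloader fetches each one, the response comes back to the Spider's callback, and any yielded Items flow into the Item Pipelines. scrapy_redis moves the scheduler's queue and the dupe filter into Redis, which is what lets several crawler processes cooperate on one crawl, as in the project below.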
Scraping JD books with scrapy_redis:
jd.py
# -*- coding: utf-8 -*-
import scrapy
from jdbook.items import JdbookItem
from copy import deepcopy
import json
import urllib
# Note: distributed crawling needs the four scrapy_redis lines in settings.py;
# items were not reaching MongoDB because parse_book_comment had no yield
# and the pipeline was disabled in ITEM_PIPELINES.
class JdSpider(scrapy.Spider):
    name = 'jd'
    allowed_domains = ['jd.com', 'p.3.cn']
    start_urls = ['https://book.jd.com/booksort.html']

    def parse(self, response):
        dt_list = response.xpath("//div[@class='mc']/dl/dt")  # top-level category list
        for dt in dt_list:
            item = JdbookItem()
            item["b_cate"] = dt.xpath("./a/text()").extract_first()
            em_list = dt.xpath("./following-sibling::dd[1]/em")  # sub-category list
            for em in em_list:
                item["s_href"] = em.xpath("./a/@href").extract_first()
                item["s_cate"] = em.xpath("./a/text()").extract_first()
                if item["s_href"] is not None:
                    item["s_href"] = "https:" + item["s_href"]
                    # deepcopy so concurrent callbacks don't overwrite each other's item
                    yield scrapy.Request(item["s_href"], callback=self.parse_book_list,
                                         meta={"item": deepcopy(item)})
    def parse_book_list(self, response):  # parse a category's book list page
        item = response.meta["item"]
        li_list = response.xpath("//div[@id='plist']/ul/li")
        for li in li_list:
            item["book_img"] = li.xpath(".//div[@class='p-img']//img/@src").extract_first()
            if item["book_img"] is None:  # lazy-loaded images keep the URL in data-lazy-img
                item["book_img"] = li.xpath(".//div[@class='p-img']//img/@data-lazy-img").extract_first()
            item["book_img"] = "https:" + item["book_img"] if item["book_img"] is not None else None
            item["book_name"] = li.xpath(".//div[@class='p-name']/a/em/text()").extract_first(default='').strip()
            item["book_author"] = li.xpath(".//span[@class='author_type_1']/a/text()").extract()
            item["book_press"] = li.xpath(".//span[@class='p-bi-store']/a/@title").extract_first()
            item["book_publish_date"] = li.xpath(".//span[@class='p-bi-date']/text()").extract_first(default='').strip()
            item["book_sku"] = li.xpath("./div/@data-sku").extract_first()
            comment_url = ('https://sclub.jd.com/comment/productPageComments.action'
                           '?&productId={}&score=0&sortType=5&page=0&pageSize=10').format(item['book_sku'])
            # the price lives behind a separate JSON API, so chase it with another request
            yield scrapy.Request('https://p.3.cn/prices/mgets?skuIds=J_{}'.format(item["book_sku"]),
                                 callback=self.parse_book_price, meta={"item": deepcopy(item)})
            # yield scrapy.Request(comment_url, callback=self.parse_book_comment,
            #                      meta={"item": deepcopy(item)})
        # paginate through the list pages
        next_url = response.xpath("//a[@class='pn-next']/@href").extract_first()
        if next_url is not None:
            next_url = urllib.parse.urljoin(response.url, next_url)
            yield scrapy.Request(next_url, callback=self.parse_book_list, meta={"item": item})
    def parse_book_price(self, response):
        item = response.meta["item"]
        # p.3.cn answers with a JSON array of one object per sku; "op" holds the price
        item["book_price"] = json.loads(response.body.decode())[0]["op"]
        yield item

    def parse_book_comment(self, response):
        item = response.meta["item"]
        data = json.loads(response.text)
        # collect every comment body instead of overwriting item['comment'] each loop
        item["comment"] = [comment["content"] for comment in data["comments"]]
        yield item  # without this yield the item never reaches the pipelines
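Because settings.py swaps in the scrapy_redis scheduler and dupe filter, this plain scrapy.Spider already shares its request queue: start scrapy crawl jd on each node and the first one to run seeds the queue from start_urls. scrapy_redis also ships a RedisSpider that reads its seed URLs from a Redis list instead; a minimal sketch, with the class name and redis_key chosen here for illustration:

from scrapy_redis.spiders import RedisSpider

class JdRedisSpider(RedisSpider):
    name = 'jd_redis'                 # hypothetical spider name
    allowed_domains = ['jd.com', 'p.3.cn']
    redis_key = 'jd:start_urls'       # workers block on this Redis list for seed URLs

    def parse(self, response):
        # same category-parsing logic as JdSpider.parse would go here
        pass

Seed it once from any machine with LPUSH jd:start_urls https://book.jd.com/booksort.html in redis-cli; every worker then pops from the same list.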
items.py
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class JdbookItem(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    b_cate = scrapy.Field()             # top-level category
    s_cate = scrapy.Field()             # sub-category
    s_href = scrapy.Field()             # sub-category URL
    book_img = scrapy.Field()
    book_name = scrapy.Field()
    book_author = scrapy.Field()
    book_press = scrapy.Field()
    book_publish_date = scrapy.Field()
    book_sku = scrapy.Field()
    book_price = scrapy.Field()
    comment = scrapy.Field()
settings.py — settings for the distributed crawler
# -*- coding: utf-8 -*-
# Scrapy settings for jdbook project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://doc.scrapy.org/en/latest/topics/settings.html
# https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'jdbook'
SPIDER_MODULES = ['jdbook.spiders']
NEWSPIDER_MODULE = 'jdbook.spiders'
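# scrapy_redis: the four lines below are the "four lines" the spider's note
# mentions. They swap Scrapy's in-process dupe filter and scheduler for
# Redis-backed ones, keep the queue alive between runs, and point every
# worker at the same Redis server, so all processes share one request queue.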
DUPEFILTER_CLASS = "scrapy_redis.dupefilter.RFPDupeFilter"
SCHEDULER = "scrapy_redis.scheduler.Scheduler"
SCHEDULER_PERSIST = True
REDIS_URL = "redis://127.0.0.1:6379"
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'jdbook (+http://www.yourdomain.com)'
USER_AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36'
#LOG_LEVEL = "WARNING"
# Obey robots.txt rules
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'jdbook.middlewares.JdbookSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
# DOWNLOADER_MIDDLEWARES = {
# 'jdbook.middlewares.JdbookDownloaderMiddleware': 543,
# }
# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
    'jdbook.pipelines.JdbookPipeline': 300,   # MongoDB storage
    'jdbook.pipelines.JdbookPipeline1': 301,  # local JSON dump
    # 'scrapy_redis.pipelines.RedisPipeline': 400,  # optional: also mirror items into Redis
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
pipelines.py — storing the items
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
from pymongo import MongoClient
import json

client = MongoClient('localhost', port=27017)
collection = client["jd"]["book1"]

class JdbookPipeline(object):
    def process_item(self, item, spider):
        collection.insert_one(dict(item))  # insert() is deprecated in pymongo 3.x
        return item

class JdbookPipeline1(object):
    def process_item(self, item, spider):
        with open('temp.txt', 'a', encoding='utf-8') as f:
            # one JSON object per item; the newline keeps records separable
            f.write(json.dumps(dict(item), ensure_ascii=False, indent=2) + '\n')
        return item
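The module-level MongoClient above works, but connecting in open_spider and closing in close_spider is the more idiomatic Scrapy pattern. A minimal sketch; the MongoBookPipeline name and the MONGO_URI setting are illustrative, not part of this project:

from pymongo import MongoClient

class MongoBookPipeline(object):
    def open_spider(self, spider):
        # one client per crawl instead of a module-level global
        self.client = MongoClient(spider.settings.get('MONGO_URI', 'mongodb://localhost:27017'))
        self.collection = self.client['jd']['book1']

    def close_spider(self, spider):
        self.client.close()

    def process_item(self, item, spider):
        self.collection.insert_one(dict(item))
        return item

To use it, register it in ITEM_PIPELINES in place of JdbookPipeline.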