1. Introduction to CrawlSpider
CrawlSpider is a derived class (subclass) of Spider. The Spider class is designed to crawl only the pages listed in start_urls, whereas CrawlSpider defines a set of rules (Rule)
that provide a convenient mechanism for following links: it extracts links from the pages it crawls and continues crawling from them.
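The core building block is the Rule, which pairs a LinkExtractor (deciding which links to pull out of a page) with instructions for what to do with each matched link. A minimal sketch of the arguments used in this tutorial (simplified; both Rule and LinkExtractor accept more parameters, and the allow pattern here is just a placeholder):

from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import Rule

Rule(
    LinkExtractor(allow=r'some-pattern'),  # regex deciding which URLs to extract
    callback='parse_item',                 # spider method that parses matched responses
    follow=True,                           # keep applying the rules on the pages reached
)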
2. Creating a CrawlSpider project
scrapy startproject wxapp
cd wxapp/
scrapy genspider -t crawl wxapp_spider "wxapp-union.com"
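This generates wxapp/spiders/wxapp_spider.py with a CrawlSpider skeleton. The exact template depends on your Scrapy version, but it looks roughly like this:

import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule


class WxappSpiderSpider(CrawlSpider):
    name = 'wxapp_spider'
    allowed_domains = ['wxapp-union.com']
    start_urls = ['http://wxapp-union.com/']

    rules = (
        Rule(LinkExtractor(allow=r'Items/'), callback='parse_item', follow=True),
    )

    def parse_item(self, response):
        item = {}
        return item

The next step is to replace the placeholder rules and callback with ones that match the target site.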
3. Using CrawlSpider to scrape WeChat Mini Program community data
Project structure:
wxapp_spider.py
# -*- coding: utf-8 -*-
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from wxapp.items import WxappItem


# Inherit from CrawlSpider instead of scrapy.Spider
class WxappSpiderSpider(CrawlSpider):
    # Spider name
    name = 'wxapp_spider'
    # Domains the spider is allowed to crawl
    allowed_domains = ['wxapp-union.com']
    # Starting URL
    start_urls = ['http://www.wxapp-union.com/portal.php?mod=list&catid=2&page=1']

    # Crawling rules:
    # - LinkExtractor extracts every link matching the allow pattern
    # - follow controls whether the rules are applied again to the pages those links lead to
    # - Scrapy deduplicates URLs automatically, so overlapping matches are harmless
    rules = (
        # List pages: keep following pagination links, no callback needed
        Rule(LinkExtractor(allow=r'.+mod=list&catid=2&page=\d'), follow=True),
        # Detail pages: specify a callback to extract data from the response
        Rule(LinkExtractor(allow=r'.+article-.+\.html'), callback='parse_detail', follow=False),
    )

    def parse_detail(self, response):
        # Article title
        title = response.xpath("//h1[@class='ph']/text()").get()
        print(title)
        # Author
        author = response.xpath("//p[@class='authors']/a/text()").get()
        print(author)
        # Publication time
        time = response.xpath("//p[@class='authors']/span/text()").get()
        print(time)
        # Article body: getall() returns every text node under the content cell
        content = response.xpath("//td[@id='article_content']//text()").getall()
        content = "".join(content).strip()
        print(content)
        item = WxappItem(title=title, author=author, time=time, content=content)
        # Hand the item off to the pipeline for storage
        yield item
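The XPath expressions in parse_detail are easiest to work out interactively with scrapy shell before putting them into the spider. A sketch of such a session (the article URL is hypothetical, chosen only to match the detail-page rule):

scrapy shell "http://www.wxapp-union.com/article-1234-1.html"
>>> response.xpath("//h1[@class='ph']/text()").get()
>>> response.xpath("//p[@class='authors']/a/text()").get()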
items.py
# -*- coding: utf-8 -*-

# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html
import scrapy


class WxappItem(scrapy.Item):
    title = scrapy.Field()
    author = scrapy.Field()
    time = scrapy.Field()
    content = scrapy.Field()
pipelines.py
# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
from scrapy.exporters import JsonLinesItemExporter


# Store each scraped item as one JSON object per line in wxapp.json
class WxappPipeline(object):
    def __init__(self):
        # The exporter needs a binary file handle
        self.fp = open('wxapp.json', 'wb')
        self.exporter = JsonLinesItemExporter(self.fp, ensure_ascii=False, encoding='utf-8')

    def process_item(self, item, spider):
        self.exporter.export_item(item)
        return item

    def close_spider(self, spider):
        self.fp.close()
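JsonLinesItemExporter writes one JSON object per line as each item arrives, which is why no start/finish calls are needed above. If a single JSON array is preferred instead, Scrapy's JsonItemExporter can be used, but it requires the start_exporting()/finish_exporting() calls that write the surrounding brackets. A sketch of that variant (the pipeline name is hypothetical):

from scrapy.exporters import JsonItemExporter


class WxappJsonArrayPipeline(object):  # hypothetical alternative pipeline
    def open_spider(self, spider):
        self.fp = open('wxapp.json', 'wb')
        self.exporter = JsonItemExporter(self.fp, ensure_ascii=False, encoding='utf-8')
        self.exporter.start_exporting()   # writes the opening '['

    def process_item(self, item, spider):
        self.exporter.export_item(item)
        return item

    def close_spider(self, spider):
        self.exporter.finish_exporting()  # writes the closing ']'
        self.fp.close()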
settings.py
# -*- coding: utf-8 -*-
# Scrapy settings for wxapp project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://docs.scrapy.org/en/latest/topics/settings.html
# https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'wxapp'
SPIDER_MODULES = ['wxapp.spiders']
NEWSPIDER_MODULE = 'wxapp.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'wxapp (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
DOWNLOAD_DELAY = 1
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
DEFAULT_REQUEST_HEADERS = {
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    'Accept-Language': 'en',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36',
}
# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
# SPIDER_MIDDLEWARES = {
# 'wxapp.middlewares.WxappSpiderMiddleware': 543,
# }
# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'wxapp.middlewares.WxappDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
# Enable the item pipeline defined in pipelines.py
ITEM_PIPELINES = {
    'wxapp.pipelines.WxappPipeline': 300,
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
start.py (launches the spider)
from scrapy import cmdline
cmdline.execute("scrapy crawl wxapp_spider".split())
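With this file in the project root, the crawl can be started with python start.py, which is equivalent to running scrapy crawl wxapp_spider from a terminal; the wrapper simply makes it convenient to launch and debug the spider from an IDE.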