IT之家
The data we want to scrape: article title, article URL, publication date, source, original article URL, author, and article tags.
1. Create the project
>>>scrapy startproject ithome
2. Create the spider
>>>scrapy genspider -t crawl news ithome.com
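These two commands scaffold a standard Scrapy project and generate the news spider from the crawl template; the resulting layout should look roughly like this:
ithome/
├── scrapy.cfg
└── ithome/
    ├── __init__.py
    ├── items.py
    ├── middlewares.py
    ├── pipelines.py
    ├── settings.py
    └── spiders/
        ├── __init__.py
        └── news.py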
3. Write items.py to define the fields to scrape
# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html
import scrapy


class IthomeItem(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    # Article title
    title = scrapy.Field()
    # Article URL
    url = scrapy.Field()
    # Source
    source = scrapy.Field()
    # Source URL
    source_url = scrapy.Field()
    # Publication date
    release_date = scrapy.Field()
    # Author
    author = scrapy.Field()
    # Keywords / tags
    key_words = scrapy.Field()
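As a side note, a Scrapy Item behaves much like a dict, which is why the pipeline in step 5 can call dict(item) before inserting into MongoDB. A quick sketch with made-up values:
from ithome.items import IthomeItem

item = IthomeItem()
item['title'] = 'Example title'  # fields are accessed like dict keys
item['url'] = 'https://www.ithome.com/0/123/456.htm'  # hypothetical URL, for illustration only
print(dict(item))
# Assigning to a field that was not declared in IthomeItem raises a KeyError,
# which helps catch typos early.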
4. Write the spider file news.py
Note the use of LinkExtractor in the rules: its allow pattern (com/\d/\d{3}/\d{3}) selects the article URLs that the spider should follow.
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from ithome.items import IthomeItem
import datetime


class NewsSpider(CrawlSpider):
    name = 'news'
    allowed_domains = ['ithome.com']
    start_urls = ['https://www.ithome.com']
    # The rules attribute makes it easy to follow links to news pages
    rules = (
        Rule(LinkExtractor(allow=r'com/\d/\d{3}/\d{3}'), callback='parse_item', follow=True),
    )

    def parse_item(self, response):
        item = IthomeItem()
        item['url'] = response.url
        # Article title
        item['title'] = response.xpath('.//*[@id="dt"]/div[1]/h1/text()').extract_first()
        # Article author
        item['author'] = response.css('#author_baidu strong::text').extract_first()
        # Article source
        item['source'] = response.css('#source_baidu > a::text').extract_first()
        # Source URL
        item['source_url'] = response.css('#source_baidu > a::attr(href)').extract_first()
        # Publication date
        item['release_date'] = response.css('#pubtime_baidu::text').extract_first()
        # Keywords / tags (extract_first() keeps only the first tag)
        item['key_words'] = response.css('.hot_tags > span a::text').extract_first()
        return item

    def close(self, reason):
        self.crawler.stats.set_value('finish_time', datetime.datetime.now())
The close() method is overridden here: when the spider shuts down, the closing time is recorded in the crawl stats under the key finish_time.
5. Write the pipeline pipelines.py
Save the scraped data to MongoDB.
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
# useful for handling different item types with a single interface
from itemadapter import ItemAdapter
from pymongo import MongoClient
from scrapy.exceptions import DropItem
import datetime


class IthomePipeline(object):
    # MongoDB collection name
    collection = 'ithome_news'

    def __init__(self, mongo_uri, mongo_db, stats):
        self.mongo_uri = mongo_uri
        self.mongo_db = mongo_db
        self.stats = stats

    @classmethod
    def from_crawler(cls, crawler):
        return cls(
            # Read the MongoDB connection info from settings.py and grab the stats collector
            mongo_uri=crawler.settings.get('MONGO_URI'),
            mongo_db=crawler.settings.get('MONGO_DB'),
            stats=crawler.stats
        )

    def open_spider(self, spider):
        self.client = MongoClient(self.mongo_uri)
        self.db = self.client[self.mongo_db]  # note: index into the client by database name
        self.stats.set_value('start', datetime.datetime.now())

    def close_spider(self, spider):
        self.client.close()

    def process_item(self, item, spider):
        # An item with a title is considered valid and is saved; otherwise it is dropped
        if not item['title']:
            raise DropItem('Incomplete data, dropping: {}'.format(item))
        else:
            self.db[self.collection].insert_one(dict(item))
        return item
6. Write settings.py
# Scrapy settings for ithome project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://docs.scrapy.org/en/latest/topics/settings.html
# https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'ithome'
SPIDER_MODULES = ['ithome.spiders']
NEWSPIDER_MODULE = 'ithome.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'ithome (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'ithome.middlewares.IthomeSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'ithome.middlewares.IthomeDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
    'ithome.pipelines.IthomePipeline': 300,
}
# Close the spider once 10000 items have been scraped
CLOSESPIDER_ITEMCOUNT = 10000
# MongoDB connection settings
MONGO_URI = 'localhost:27017'
MONGO_DB = 'ithome'
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
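With all the pieces in place, the crawl is started from the project root in the usual way; CLOSESPIDER_ITEMCOUNT above stops it automatically after 10000 items:
>>>scrapy crawl news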
Results
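After the run, the saved documents can be inspected directly with pymongo; a minimal sketch, assuming MongoDB is running locally with the MONGO_URI and MONGO_DB values from settings.py:
from pymongo import MongoClient

client = MongoClient('localhost:27017')  # same MONGO_URI as in settings.py
db = client['ithome']                    # same MONGO_DB as in settings.py
collection = db['ithome_news']           # collection written by IthomePipeline

print(collection.count_documents({}))    # how many articles were saved
print(collection.find_one({}, {'_id': 0, 'title': 1, 'release_date': 1}))  # a sample title and date
client.close()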