# -*- coding: utf-8 -*-
import scrapy
from ..items import BookListItem, BookDetailItem
# Import the spider base class from scrapy_redis
from scrapy_redis.spiders import RedisSpider


# Change the spider's parent class to RedisSpider
class YunqiSpider(RedisSpider):
    name = 'yunqi'
    allowed_domains = ['qq.com']
    # start_urls is commented out: a RedisSpider reads its start URLs from redis
    # start_urls = ['http://yunqi.qq.com/bk/so2/n10p1']
    # redis_key: when the spider starts, it reads start URLs from redis under
    # this key (see the seeding sketch after this file)
    redis_key = 'yunqi:start_urls'

    def parse(self, response):
        # Locate each novel's container div
        divs = response.xpath('//div[@class="book"]')
        # Iterate over the novels on this list page
        for novel in divs:
            # Novel link
            novel_link = novel.xpath('a/@href').extract_first('')
            # Cover image
            img_src = novel.xpath('a/img/@src').extract_first('')
            # Novel id
            novel_id = novel.xpath('div/h3/a/@id').extract_first('')
            # Novel title
            novel_title = novel.xpath('div/h3/a/text()').extract_first('')
            info = novel.xpath('div/dl/dd//text()').extract()
            novel_auth = info[0]      # author
            novel_categray = info[1]  # category
            novel_status = info[2]    # serialization status
            novel_date = info[3]      # last update date
            novel_numbers = info[4]   # word count
            # Build the list-page item
            item = BookListItem(
                novel_link=novel_link,
                img_src=[img_src],
                novel_id=novel_id,
                novel_title=novel_title,
                novel_auth=novel_auth,
                novel_categray=novel_categray,
                novel_status=novel_status,
                novel_date=novel_date,
                novel_numbers=novel_numbers,
            )
            yield item
            # Follow the link into the detail page
            yield scrapy.Request(
                url=novel_link,
                callback=self.parse_detail,
                meta={'novel_id': novel_id},
            )
        # Find the "next page" link and keep crawling
        pages = response.xpath('//div[@id="pageHtml2"]/a')
        # Guard the pager: pages[-1] would raise IndexError on an empty list
        if pages:
            next_link = pages[-1].xpath('@href').extract_first('')
            if next_link:
                yield scrapy.Request(url=next_link)

    def parse_detail(self, response):
        # Novel id passed along from parse() via meta
        novel_id = response.meta.get('novel_id')
        # Novel tags
        tags = response.xpath('//div[@class="tags"]/text()').extract_first('').split(':')[-1].strip('\r\n ')
        # The statistics table on the detail page
        infos = response.xpath('//div[@id="novelInfo"]/table/tr/td//text()').extract()
        # Total clicks
        novelAllClick = infos[3].split(':')[-1]
        # Monthly clicks
        novelMonClick = infos[6].split(':')[-1]
        # Weekly clicks
        novelWeekClick = infos[9].split(':')[-1]
        # Total popularity
        novelAllPopular = infos[4].split(':')[-1]
        # Monthly popularity
        novelMonPopular = infos[7].split(':')[-1]
        # Weekly popularity
        novelWeekPopular = infos[10].split(':')[-1]
        # Total recommendations
        novelAllComm = infos[5].split(':')[-1]
        # Monthly recommendations
        novelMonComm = infos[8].split(':')[-1]
        # Weekly recommendations
        novelWeekComm = infos[11].split(':')[-1]
        # Comment count
        CommentNums = response.xpath('//span[@id="novelInfo_commentCount"]/text()').extract_first('')
        # Build the detail-page item
        item = BookDetailItem(
            novel_id=novel_id,
            tags=tags,
            novelAllClick=novelAllClick,
            novelMonClick=novelMonClick,
            novelWeekClick=novelWeekClick,
            novelAllPopular=novelAllPopular,
            novelMonPopular=novelMonPopular,
            novelWeekPopular=novelWeekPopular,
            novelAllComm=novelAllComm,
            novelMonComm=novelMonComm,
            novelWeekComm=novelWeekComm,
            CommentNums=CommentNums,
        )
        yield item
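
# Usage sketch (not one of the project files): start `scrapy crawl yunqi` on
# each worker node, then seed the start URL under the redis_key defined above.
# Assumes the `redis` package and the redis host from REDIS_URL in settings.py;
# the one-liner `redis-cli lpush yunqi:start_urls http://yunqi.qq.com/bk/so2/n10p1`
# does the same thing.
if __name__ == '__main__':
    import redis

    r = redis.StrictRedis(host='192.168.10.220', port=6379)
    r.lpush('yunqi:start_urls', 'http://yunqi.qq.com/bk/so2/n10p1')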
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy


class YqspiderItem(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    pass

class BookListItem(scrapy.Item):
    # Novel link
    novel_link = scrapy.Field()
    # Cover image
    img_src = scrapy.Field()
    # Novel id
    novel_id = scrapy.Field()
    # Novel title
    novel_title = scrapy.Field()
    # Author
    novel_auth = scrapy.Field()
    # Category
    novel_categray = scrapy.Field()
    # Serialization status
    novel_status = scrapy.Field()
    # Last update date
    novel_date = scrapy.Field()
    # Word count
    novel_numbers = scrapy.Field()

    # self is the item being written; cursor is a database cursor
    def insert_item(self, cursor):
        # Prepare the SQL statement
        sql = ('INSERT INTO booklist(novel_link, img_src, novel_id, novel_title, '
               'novel_auth, novel_categray, novel_status, novel_date, novel_numbers) '
               'VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s)')
        cursor.execute(sql, (
            self['novel_link'], self['img_src'][0], self['novel_id'],
            self['novel_title'], self['novel_auth'], self['novel_categray'],
            self['novel_status'], self['novel_date'], self['novel_numbers']))

class BookDetailItem(scrapy.Item):
    # Novel id
    novel_id = scrapy.Field()
    # Tags
    tags = scrapy.Field()
    # Total clicks
    novelAllClick = scrapy.Field()
    # Monthly clicks
    novelMonClick = scrapy.Field()
    # Weekly clicks
    novelWeekClick = scrapy.Field()
    # Total popularity
    novelAllPopular = scrapy.Field()
    # Monthly popularity
    novelMonPopular = scrapy.Field()
    # Weekly popularity
    novelWeekPopular = scrapy.Field()
    # Total recommendations
    novelAllComm = scrapy.Field()
    # Monthly recommendations
    novelMonComm = scrapy.Field()
    # Weekly recommendations
    novelWeekComm = scrapy.Field()
    # Comment count
    CommentNums = scrapy.Field()

    def insert_item(self, cursor):
        # Prepare the SQL statement
        sql = ('INSERT INTO bookdetail(novel_id, tags, novelAllClick, novelMonClick, '
               'novelWeekClick, novelAllPopular, novelMonPopular, novelWeekPopular, '
               'novelAllComm, novelMonComm, novelWeekComm, CommentNums) '
               'VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)')
        cursor.execute(sql, (
            self['novel_id'], self['tags'], self['novelAllClick'], self['novelMonClick'],
            self['novelWeekClick'], self['novelAllPopular'], self['novelMonPopular'],
            self['novelWeekPopular'], self['novelAllComm'], self['novelMonComm'],
            self['novelWeekComm'], self['CommentNums']))
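
# Schema sketch (an assumption, not part of the original project): the
# insert_item methods above expect `booklist` and `bookdetail` tables to
# already exist in the `bookdb` database. The column types below are guesses
# sized for the scraped strings; adjust before using.
BOOKLIST_DDL = '''
CREATE TABLE IF NOT EXISTS booklist (
    novel_link VARCHAR(255),
    img_src VARCHAR(255),
    novel_id VARCHAR(64),
    novel_title VARCHAR(255),
    novel_auth VARCHAR(128),
    novel_categray VARCHAR(64),
    novel_status VARCHAR(32),
    novel_date VARCHAR(32),
    novel_numbers VARCHAR(32)
)
'''
BOOKDETAIL_DDL = '''
CREATE TABLE IF NOT EXISTS bookdetail (
    novel_id VARCHAR(64),
    tags VARCHAR(255),
    novelAllClick VARCHAR(32),
    novelMonClick VARCHAR(32),
    novelWeekClick VARCHAR(32),
    novelAllPopular VARCHAR(32),
    novelMonPopular VARCHAR(32),
    novelWeekPopular VARCHAR(32),
    novelAllComm VARCHAR(32),
    novelMonComm VARCHAR(32),
    novelWeekComm VARCHAR(32),
    CommentNums VARCHAR(32)
)
'''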
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
# Imports
from twisted.enterprise import adbapi
from pymysql import cursors


class YqspiderPipeline(object):
    def process_item(self, item, spider):
        return item

class TwistedMySQLPipeline(object):
    def __init__(self, db_pool):
        # Keep a reference to the twisted connection pool
        self.db_pool = db_pool

    @classmethod
    def from_settings(cls, settings):
        # Collect the database connection parameters from settings.py
        params = dict(
            host=settings['MYSQL_HOST'],
            user=settings['MYSQL_USER'],
            password=settings['MYSQL_PASSWD'],
            port=settings['MYSQL_PORT'],
            db=settings['MYSQL_DBNAME'],
            charset=settings['MYSQL_CHARSET'],
            use_unicode=True,
            # Cursor type: rows come back as dicts
            cursorclass=cursors.DictCursor,
        )
        # Create the connection pool
        db_pool = adbapi.ConnectionPool('pymysql', **params)
        # Return an instance of this pipeline class
        return cls(db_pool)

    def process_item(self, item, spider):
        # Hand the write off to the connection pool:
        # the first argument is the callable to run, the rest are its arguments
        query = self.db_pool.runInteraction(self.do_insert, item)
        # On failure, invoke the error handler
        query.addErrback(self.handler_error, item)
        return item

    # Error handler
    def handler_error(self, fail, item):
        print(fail, item)

    # Runs in a pool thread with a fresh cursor; the transaction is committed
    # automatically when this returns without raising
    def do_insert(self, cursor, item):
        item.insert_item(cursor)
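
# Standalone sketch (assumes a reachable MySQL at the settings below) showing
# what runInteraction does for do_insert above: the pool runs the callable in
# a worker thread with a fresh cursor and commits when it returns cleanly.
if __name__ == '__main__':
    from twisted.internet import reactor

    pool = adbapi.ConnectionPool('pymysql', host='192.168.10.220', user='root',
                                 password='123456', db='bookdb', charset='utf8')

    def show_version(cursor):
        cursor.execute('SELECT VERSION()')
        return cursor.fetchone()

    d = pool.runInteraction(show_version)
    d.addCallback(print)
    d.addErrback(print)
    d.addBoth(lambda _: reactor.stop())
    reactor.run()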
# -*- coding: utf-8 -*-
# Scrapy settings for YQSpider project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'YQSpider'
SPIDER_MODULES = ['YQSpider.spiders']
NEWSPIDER_MODULE = 'YQSpider.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'YQSpider (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
DOWNLOAD_DELAY = 0.5
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'YQSpider.middlewares.YqspiderSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
DOWNLOADER_MIDDLEWARES = {
    # Custom user-agent middleware (a sketch of it appears at the end of this listing)
    'YQSpider.middlewares.CustomUAMiddleware': 1,
    # Disable the built-in user-agent middleware so the custom one takes over
    'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': None,
}
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
    'YQSpider.pipelines.TwistedMySQLPipeline': 1,
    # Also keep a copy of every item in redis
    'scrapy_redis.pipelines.RedisPipeline': 300,
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
# Connection settings for the remote MySQL database
MYSQL_HOST = '192.168.10.220'
MYSQL_USER = 'root'
MYSQL_PASSWD = '123456'
MYSQL_PORT = 3306
MYSQL_DBNAME = 'bookdb'
MYSQL_CHARSET = 'utf8'
# Configure the scheduler scrapy uses:
# the scrapy_redis scheduler ensures that no two hosts crawl the same url
SCHEDULER = 'scrapy_redis.scheduler.Scheduler'
# Configure the deduplication class scrapy uses
DUPEFILTER_CLASS = 'scrapy_redis.dupefilter.RFPDupeFilter'
# The redis instance this project connects to
REDIS_URL = 'redis://root:@192.168.10.220:6379'
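
# -*- coding: utf-8 -*-
# Hedged sketch of YQSpider/middlewares.py: the DOWNLOADER_MIDDLEWARES setting
# above references CustomUAMiddleware, but that file is not part of this
# listing. The rotation logic and the user-agent strings below are assumptions,
# not the project's actual code.
import random


class CustomUAMiddleware(object):
    # A small pool of browser user-agents (illustrative values)
    user_agent_list = [
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 '
        '(KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36',
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/603.3.8 '
        '(KHTML, like Gecko) Version/10.1.2 Safari/603.3.8',
    ]

    def process_request(self, request, spider):
        # Overwrite the User-Agent header on every outgoing request
        request.headers['User-Agent'] = random.choice(self.user_agent_list)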