1.目标
用scrapy来爬取起点小说网的完本小说
参考链接:
Python | 实战Scrapy,爬取起点网全部小说!(mp.weixin.qq.com)
2.创建项目
scrapy startproject name 通过终端进入到你创建项目的目录下输入上面的命令就可以完成项目的创建.name是项目名字.
scrapy startproject QidianSpider
cd QidianSpider
3.Item的编写
item中的title用来存书名,desc用来存书的内容.
import scrapy


class QidianspiderItem(scrapy.Item):
    """Container for one crawled book.

    Fields:
        title: the book's name (later used as the output file name).
        desc:  the accumulated chapter headings and body text.
    """
    title = scrapy.Field()
    desc = scrapy.Field()
4.pipelines的编写
在pipelines可以编写存储数据的形式,我这里就是使用txt形式的文件来存储每一本书
import json
import codecs
# 以txt的形式存储,其实也就是一个存储方式
# Stores every crawled book as a UTF-8 text file named after the book title.
class QidianspiderPipeline(object):
    """Write each item's text to '<title>.txt' in the working directory."""

    def process_item(self, item, spider):
        """Persist one item to a file named after its title.

        Bug fixes vs. the original:
        - a real newline ('\\n') is written instead of the literal
          letter 'n';
        - the file is opened in a context manager, so every handle is
          closed (the original rebound self.file per item, leaking all
          handles except the last one, which only spider_closed freed).
        """
        with codecs.open(item.get('title') + '.txt', 'w', encoding='utf-8') as f:
            f.write(item.get("desc") + "\n")
        return item

    def spider_closed(self, spider):
        # Kept for backward compatibility; files are now closed in
        # process_item, so there is nothing left to release here.
        pass
5.settings的编写
只要将下面代码中的QidianSpider替换成自己项目的名字就可以.
# Scrapy settings for the QidianSpider project.
BOT_NAME = 'QidianSpider'

SPIDER_MODULES = ['QidianSpider.spiders']
NEWSPIDER_MODULE = 'QidianSpider.spiders'

# Crawl responsibly by identifying yourself (and your website) on the user-agent
# USER_AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_3) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.54 Safari/536.5'

# Bug fix: the class defined in pipelines.py is 'QidianspiderPipeline'
# (lowercase 's'); the original key referenced a non-existent
# 'QidianSpiderPipeline', so Scrapy would fail to load the pipeline.
ITEM_PIPELINES = {
    'QidianSpider.pipelines.QidianspiderPipeline': 300,
}
6.spider的编写
import scrapy
from scrapy import Request  # bug fix: Request was used but never imported

# Bug fix: the item class was used but never imported.
from QidianSpider.items import QidianspiderItem


class QidianSpider(scrapy.Spider):
    """Crawl finished novels from qidian.com, accumulating one item per book."""

    name = 'qidian'
    allowed_domains = ['qidian.cn']
    # One listing page of finished books per value of `page` (1..139).
    start_urls = [
        "http://fin.qidian.com/?size=-1&sign=-1&tag=-1&chanId=8&subCateId=-1&orderId=&update=-1&page="
        + str(page) + "&month=-1&style=1&vip=-1" for page in range(1, 140)]

    def parse(self, response):
        """Parse a listing page: follow every book's detail-page link.

        Bug fix: HtmlXPathSelector/hxs.select is long-removed Scrapy API
        (and was never imported here); response.xpath is the supported way.
        """
        book_urls = response.xpath(
            '//div[@class="book-mid-info"]/h4/a//@href').extract()
        for book_url in book_urls:
            # Hrefs are protocol-relative ("//book.qidian.com/..."), so the
            # scheme is prepended.  parseBook handles the detail page.
            yield Request("http:" + book_url, self.parseBook, dont_filter=True)

    def parseBook(self, response):
        """Parse a book detail page: follow the 'free read' link to chapter 1."""
        charter_urls = response.xpath(
            '//div[@class="book-info "]//a[@class="red-btn J-getJumpUrl "]/@href'
        ).extract()
        # One item per book; it accumulates title and text across chapters
        # via response.meta.
        item = QidianspiderItem()
        for url in charter_urls:
            yield Request("http:" + url, meta={'item': item},
                          callback=self.parseCharter, dont_filter=True)

    def parseCharter(self, response):
        """Parse one chapter page: append its text to the item, follow 'next'."""
        item = response.meta['item']
        # Record the book name once (only the first chapter sets it).
        for book_name in response.xpath(
                '//div[@class="info fl"]/a[1]/text()').extract():
            if item.get('title') is None:
                item['title'] = book_name
        # Chapter heading(s), each terminated by a newline.
        # Bug fix: the original appended the literal letter 'n', not '\n'.
        content = ''
        for heading in response.xpath(
                '//h3[@class="j_chapterName"]/text()').extract():
            content = content + heading + "\n"
        # Chapter body paragraphs, concatenated after the heading.
        for paragraph in response.xpath(
                '//div[@class="read-content j_readContent"]//p/text()').extract():
            content = content + paragraph
        desc = item.get('desc')
        item['desc'] = content if desc is None else desc + content
        # NOTE(review): the original only yields the item when this page had
        # no text at all — presumably "no content" marks the end of the book.
        # Confirm this is the intended completion condition.
        if content == '':
            yield item
        # Follow the "next chapter" link, if any.
        for chapter in response.xpath(
                '//div[@class="chapter-control dib-wrap"]/a[@id="j_chapterNext"]//@href'
        ).extract():
            yield Request("http:" + chapter, meta={'item': item},
                          callback=self.parseCharter, dont_filter=True)