考虑代码量较长,注释部分含在代码内部
可以实现抓取小说的名字,作者,封面,所有章节的信息
(0)先看效果
(1)数据库设计
from django.db import models
from django.contrib.auth.models import AbstractUser
from django.db.models.signals import pre_delete
from django.dispatch import receiver
# 小说列表
class NovelList(models.Model):
    """A novel: title, author, cover image path, introduction and chapter count."""
    nid = models.AutoField(primary_key=True)
    name = models.CharField(verbose_name='小说标题', max_length=15)
    # Cover image. BUG FIX: upload_to was 'novel./' (typo); the crawler stores
    # covers as 'novel/<nid>.jpg', so the upload directory must be 'novel/'.
    url = models.FileField(verbose_name='小说封面地址', upload_to='novel/', default='')
    author = models.CharField(verbose_name='小说作者', max_length=20, default="")
    introduce = models.CharField(verbose_name='小说介绍', max_length=300, default="")
    # Number of chapters found on the index page when the novel was crawled.
    pages = models.IntegerField(verbose_name='小说章节数', default=0)

    class Meta:
        verbose_name_plural = '小说'
# 小说章节
class NovelContent(models.Model):
    """One chapter of a novel.

    Chapters are chained through pre_chapter_id / then_chapter_id; the crawler
    writes 0 for the first chapter's predecessor and for the last chapter's
    successor, so 0 marks the ends of the chain.
    """
    nid = models.AutoField(primary_key=True)
    content = models.CharField(verbose_name='章节内容', max_length=8000)
    # Owning novel; deleting the novel cascades and removes its chapters.
    novel = models.ForeignKey(verbose_name='小说id', to='NovelList', to_field='nid', on_delete=models.CASCADE)
    title = models.CharField(verbose_name='章节标题', max_length=25)
    # Previous chapter id (0 = this is the first chapter).
    pre_chapter_id = models.IntegerField(verbose_name='上一章章节id', default=0)
    # Next chapter id (0 = this is the last chapter).
    then_chapter_id = models.IntegerField(verbose_name='下一章章节id', default=0)

    class Meta:
        verbose_name_plural = '小说内容'
(2)抓取小说主函数
import re
import requests
from lxml import etree
import os
import time
if __name__ == '__main__':
    # Point Django at the project settings BEFORE importing anything that
    # touches the ORM.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'djangoProject.settings')
    import django
    import random

    # Initialise the Django app registry exactly once.
    # BUG FIX: the original called django.setup() a second time after the
    # model imports; the duplicate call was redundant and has been removed.
    django.setup()

    from app import models
    from django.core.files import File
    from django.core.files.base import ContentFile
# 获得html文章
# Pool of browser User-Agent strings; one is chosen at random per request so
# the crawler looks less like a bot. Hoisted to module level so the list is
# built once instead of on every call.
USER_AGENTS = [
    "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; AcooBrowser; .NET CLR 1.1.4322; .NET CLR 2.0.50727)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0; Acoo Browser; SLCC1; .NET CLR 2.0.50727; Media Center PC 5.0; .NET CLR 3.0.04506)",
    "Mozilla/4.0 (compatible; MSIE 7.0; AOL 9.5; AOLBuild 4337.35; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)",
    "Mozilla/5.0 (Windows; U; MSIE 9.0; Windows NT 9.0; en-US)",
    "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 2.0.50727; Media Center PC 6.0)",
    "Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 1.0.3705; .NET CLR 1.1.4322)",
    "Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 5.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 3.0.04506.30)",
    "Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN) AppleWebKit/523.15 (KHTML, like Gecko, Safari/419.3) Arora/0.3 (Change: 287 c9dfb30)",
    "Mozilla/5.0 (X11; U; Linux; en-US) AppleWebKit/527+ (KHTML, like Gecko, Safari/419.3) Arora/0.6",
    "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.2pre) Gecko/20070215 K-Ninja/2.1.1",
    "Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9) Gecko/20080705 Firefox/3.0 Kapiko/3.0",
    "Mozilla/5.0 (X11; Linux i686; U;) Gecko/20070322 Kazehakase/0.4.5",
    "Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.8) Gecko Fedora/1.9.0.8-1.fc10 Kazehakase/0.5.6",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) AppleWebKit/535.20 (KHTML, like Gecko) Chrome/19.0.1036.7 Safari/535.20",
    "Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; fr) Presto/2.9.168 Version/11.52",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.11 TaoBrowser/2.0 Safari/536.11",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.71 Safari/537.1 LBBROWSER",
    "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E; LBBROWSER)",
    "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; QQDownload 732; .NET4.0C; .NET4.0E; LBBROWSER)",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.84 Safari/535.11 LBBROWSER",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E)",
    "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E; QQBrowser/7.0.3698.400)",
    "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; QQDownload 732; .NET4.0C; .NET4.0E)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; SV1; QQDownload 732; .NET4.0C; .NET4.0E; 360SE)",
    "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; QQDownload 732; .NET4.0C; .NET4.0E)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E)",
    "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.89 Safari/537.1",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.89 Safari/537.1",
    "Mozilla/5.0 (iPad; U; CPU OS 4_2_1 like Mac OS X; zh-cn) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8C148 Safari/6533.18.5",
    "Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:2.0b13pre) Gecko/20110307 Firefox/4.0b13pre",
    "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:16.0) Gecko/20100101 Firefox/16.0",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11",
    "Mozilla/5.0 (X11; U; Linux x86_64; zh-CN; rv:1.9.2.10) Gecko/20100922 Ubuntu/10.10 (maverick) Firefox/3.6.10"
]


def get_html(url, timeout=10):
    """Download *url* and return it parsed as an lxml HTML element tree.

    :param url: page URL to fetch
    :param timeout: socket timeout in seconds (new, backward-compatible
        parameter; the original call had no timeout and could hang forever
        on a dead server)
    :return: root element produced by etree.HTML
    """
    headers = {
        'User-Agent': random.choice(USER_AGENTS)
    }
    page_text = requests.get(url=url, headers=headers, timeout=timeout).text
    return etree.HTML(page_text, etree.HTMLParser())
"""
index: 解决广告
index = 0 : 首发网址htTp://m.26w.cc page_id %3 == 1
index = 1 : 记住网址m.26ksw.cc page_id %3 == 2
index = 2 : 一秒记住http://m.26ksw.cc page_id % 3 == 0
"""
def get_novel(book_id=57583, page_start_id=54365820, pages=0, is_force_read=False):
    """Crawl one novel from www.26ksw.cc into the database.

    Fetches the book's metadata (title, author, introduction, cover image),
    then walks chapters starting at *page_start_id*, following each page's
    "next chapter" link, stripping the site's injected ad line from every
    paragraph, and storing chapters as a linked list of NovelContent rows.

    ps: out-of-book extra chapters make missing-chapter detection unreliable.
    Chapter URL pattern: http://www.26ksw.cc/book/{}/{}.html

    :param book_id: site id of the book
    :param page_start_id: site id of the first chapter to fetch
    :param pages: expected chapter count; 0 effectively disables the check
    :param is_force_read: keep crawling even when the chapter count check fails
    :return: None
    """
    # --- fetch book metadata, retrying until the index page parses ---
    while 1:
        try:
            url = 'http://www.26ksw.cc/book/{}.html'.format(book_id)
            html = get_html(url)
            page_all = pages
            # The first 12 <dd> entries are skipped (position() > 12).
            total_a = len(html.xpath('//*[@id="list"]/dl/dd[position() > 12]/a'))
            novel_name = html.xpath('//*[@id="info"]/h1/text()')[0].strip()
            author = html.xpath('//*[@id="info"]/p[1]/a/text()')[0].strip()
            introduce = html.xpath('//*[@id="intro"]/text()')[0].strip()
            url = "http://www.26ksw.cc" + html.xpath('//*[@id="fmimg"]/img/@src')[0]
            # BUG FIX: was `total_a <= page_all`, which flagged an exact match
            # (actual == expected) as "chapters missing"; only strictly fewer
            # chapters than expected is a problem.
            if total_a < page_all:
                if not is_force_read:
                    raise Exception("该小说章节被遗漏了, 预期章节为{}, 然而实际章节为{}".format(page_all, total_a))
                else:
                    print("注意!该小说章节被遗漏了, 预期章节为{}, 然而实际章节为{}".format(page_all, total_a))
            break
        except IndexError:
            # Page did not contain the expected nodes yet; wait and retry.
            print('加载失败,等待5s')
            time.sleep(5)
    if not (novel_name and author and introduce):
        raise Exception("读取异常")
    # --- create (or reuse) the NovelList row ---
    novel_obj = models.NovelList.objects.filter(name=novel_name).first()
    if not novel_obj:
        novel_obj = models.NovelList.objects.create(**{
            "author": author,
            "introduce": introduce,
            "pages": total_a,
            "name": novel_name
        })
    # Save the cover image as media/novel/<nid>.jpg.
    origin_path = os.path.dirname(os.getcwd()) + '/media/novel/'
    img_save_url = origin_path + str(novel_obj.nid) + '.jpg'
    with open(img_save_url, "wb") as file:
        img = requests.get(url)
        file.write(img.content)
        # (redundant explicit file.close() removed: `with` closes the file)
    novel_obj.url = 'novel/{}.jpg'.format(novel_obj.nid)
    novel_obj.save()
    # --- walk the chapters ---
    next_page_id = page_start_id   # site id of the chapter being fetched
    page_current = 1               # 1-based count of chapters fetched so far
    end_flag = False               # set when a chapter has no "next" link
    page_start_then_nid = 0        # database nid used to chain chapters
    url = 'http://www.26ksw.cc/book/{}/{}.html'.format(book_id, next_page_id)
    # Keep reading until there is no next chapter.
    while not end_flag:
        # Polite random delay between chapter downloads.
        time.sleep((random.random() + 0.4) * 5)
        # Inner loop retries the CURRENT chapter until it parses.
        while 1:
            try:
                html = get_html(url)
                # "next chapter" link
                next_page_href = html.xpath('//*[@id="main"]/div/div/div[2]/div[1]/a[4]/@href')[0]
                # scrape the title, the novel name and the paragraphs
                novel = {'title': "", 'name': "", 'content': []}
                novel_name = html.xpath('//*[@id="main"]/div/div/div[1]/a[2]/text()')[0].strip()
                novel_title = html.xpath('//*[@id="main"]/div/div/div[2]/h1/text()')[0].strip()
                novel_content_list = html.xpath('//*[@id="content"]/p[@class="content_detail"]')
                for article_p in novel_content_list:
                    # The site injects one of three ad lines depending on
                    # chapter_id % 3; strip the matching one from the text.
                    if int(next_page_id) % 3 == 1:
                        p = "<p>" + article_p.xpath('./text()')[0].strip().replace(
                            '\r\n \r\n 首发网址htTp://m.26w.cc', '') + '</p>'
                    if int(next_page_id) % 3 == 2:
                        p = "<p>" + article_p.xpath('./text()')[0].strip().replace(
                            '\r\n \r\n 记住网址m.26ksw.cc', '') + '</p>'
                    if int(next_page_id) % 3 == 0:
                        p = "<p>" + article_p.xpath('./text()')[0].strip().replace(
                            '\r\n \r\n 一秒记住http://m.26ksw.cc', '') + '</p>'
                    novel['content'].append(p)
                novel['title'] = novel_title
                novel['name'] = novel_name
                novel['content'] = "".join(novel['content'])
                print('next_page_id={}'.format(next_page_id))
                print('当前章节为{}'.format(page_current), '字符长度为{}'.format(len(novel['content'])), novel, '\n')
                # Persist the chapter, chaining pre/then ids.
                if page_current == 1:
                    page_obj = models.NovelContent.objects.create(
                        title=novel['title'],
                        content=novel['content'],
                        novel=novel_obj,
                        pre_chapter_id=0,
                        then_chapter_id=page_current + 1,
                    )
                    page_start_then_nid = page_obj.nid
                    page_obj.then_chapter_id = page_start_then_nid + 1
                    page_obj.save()
                else:
                    # NOTE(review): assumes NovelContent nids are strictly
                    # sequential during this crawl — confirm that nothing else
                    # writes NovelContent rows concurrently.
                    page_start_then_nid += 1
                    models.NovelContent.objects.create(
                        title=novel['title'],
                        content=novel['content'],
                        novel=novel_obj,
                        pre_chapter_id=page_start_then_nid - 1,
                        then_chapter_id=page_start_then_nid + 1
                    )
                # Parse the next chapter's id from the link.
                next_page_id = re.search(r"/(\d+).html", next_page_href)
                if not next_page_id:
                    # No next chapter: stop the outer loop as well.
                    end_flag = True
                else:
                    next_page_id = next_page_id.group(1)
                    url = 'http://www.26ksw.cc/book/{}/{}.html'.format(book_id, next_page_id)
                    page_current += 1
                # BUG FIX: break after a successful fetch so control returns
                # to the outer loop and the polite delay actually runs between
                # chapters; the original only left the inner loop at the very
                # end, so the sleep executed just once.
                break
            except (IndexError, ConnectionError):
                # BUG FIX: was `except IndexError or ConnectionError:`, which
                # Python evaluates as `except IndexError:` only — connection
                # errors were never caught. Multiple exceptions need a tuple.
                time.sleep(5)
                print('索引错误, 等待5s')
    # Close the linked list: the last stored chapter has no successor.
    # NOTE(review): objects.last() is the globally last row — correct only if
    # this crawl was the most recent writer; verify before crawling in parallel.
    page_obj = models.NovelContent.objects.last()
    page_obj.then_chapter_id = 0
    page_obj.save()
    print("小说已经爬取成功, 爬取章节数:{}".format(page_current))


get_novel(book_id=2320, page_start_id=9723862, pages=1672, is_force_read=False)