I've been learning Python web scraping recently, so I wrote a practice script that scrapes novels from the 17K novel site (17k.com). Sharing it here.
1. The page scraped is http://all.17k.com/lib/book.html, the novel category page. Only novels in the free section are scraped; I don't have an account for the paid VIP ones.
(Screenshots omitted: the novel list page, the novel description page, the chapter list page, and the chapter content page.)
Since the site serves static pages, this makes a very easy beginner scraping exercise. We work backwards: first grab the text of a chapter content page, which XPath extracts easily; then collect every chapter URL from the book's chapter-list page, loop over them, and save each one, which stores a whole novel. After that we only need the list of every book's URL from the category page and can iterate over it to store all the novels; a minimal sketch of that last step follows.
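To illustrate that last step, here's a minimal sketch of pulling the book URLs out of one category list page. The XPath below is my assumption about the markup (I haven't verified it against the real page), so treat get_book_urls as a hypothetical helper:

from lxml import etree

def get_book_urls(list_html):
    # Sketch: collect chapter-list page URLs ("/list/<id>.html") from one category page.
    # The XPath selector is an assumption, not verified against 17k.com's real markup.
    tree = etree.HTML(list_html)
    hrefs = tree.xpath("//a[contains(@href, '/list/')]/@href")
    urls = ["http://www.17k.com" + h if h.startswith("/") else h for h in hrefs]
    return list(dict.fromkeys(urls))  # de-duplicate while keeping page order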
As the (omitted) screenshot showed, each novel is saved under a name + author filename, its content is written chapter by chapter, and a JSON file recording the list of scraped books is saved alongside; a sketch of that JSON step follows.
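The JSON bookkeeping isn't in the partial listing below (note the otherwise-unused import json there); a minimal sketch of what it could look like, with record fields that are my guess rather than the original format:

import json

def save_book_list(book_records, path="./txt/book_list.json"):
    # Sketch: dump the records of scraped books to a JSON file.
    # The fields in book_records (e.g. "file", "url") are assumed, not the original format.
    with open(path, "w", encoding="utf8") as f:
        json.dump(book_records, f, ensure_ascii=False, indent=2)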
I'm not much of a writer, so here's the link to the full source: https://download.csdn.net/download/weixin_39179620/10928691
Partial code below:
# coding=utf-8
'''
Author: Jguobao
QQ: 779188083
email: jgb2010start@163.com
'''
import requests
from lxml import etree
import json
class NovelSpider:
    def __init__(self, start_url="http://all.17k.com/lib/book/2_0_0_0_0_0_1_0_1.html?"):
        self.start_url = start_url
        self.url = "http://www.17k.com/list/1038316.html"  # "http://www.17k.com/chapter/2938105/36788407.html"
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36"}
    def parse_url(self, url):
        # Fetch a page and return the decoded HTML, or None on a non-200 response
        response = requests.get(url, headers=self.headers)
        if response.status_code != 200:
            return None
        return response.content.decode()
    def get_detail_page(self, html_str, index=""):
        # Extract a chapter's title and body paragraphs from a content page
        if index != "":
            index = str(index) + "."
        html_str_etree = etree.HTML(html_str)
        title = html_str_etree.xpath("//div[@class='readAreaBox content']/h1/text()")[0]
        word = html_str_etree.xpath("//div[@class='readAreaBox content']/div[@class='p']/text()")
        # Indent non-empty lines and add newlines; blank lines are dropped
        word = [" " + w.strip() + "\n" for w in word if len(w.strip()) > 0]
        title = "\t\t\t" + index + title.strip() + "\n"
        return title, word[:-1]  # the last line appears to be site boilerplate, not chapter text
The following are the main functions for extracting information from the HTML:
    def get_txt_name(self, html_str):
        # Build the output filename from the book title on the chapter-list page
        html_str_etree = etree.HTML(html_str)
        h1_text = html_str_etree.xpath("//div[@class='Main List']//h1/text()")
        filename = (h1_text[0] if len(h1_text) > 0 else "无名") + '.txt'
        return filename
    def get_url_list(self, html_str):
        # Parse the chapter-list page: every volume, and every chapter under it
        item_list = []
        html_str_etree = etree.HTML(html_str)
        volume_list = html_str_etree.xpath("//dl[@class='Volume']")  # all volumes; titles and chapters are read per volume
        for volume in volume_list:
            item = {}
            item['卷标'] = volume.xpath(".//span[@class='tit']/text()")[0]  # volume title
            item['info'] = volume.xpath(".//span[@class='info']/text()")[0]
            # Collect every <a> tag under this volume: one {chapter title: absolute URL} dict per chapter
            a_list = []
            for a in volume.xpath("./dd/a"):
                title2 = a.xpath(".//span[@class='ellipsis']/text()")[0].strip()
                a_list.append({title2: "http://www.17k.com" + a.xpath("./@href")[0]})
            item['章节'] = a_list  # chapters
            item_list.append(item)
        return item_list
The following are the main functions for saving and processing one novel:
    def save_txt(self, txt_list, filename):
        # Append one chapter (title, then body lines) to the book's txt file
        with open('./txt/' + filename, "a", encoding='utf8') as f:
            f.write(txt_list[0])
            for wd in txt_list[1][:-2]:  # skip the trailing lines, which are not chapter text
                f.write(wd)
    def process_item_list(self, item_list, filename):
        # Fetch and save the content of every chapter of one novel
        for item in item_list:
            volume_label = item['卷标']  # volume title (unused here)
            volume_info = item['info']
            chapter_list = item['章节']  # renamed to avoid shadowing the outer item_list
            for chapter in chapter_list:
                for title in chapter:  # each chapter dict maps title -> URL
                    print(title)
                    html_detail_str = self.parse_url(chapter[title])
                    txt_list = list(self.get_detail_page(html_detail_str))
                    txt_list[0] = '\t\t' + title + '\n'
                    self.save_txt(txt_list, filename)
Create the spider object, specifying the page to scrape and how many pages:
if __name__ == '__main__':
    # start_url defaults to "http://all.17k.com/lib/book/2_0_0_0_0_0_1_0_1.html?"; you can also pass your own
    ns = NovelSpider()  # pass the start_url of the category page to scrape
    ns.run(1000)  # run takes the number of pages to scrape
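The run method itself isn't in this partial listing; it's in the full source at the download link above. Purely as a sketch of how the pieces could fit together, here's a hypothetical run that assumes the trailing _1 before .html in start_url is the page number, and that reuses the hypothetical get_book_urls and save_book_list helpers sketched earlier; none of this is the original code:

    def run(self, page_count):
        # Sketch only, not the original run(): crawl page_count category pages
        book_records = []
        for page in range(1, page_count + 1):
            # Assumption: the last number before ".html" is the page index
            page_url = self.start_url.replace("_1.html", "_%d.html" % page)
            list_html = self.parse_url(page_url)
            if list_html is None:
                continue
            for book_url in get_book_urls(list_html):  # hypothetical helper sketched earlier
                chapter_html = self.parse_url(book_url)
                if chapter_html is None:
                    continue
                filename = self.get_txt_name(chapter_html)
                self.process_item_list(self.get_url_list(chapter_html), filename)
                book_records.append({"file": filename, "url": book_url})
        save_book_list(book_records)  # hypothetical helper sketched earlier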