1. 相关网址和库
参考文章:Python3 网络爬虫(二):下载小说的正确姿势
网址
https://www.xsbiquge.com
需要用到的库
requests、beautifulsoup4、lxml、tqdm
2.代码实现
import requests
from bs4 import BeautifulSoup
from tqdm import tqdm
class NovelSpider(object):
    """Scrape a novel from xsbiquge.com chapter by chapter and save it to a text file."""

    def __init__(self):
        # Site root, used to turn relative chapter links into absolute URLs.
        self.server = 'https://www.xsbiquge.com'
        # Index page listing every chapter of the book.
        self.target_url = 'https://www.xsbiquge.com/15_15338/'
        # Output file name.
        self.book_name = '诡秘之主.txt'
        # Holds dicts of {'chapter': title, 'url': absolute link}.
        self.chapter_list = []

    # 1. Send request
    def get_response(self, url, timeout=10):
        """Fetch *url* and return the response body decoded as UTF-8.

        A timeout is supplied so a stalled connection cannot hang the
        whole download forever (requests.get blocks indefinitely by
        default when no timeout is given).
        """
        response = requests.get(url, timeout=timeout)
        data = response.content.decode('utf-8')
        return data

    # 2. Parse data
    # 2.1 Parse the chapter index page
    def parse_list_data(self, data):
        """Extract chapter titles and absolute URLs into self.chapter_list."""
        bs_chapter = BeautifulSoup(data, 'lxml')
        chapters = bs_chapter.find('div', id='list')
        chapters = chapters.find_all('a')
        for chapter in chapters:
            chapter_dict = {}
            # Chapter title
            chapter_dict['chapter'] = chapter.get_text()
            # The href is site-relative, so prefix the server root.
            chapter_dict['url'] = self.server + chapter.get('href')
            self.chapter_list.append(chapter_dict)

    # 2.2 Parse a chapter detail page
    def parse_detail_data(self, data):
        """Return the chapter body as a list of paragraph strings.

        Paragraphs on the site are separated by a run of four
        non-breaking spaces (U+00A0), which also act as the
        first-line indent; splitting on that run removes both.
        """
        bs_content = BeautifulSoup(data, 'lxml')
        texts = bs_content.find('div', id='content')
        content = texts.get_text().strip().split('\xa0' * 4)
        return content

    def run(self):
        """Download every chapter and append it to the output file."""
        data = self.get_response(self.target_url)
        self.parse_list_data(data)
        # Open the file once around the loop instead of re-opening it
        # for every chapter (append mode kept, so behavior for an
        # existing file is unchanged).
        with open(self.book_name, 'a', encoding='utf-8') as f:
            for item in tqdm(self.chapter_list):
                content_data = self.get_response(item['url'])
                chapter_content = self.parse_detail_data(content_data)
                f.write(item['chapter'])
                f.write('\n')
                f.write('\n'.join(chapter_content))
                f.write('\n\n')
# Script entry point: build the spider and start the download.
if __name__ == '__main__':
    spider = NovelSpider()
    spider.run()
3. 小结
- 分别解析目录页和详情页信息。
- 通过 tqdm 显示下载进度。