# Import required packages
import requests
import re

# Index page of the target novel
url = 'http://www.shujy.com/5200/244309/'
# Send the HTTP request (simulating a browser fetch)
response = requests.get(url)
# The site serves UTF-8; set it explicitly so .text decodes correctly
response.encoding = 'utf-8'
# Raw HTML of the novel's index page
html = response.text

# Extract the novel's title from the Open Graph meta tag
title = re.findall(r'<meta property="og:novel:book_name" content="(.*?)"/>', html)[0]

# Extract the chapter list: each entry is (relative link, chapter name)
# taken from the <div id="list"> block of the index page
dl = re.findall(r'<div id="list">.*?</div>', html, re.S)[0]
chapter_info_list = re.findall(r'<a href="(.*?)">(.*?)</a>', dl)

# Write every chapter into "<title>.txt"; `with` guarantees the file is
# closed even if a download or parse step raises
with open('%s.txt' % title, 'w', encoding='utf-8') as fb:
    # Loop over each chapter: build its absolute URL, download, clean, save
    for chapter_info in chapter_info_list:
        chapter_url, chapter_title = chapter_info
        chapter_url = "http://www.shujy.com/5200/244309/%s " % chapter_url
        # Strip stray spaces introduced by the format string above
        chapter_url = chapter_url.replace(' ', '')
        # Download the chapter page
        chapter_response = requests.get(chapter_url)
        chapter_response.encoding = 'utf-8'
        chapter_html = chapter_response.text
        # The chapter body lives in <div id="content">
        chapter_content = re.findall(r'<div id="content">(.*?)</div>', chapter_html, re.S)[0]
        # Data cleaning: drop spaces, <br /> tags and stray '&t;' entities.
        # (A former replace of '&t;;' was removed: after '&t;' is stripped,
        # no '&t;;' substring can remain, so it was a provable no-op.)
        chapter_content = chapter_content.replace(' ', '')
        chapter_content = chapter_content.replace('<br />', '')
        chapter_content = chapter_content.replace('&t;', '')
        # Save chapter title and cleaned text to the local file
        fb.write(chapter_title)
        fb.write('\n')
        fb.write(chapter_content)
        fb.write('\n')
        # Progress log so the user can follow the crawl
        print(chapter_url, chapter_title)