Web Scraper: Novel Download
I finally finished scraping this novel, all 4,367 chapters. Straight to the code:
import requests
import re
import time
import random
from fake_useragent import UserAgent
# Retry up to 3 times on timeouts or connection errors
def gethtml(url, headers):
    i = 0
    while i < 3:
        try:
            html = requests.get(url, headers=headers, timeout=30)
            return html
        except requests.exceptions.RequestException:
            i += 1
    return None  # give up after three failed attempts; caller should check for None
# The first version used hand-copied static headers (a fixed User-Agent,
# cookies, Host, etc.); they are replaced below by a random User-Agent per request.
ua = UserAgent()
headers = {'User-Agent': ua.random}

# Fetch the novel's index page
url = 'http://www.xbiquge.la/0/445/'
resp = requests.get(url, headers=headers)
resp.encoding = 'utf-8'
html = resp.text  # source of the novel's home page
# Novel title (the last <h1> on the page)
title = re.findall(r'<h1>(.*?)</h1>', html)[-1]

# The chapter list lives inside a single <dl> block;
# re.S lets '.' match newlines so the whole block is captured
dl = re.findall(r'<dl>.*?</dl>', html, re.S)[0]

# Each entry is a (relative_url, chapter_title) pair
chapter_info_list = re.findall(r'<dd><a href=\'(.*?)\' >(.*?)</a></dd>', dl)
# Create the output file
fb = open('%s.txt' % title, 'w', encoding='utf-8')

# Download each chapter in turn
time_count = 0
for chapter_info in chapter_info_list:
    headers = {'User-Agent': ua.random}  # fresh random User-Agent for each chapter
    time_count = time_count + 1
    time.sleep(random.choice([0.5, 1, 1.5, 2]))  # random pause so requests look less robotic
    chapter_url, chapter_title = chapter_info
    chapter_url = "http://www.xbiquge.la/%s" % chapter_url
    # Download the chapter page (with retries)
    chapter_response = gethtml(chapter_url, headers)
    if chapter_response is None:
        print('Failed to fetch %s after 3 tries, skipping' % chapter_url)
        continue
    chapter_response.encoding = 'utf-8'
    chapter_html = chapter_response.text
    # The chapter body sits between the content div and the read3() script tag
    chapter_content = re.findall(r'<div id="content">(.*?)<script>read3\(\);</script>', chapter_html, re.S)
    chapter_content_str = " ".join(chapter_content)
    # Clean the data: strip the &nbsp; indentation, stray spaces, and <br> line breaks
    chapter_content_str = chapter_content_str.replace('&nbsp;', '')
    chapter_content_str = chapter_content_str.replace('\r<br />', '')
    chapter_content_str = chapter_content_str.replace(' ', '')
    chapter_content_str = chapter_content_str.replace('<br/>', '')
    fb.write(chapter_title)
    fb.write(chapter_content_str)
    fb.write('\n')
    print('%s, %s, %s' % (chapter_url, chapter_title, time_count))
fb.close()
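To make the cleaning step concrete, here is what those four replace calls do to a made-up snippet shaped like the markup the site returns (the paragraph text itself is invented for illustration):

raw = '&nbsp;&nbsp;&nbsp;&nbsp;第一段。\r<br /><br/>第二段。'
cleaned = (raw.replace('&nbsp;', '')
              .replace('\r<br />', '')
              .replace(' ', '')
              .replace('<br/>', ''))
print(cleaned)  # -> 第一段。第二段。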
Compared with the first version, this one adds a retry on timed-out requests and a random header for each request.
# Retry on timeout
def gethtml(url, headers):
    i = 0
    while i < 3:
        try:
            html = requests.get(url, headers=headers, timeout=30)
            return html
        except requests.exceptions.RequestException:
            i += 1
    return None  # give up after three failed attempts
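If the site is flaky, a variant that waits longer after each failure is a small change. This is only a sketch, not part of the script above, and the delay values are arbitrary:

# Sketch: the same retry loop, but with exponential backoff between attempts
def gethtml_backoff(url, headers, retries=3):
    for attempt in range(retries):
        try:
            return requests.get(url, headers=headers, timeout=30)
        except requests.exceptions.RequestException:
            time.sleep(2 ** attempt)  # wait 1s, 2s, 4s before the next attempt
    return None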
The random headers come from the fake_useragent library.
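All the script needs from it is one property; each read of ua.random returns a different real-browser User-Agent string (a minimal sketch):

from fake_useragent import UserAgent

ua = UserAgent()                     # loads a pool of real browser User-Agent strings
headers = {'User-Agent': ua.random}  # picks a random one on every access
print(headers)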
There were a few hiccups along the way, but bit by bit it did finish. I have to say, you need a good network connection for this.