Python Web Scraper Notes
Update 2020-02-21:
The improved version is here:
New code
——————————————— divider ———————————————
Web scraping seemed like fun, so I taught myself and wrote a simple scraper that downloads a novel from a website. But after roughly 2,000 chapters it just hangs... pretty frustrating. I'm writing this down as a record and will update it once I've solved it. In the meantime, if anyone more experienced can point me in the right direction, please do.
import requests
import re
import time
import random
from bs4 import BeautifulSoup as bes  # imported but never actually used below

print('OK')  # quick sanity check that the imports loaded
url = 'http://www.shuquge.com/txt/595/index.html'
resp = requests.get(url)
resp.encoding = 'utf-8'
# HTML source of the novel's index page
html = resp.text
# Novel title; re.findall returns a list, so take the first match
title = re.findall(r'<meta property="og:title" content="(.*?)" />', html)[0]
print(title)
# Pull out the chapter list
dl = re.findall(r'《神话版三国》正文卷.*?</dl>', html, re.S)[0]
chapter_info_list = re.findall(r'href="(.*?)">(.*?)<', dl)
print(chapter_info_list[0])
# Create the output file
fb = open('%s.txt' % title, 'w', encoding='utf-8')
# Download each chapter in turn
for chapter_info in chapter_info_list:
    # Random pause (0-3 s) so the requests don't hammer the server
    time.sleep(random.random() * 3)
    chapter_url, chapter_title = chapter_info
    chapter_url = "http://www.shuquge.com/txt/595/%s" % chapter_url
    # Download the chapter page
    chapter_response = requests.get(chapter_url)
    chapter_response.encoding = 'utf-8'
    chapter_html = chapter_response.text
    # The chapter body sits between the read2() and read3() <script> tags
    chapter_content = re.findall(r'<script>read2\(\);</script>(.*?)<script>read3\(\);</script>', chapter_html, re.S)
    chapter_content_str = " ".join(chapter_content)
    # Clean the raw HTML: strip &nbsp; entities, <br/> tags and stray spaces
    chapter_content_str = chapter_content_str.replace('&nbsp;', '')
    chapter_content_str = chapter_content_str.replace('<br/>', '')
    chapter_content_str = chapter_content_str.replace(' ', '')
    # Append the chapter to the output file
    fb.write(chapter_title + '\n')
    fb.write(chapter_content_str)
    fb.write('\n')
    print(chapter_url)
fb.close()
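
On the stall itself: every requests.get() above runs without a timeout, so a single dead connection is enough to block the loop forever, which matches "hangs after 2,000+ chapters". Below is a minimal sketch of a fetch helper with a timeout and simple retries, reusing the requests/time imports at the top; the name fetch_chapter, the retry count and the back-off are my own guesses, not part of the original script.

def fetch_chapter(url, retries=3, timeout=10):
    # Sketch only: retry count, timeout and back-off are assumptions
    for attempt in range(retries):
        try:
            # timeout makes a dead connection raise instead of hanging forever
            resp = requests.get(url, timeout=timeout)
            resp.encoding = 'utf-8'
            return resp.text
        except requests.RequestException as exc:
            print('retry %s for %s: %s' % (attempt + 1, url, exc))
            time.sleep(2 ** attempt)  # brief exponential back-off before retrying
    raise RuntimeError('giving up on %s' % url)

Inside the loop, chapter_html = fetch_chapter(chapter_url) would replace the three chapter_response lines. Reusing a single requests.Session() across chapters would also cut down connection overhead.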
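
Also, BeautifulSoup is imported as bes but never used. For what it's worth, the chapter list could be parsed with it instead of the regex; a sketch assuming the index page lists chapters as <dd><a href=...> entries inside the <dl> (the 'dd a' selector is my guess from the regex above, not verified against the site):

soup = bes(html, 'html.parser')
# Each (href, chapter title) pair, equivalent to chapter_info_list above
chapter_info_list = [(a['href'], a.get_text()) for a in soup.select('dd a')]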