爬取笔趣阁小说——三寸人间
import requests
from lxml import etree
import os
import time
def get_page(url):
    """Fetch *url* and return the Response object on HTTP 200, else None.

    A browser User-Agent header is sent because the target site rejects
    the default python-requests client.

    :param url: absolute URL to fetch
    :return: ``requests.Response`` on success, ``None`` on any failure
    """
    headers = {
        'User-Agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.159 Safari/537.36"
    }
    try:
        # timeout keeps the crawler from hanging forever on a stalled connection
        response = requests.get(url=url, headers=headers, timeout=10)
    except requests.RequestException:
        # network errors (DNS, refused connection, timeout) are reported as
        # "page unavailable" rather than crashing the whole crawl
        return None
    if response.status_code == 200:
        return response
    return None
def main():
    """Crawl the chapter index of the novel and save each chapter to ./sancun/<title>.txt."""
    if not os.path.exists('./sancun'):
        os.mkdir('./sancun')
    url = "https://www.xbiquge.la/10/10489/"
    response = get_page(url=url)
    if response is None:
        # index page unreachable; nothing to crawl
        return
    # the site serves UTF-8; setting encoding explicitly fixes mojibake in Chinese text
    response.encoding = 'utf-8'
    tree = etree.HTML(response.text)
    dd_list = tree.xpath('//div[@id="list"]/dl/dd')
    for dd in dd_list:
        name = dd.xpath('./a/text()')[0] + '.txt'
        text_url = "https://www.xbiquge.la" + dd.xpath('./a/@href')[0]
        article_page = get_page(url=text_url)
        if article_page is None:
            # skip chapters that failed to download instead of crashing on None
            print(name, "爬取失败")
            continue
        article_page.encoding = "utf-8"
        page_tree = etree.HTML(article_page.text)
        article = page_tree.xpath('//div[@id="content"]/text()')
        article_path = os.path.join('sancun', name)
        # 'w' (not 'a'): re-running the script must not append duplicate chapter text
        with open(article_path, 'w', encoding='utf-8') as fp:
            fp.writelines(article)
        # throttle requests so the server does not start rejecting us
        time.sleep(0.1)
        print(name, "爬取成功")


if __name__ == '__main__':
    main()
代码编写过程的错误
- 爬取的页面中文出现乱码:在浏览器 console 中输入 document.charset 查看目标网页的编码,随后对爬取到的响应对象设置 response.encoding = "utf-8" 即可解决中文乱码问题
- 在运行的中间过程中会报错,出现以下情况
查阅资料后,发现最简单的方法就是加一个sleep,给予反应时间,并且实施后确实不会再出现上述问题