使用 requests 与 BeautifulSoup 爬取小说网站(诗词名句网)上整本小说的内容:
# -*- coding: UTF-8 -*-
"""Scrape every chapter of a novel from shicimingju.com into test.txt.

Flow: fetch the table-of-contents page, collect each chapter's title and
detail-page URL, then fetch each detail page and append
"<title>:<chapter text>" to ./test.txt (UTF-8).
"""
import requests
from bs4 import BeautifulSoup
from urllib.parse import urljoin

if __name__ == '__main__':
    headers = {
        'referer': 'https://www.qiushibaike.com/imgrank/',
        # Original UA was malformed (missing the leading "Mozilla/5.0 (",
        # leaving an unbalanced ")"), which some sites reject.
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
                      'AppleWebKit/537.36 (KHTML, like Gecko) '
                      'Chrome/94.0.4606.71 Safari/537.36',
    }
    url = 'https://www.shicimingju.com/book/sanguoyanyi.html'
    # Use raw bytes and let lxml detect the page encoding itself.
    page_text = requests.get(url=url, headers=headers).content
    # Parse the table-of-contents page.
    soup = BeautifulSoup(page_text, 'lxml')
    # Each <li> under .book-mulu > ul holds one chapter link (<a href=...>).
    li_list = soup.select('.book-mulu>ul>li')
    # 'with' guarantees the file is closed even if a request raises
    # (the original opened the file and never closed it).
    with open('./test.txt', 'w', encoding='utf-8') as fp:
        for li in li_list:
            title = li.a.string
            # urljoin handles both absolute and relative hrefs and avoids
            # the double slash the original '/' + href concatenation produced.
            detail_url = urljoin('https://www.shicimingju.com/', li.a['href'])
            # Fetch and parse the chapter detail page.
            detail_page_text = requests.get(url=detail_url, headers=headers).content
            detail_soup = BeautifulSoup(detail_page_text, 'lxml')
            div_tag = detail_soup.find('div', class_='chapter_content')
            content = div_tag.text
            fp.write(title + ':' + content + '\n')
            print(title, 'ok')
输出结果: