import requests
from bs4 import BeautifulSoup
# Request headers (fixed): a desktop-browser User-Agent string
headers = {
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.146 Safari/537.36'
}
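# Optional sketch (not in the original): a requests.Session could be shared across all
# 120 requests so the TCP connection is reused; session.get(url) would then replace
# requests.get(url, headers=headers) inside the loop.
# session = requests.Session()
# session.headers.update(headers)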
count = 1
while count <= 120:
    print(count)
    counts = str(count)
    # URL of the page to scrape: pick a novel by its id; chapter_no walks through the chapters
    # url = 'http://www.8wenku.com/chapter/view?id=1611&chapter_no=' + counts  # The Rising of the Shield Hero
    # url = 'http://www.8wenku.com/chapter/view?id=2667&chapter_no=' + counts  # Classroom of the Elite
    # url = 'http://www.8wenku.com/chapter/view?id=1690&chapter_no=' + counts  # That Time I Got Reincarnated as a Slime
    url = 'http://www.8wenku.com/chapter/view?id=1498&chapter_no=' + counts
    count = count + 1
    res = requests.get(url, headers=headers)
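    # Optional guard (not in the original): skip chapters the server fails to return.
    # `count` was already advanced above, so `continue` simply moves on to the next chapter.
    # if res.status_code != 200:
    #     continue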
    soup = BeautifulSoup(res.text, 'lxml')  # parse the downloaded HTML (fixed)
    # tags = soup.find_all('span', {'class': 'hello'})
    # Select the block that holds the chapter body
    news_list = soup.find('div', {'class': 'article-body'})
    news = news_list
    # if count >= 230:
    #     news = news_list
    # else:
    #     news = news_list.find_all('p')
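    # Optional guard (not in the original): soup.find() returns None when the
    # 'article-body' div is missing, and str(None) would append the text "None" to the file.
    # if news is None:
    #     continue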
    # Convert the BeautifulSoup tag to a str (this keeps the HTML markup)
    news = str(news)
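    # Alternative sketch (changes the output, not the original behavior):
    # news_list.get_text('\n') would give plain chapter text with the HTML tags stripped.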
    # Output file path and encoding; mode 'a' appends each chapter to the same file
    with open('F:/不死者.txt', 'a', encoding='utf-8') as f:
        f.write(news)
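    # Optional (not in the original): pause briefly between chapters, e.g.
    # time.sleep(1) with `import time` at the top, to avoid hammering the server.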