import os
import re
from urllib.parse import urljoin

import requests
from bs4 import BeautifulSoup
# Scrape joke articles from rqxh.net: walk the category index, then each
# category's article list, and save every article body to
# "<category title>/<article title>.text" (UTF-8).
BASE_URL = 'https://rqxh.net/'
INDEX_URL = 'https://rqxh.net/wapindex.aspx?classid=467'
HEADERS = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 Safari/537.36'}


def _fetch_soup(url):
    """Download *url* and return its HTML parsed as BeautifulSoup.

    Forces UTF-8 decoding (the site declares no reliable charset) and
    raises requests.HTTPError on non-2xx responses instead of silently
    parsing an error page.  The timeout prevents an unresponsive server
    from hanging the scraper forever.
    """
    resp = requests.get(url, headers=HEADERS, timeout=30)
    resp.raise_for_status()
    resp.encoding = 'utf-8'
    return BeautifulSoup(resp.text, "html.parser")


def _safe_filename(name):
    """Replace characters that are illegal in Windows/Unix filenames.

    Article titles can contain '/', ':', '?', etc., which would make
    open() fail (or write into an unintended directory).
    """
    return re.sub(r'[\\/:*?"<>|]', '_', name).strip()


def main():
    """Crawl every category, then every article, saving each body to disk."""
    index_soup = _fetch_soup(INDEX_URL)
    for category in index_soup.find('div', attrs={'class': 'urlBox'}).find_all('a'):
        save_dir = category['title']
        # exist_ok avoids the check-then-create race of the naive pattern.
        os.makedirs(save_dir, exist_ok=True)
        # urljoin handles hrefs that are absolute or start with '/',
        # where plain string concatenation would build a broken URL.
        category_soup = _fetch_soup(urljoin(BASE_URL, category['href']))
        for article in category_soup.find('div', attrs={'class': 'mx2'}).find_all('a'):
            article_url = article['href']
            print(article_url)
            article_soup = _fetch_soup(article_url)
            content_div = article_soup.find('div', attrs={'class': 'content'})
            body_text = content_div.get_text()
            title = content_div.find('h2').get_text()
            print(title)
            with open(f'{save_dir}/{_safe_filename(title)}.text',
                      mode='w', encoding='utf-8') as f:
                f.write(body_text)


if __name__ == '__main__':
    main()
# --- Article metadata copied from the CSDN page (kept as comments so the
# --- file remains valid Python) ---
# PYTHON爬取笑话 (Python: scraping jokes)
# 最新推荐文章于 2024-08-08 20:50:21 发布 (latest recommended article published 2024-08-08 20:50:21)
# 该文章描述了一个使用Python的requests和BeautifulSoup库从特定网站抓取内容，包括链接和文本，
# 然后保存到本地文件的过程，主要关注HTML解析和文件管理。
# (The article describes using Python's requests and BeautifulSoup to scrape links and
# text from a specific site and save them to local files, focusing on HTML parsing
# and file management.)
# 摘要由CSDN通过智能技术生成 (abstract auto-generated by CSDN)