import requests
from bs4 import BeautifulSoup
# Scrape the first 10 pages of chinanews.com.cn's scroll-news listing and
# print each item's category, title, absolute link, and publication time.
for page in range(1, 11):  # paginate: pages 1..10
    url = f'https://www.chinanews.com.cn/scroll-news/news{page}.html'
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36 Edg/126.0.0.0'}
    # timeout added so a stalled connection cannot hang the scraper forever
    response = requests.get(url=url, headers=headers, timeout=10)
    if response.status_code != 200:
        print(f"访问失败,错误码为{response.status_code}")
        continue  # skip this page and move on to the next one
    response.encoding = 'utf-8'  # force UTF-8 so Chinese text decodes correctly
    soup = BeautifulSoup(response.text, 'html.parser')  # parse the page HTML
    li_list = soup.select('html > body > div.w1280.mt20 > div.content-left > div.content_list ul > li')
    for li in li_list:
        # News category link; some <li> rows (e.g. separators) have none — skip them
        # instead of reusing a stale value from the previous iteration.
        category_a = li.select_one('li > div > a')
        if category_a is None:
            continue
        news_style = category_a.text
        # Title anchor carries both the headline text and the relative href;
        # query it once and reuse the node.
        title_a = li.select_one('li > div:nth-of-type(2) > a')
        news_title = title_a.text
        news_link = title_a.attrs['href']
        # Fixed typo: host was 'wwwe.chinanews.com.cn', now matches the summary line below.
        print(f"https://www.chinanews.com.cn/{news_link}")
        news_time = li.select_one('li > div.dd_time').text
        print(f"类型:{news_style}, 标题:{news_title}, 链接:https://www.chinanews.com.cn/{news_link}, 时间:{news_time}")
# Note: this version solves the multi-page (pagination) crawling problem.
# Latest recommended article published at 2024-10-17 11:21:39.