import requests
from bs4 import BeautifulSoup
import re
from sendEmail2 import SendQQEmail
# Fetch the Baidu realtime hot-search board and parse it into a soup tree.
url = 'https://top.baidu.com/board?tab=realtime'
headers = {
    # Browser-like headers: Baidu serves a stripped page to unknown agents.
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.63 Safari/537.36',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9'
}
# timeout keeps the script from hanging forever on a stalled connection
r = requests.get(url, headers=headers, timeout=10)
soup = BeautifulSoup(r.text, "html.parser")
#print(soup.prettify())
# ----------------- Ranking -----------------
# Each hot-search entry is a .category-wrap_iQLoo container; the rank
# number is rendered inside an .index_1Ew5p element as "> N <".
rank_div = soup.select('.category-wrap_iQLoo')
print(len(rank_div))
ranking = []
patten = re.compile(r'> (\d+) <')  # raw string avoids invalid-escape warning
for entry in rank_div:
    # Serialize the index element(s) so the regex can pull the number out.
    index_html = ''.join(str(tag) for tag in entry.select('.index_1Ew5p'))
    m = patten.search(index_html)
    # Guard against entries with no parsable rank instead of crashing on None
    ranking.append(m.group(1) if m else '')
print(ranking)
# -----------------标题------------------------
# ----------------- Titles -----------------
title = []
title_div = soup.select('.c-single-text-ellipsis')
for node in title_div:
    # .string is None when the tag has more than one child; fall back to
    # get_text() so we never call .replace on None.
    text = node.string if node.string is not None else node.get_text()
    title.append(text.replace(' ', ''))
print(title)
# -----------------内容,链接------------------------
# ----------------- Summary & link -----------------
summary = []
href = []
# The summary text sits between "> " and " <a" in the serialized block;
# raw string avoids the invalid-escape warning.
patten = re.compile(r'> (.+?) <a')
summary_div = soup.select('.large_nSuFU ')
for block in summary_div:
    # The first <a> inside the block carries the article link; guard
    # against blocks with no anchor instead of crashing on None.
    anchor = block.a
    href.append(anchor['href'] if anchor is not None else '')
    # Search once and reuse the match (original searched twice per item).
    m = patten.search(str(block))
    # Keep a placeholder so summary stays aligned with href/ranking.
    summary.append(m.group(1) if m else " ")
print(summary)
print(href)
# -----------------热搜指数------------------------
# ----------------- Hot-search index -----------------
hot = []
hot_div = soup.select('.hot-index_1Bl1a')
for node in hot_div:
    text = node.string
    # .string may be None for tags with nested markup; keep a placeholder
    # so the list stays aligned with the other scraped columns.
    hot.append(text.replace(' ', '') if text is not None else '')
print(hot)
# 获取百度热搜数据 (fetch Baidu hot-search data)
# NOTE(review): the two lines below were blog-page footer text accidentally
# pasted into the script; kept as a comment so the file parses.
# 最新推荐文章于 2024-05-17 15:45:13 发布