1、Beautiful Soup 的安装:
pip install beautifulsoup4
pip install lxml
2、爬取代码:
import time

import pandas as pd
import requests
from bs4 import BeautifulSoup
# Browser-like User-Agent header so Douban's server serves the page
# instead of rejecting an obvious script client.
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
                         'AppleWebKit/537.36 (KHTML, like Gecko) '
                         'Chrome/121.0.0.0 Safari/537.36'}
target = "https://movie.douban.com/subject/35890350/comments"
# Number of comment pages to fetch; Douban paginates 20 comments per page.
max_pages = 20


def _extract_comments(html):
    """Parse one comments page and return its short-comment texts.

    Each comment body lives in a <span class="short"> element on the
    Douban comments page.
    """
    soup = BeautifulSoup(html, 'lxml')
    return [el.text.strip() for el in soup.find_all('span', {'class': 'short'})]


def main():
    """Scrape up to ``max_pages`` of Douban short comments and save to Excel."""
    comments = []
    for page in range(max_pages):
        # 'start' is the pagination offset: page N starts at comment N*20.
        response = requests.get(target,
                                params={'start': page * 20},
                                headers=headers,
                                timeout=10)  # fail fast instead of hanging forever
        # Surface HTTP errors (e.g. 403 when blocked) instead of silently
        # parsing an error page and collecting nothing.
        response.raise_for_status()
        response.encoding = 'utf-8'
        comments.extend(_extract_comments(response.text))
        # Be polite: pause between pages so the scraper is not rate-limited.
        time.sleep(1)

    # Single-column DataFrame holding the scraped comment texts.
    df = pd.DataFrame({'评论内容': comments})
    output_filename = "douban_comments.xlsx"
    df.to_excel(output_filename, index=False, engine='openpyxl')
    print(f'数据已保存到 {output_filename}')


if __name__ == '__main__':
    main()