Python爬取豆瓣网某热门电影评论

import requests
import lxml
from lxml import etree
import pandas as pd
import time
import random
# Request headers: a desktop Chrome/Edge User-Agent so Douban serves the
# normal HTML page instead of blocking the default `requests` UA.
header = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36 Edg/126.0.0.0'
}
# Session cookies copied from a logged-in browser session.
# NOTE(review): these expire — replace with fresh values when requests start
# failing or returning the login page. Do not commit real credentials.
cookies = {
    'cookie': 'll="118186"; _pk_id.100001.4cf6=35caae0a00be0738.1704106999.; bid=Ldx1xzL4-NY; dbcl2="281689738:+ITwYHhRF4A"; push_noty_num=0; push_doumail_num=0; __utmv=30149280.28168; _vwo_uuid_v2=D18303FEFA0A68F1D2C87504D3B634E22|a3877a0965cdb318daaa87e49dda22e6; ck=1S0X; frodotk_db="463aa8caac206a2cd2affa6004613e84"; _pk_ref.100001.4cf6=%5B%22%22%2C%22%22%2C1720312003%2C%22https%3A%2F%2Fcn.bing.com%2F%22%5D; _pk_ses.100001.4cf6=1; __utma=30149280.548300531.1688621674.1720277330.1720312003.11; __utmb=30149280.0.10.1720312003; __utmc=30149280; __utmz=30149280.1720312003.11.6.utmcsr=cn.bing.com|utmccn=(referral)|utmcmd=referral|utmcct=/; __utma=223695111.544466185.1704106999.1720277330.1720312003.9; __utmb=223695111.0.10.1720312003; __utmc=223695111; __utmz=223695111.1720312003.9.5.utmcsr=cn.bing.com|utmccn=(referral)|utmcmd=referral|utmcct=/; ap_v=0,6.0'
}
# Loop over comment pages and scrape the data.
def get_data(num_pages):
    """Scrape Douban comments for movie 1889243.

    Fetches `num_pages` pages (20 comments each) and returns a list of
    [user, recommend, date, comment] rows.

    BUG FIX: the original collected four parallel XPath lists and combined
    them with zip(). Comments without a star rating produce no "allstar"
    span, so `recommends` came back shorter than the other lists — zip()
    then truncated rows AND paired ratings with the wrong users/comments.
    We now iterate per comment-item node and extract each field relative
    to it, defaulting missing fields to ''.
    """
    all_data = []
    for page in range(num_pages):
        start = page * 20
        url = f'https://movie.douban.com/subject/1889243/comments?start={start}&limit=20&status=P&sort=new_score'
        print(f'正在爬取第{page + 1}页数据.')
        # Network errors on one page are reported but don't abort the run.
        try:
            # timeout added so a stalled connection can't hang forever.
            response = requests.get(url, headers=header, cookies=cookies, timeout=10)
            response.raise_for_status()
            html = etree.HTML(response.text)
            # One node per comment; note the trailing space in the class
            # attribute is present in Douban's markup.
            items = html.xpath('//div[@id="comments"]/div[@class="comment-item "]')
            for item in items:
                user = item.xpath('.//span[@class="comment-info"]/a/text()')
                recommend = item.xpath('.//span[@class="comment-info"]/span[starts-with(@class,"allstar")]/@title')
                date = item.xpath('.//span[@class="comment-time "]/@title')
                comment = item.xpath('.//p[contains(@class,"comment-content")]/span[@class="short"]/text()')
                all_data.append([
                    user[0] if user else '',
                    recommend[0] if recommend else '',  # '' when the comment is unrated
                    date[0] if date else '',
                    comment[0] if comment else '',
                ])
        except requests.exceptions.RequestException as e:
            print(f"获取第 {page + 1} 页数据时出错:{e}")
        # Random delay between pages to avoid anti-scraping throttling.
        time.sleep(random.uniform(1, 5))
    return all_data
def main():
    """Prompt for a page count, scrape comments, and save them to CSV."""
    num_pages = int(input("输入你要爬取的页数:"))
    data = get_data(num_pages)
    df = pd.DataFrame(data, columns=['用户名', '推荐', '时间', '评论'])
    # Save as CSV in UTF-8 without the index column.
    df.to_csv('douban_comments.csv', index=False, encoding="utf-8")
    print("评论数据已保存到douban_comments.csv文件中")


# Guard the entry point so importing this module doesn't trigger the
# interactive prompt and network scraping.
if __name__ == "__main__":
    main()

  • 4
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 打赏
    打赏
  • 1
    评论
评论 1
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包

打赏作者

AustSimple

你的鼓励将是我创作的最大动力

¥1 ¥2 ¥4 ¥6 ¥10 ¥20
扫码支付:¥1
获取中
扫码支付

您的余额不足,请更换扫码支付或充值

打赏作者

实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值