大秦赋豆瓣评论爬取与可视化

豆瓣大秦赋评论数据爬取

import requests
import chardet
import re
# Request headers: a browser User-Agent plus a Referer. Some servers check
# the Referer header to decide whether a request comes from a real browser
# or from a crawler, so it is worth sending it along.
headers = {
    'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36',
    'Referer':'https://www.douban.com/'
}
# First page of the short-comment listing for Douban entry 26413293 ("大秦赋").
url = "https://movie.douban.com/subject/26413293/comments?status=P"
response = requests.get(url,headers=headers)  # send the request, get the response
response.encoding = "utf-8"
text = response.text  # decoded response body as a str
print(text)

import requests
import chardet
import re
import pandas as pd
# Request headers: a browser User-Agent plus a Referer (some servers use the
# Referer header to tell browsers from crawlers).
headers = {
    'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36',
    'Referer':'https://www.douban.com/'
}
# First page of the short-comment listing for Douban entry 26413293.
url = "https://movie.douban.com/subject/26413293/comments?status=P"
response = requests.get(url,headers=headers)  # send the request, get the response
response.encoding = "utf-8"
text = response.text  # decoded HTML as a str

# Parse each comment item individually. The previous approach collected
# times, ratings and texts with three parallel re.findall() calls; whenever a
# comment carries no star rating the three lists end up with different
# lengths and the DataFrame constructor raises ValueError. Capturing the
# `title` attribute also yields the full "YYYY-MM-DD HH:MM:SS" timestamp that
# the later analysis slices by fixed positions.
comments_list = []
comments_time_list = []
comments_rate_list = []
for chunk in text.split('<div class="comment-item"')[1:]:
    short = re.search(r'<span class="short">(.*?)</span>', chunk, re.S)
    if short is None:
        continue  # chunk without a short-comment span is not a comment item
    when = re.search(r'<span class="comment-time " title="(.*?)">', chunk)
    stars = re.search(r'<span class="allstar(\d+) rating"', chunk)
    comments_list.append(short.group(1))
    comments_time_list.append(when.group(1).strip() if when else "")
    # Douban encodes 1-5 stars as allstar10..allstar50; None when unrated.
    comments_rate_list.append(int(stars.group(1)) // 10 if stars else None)

df1 = pd.DataFrame({"评论时间":comments_time_list,
                   "评论星级":comments_rate_list,
                   "评论内容":comments_list})
print(df1)
# Page 2 of the listing: the `start` query parameter advances 20 per page.
url = "https://movie.douban.com/subject/26413293/comments?start=20&limit=20&status=P&sort=new_score"

# Demonstrate how every page URL (offsets 0, 20, ..., 1000) is generated.
for i in range(0, 1001, 20):
    url = ("https://movie.douban.com/subject/26413293/comments"
           "?start={}&limit=20&status=P&sort=new_score".format(i))
import requests
import chardet
import re
import pandas as pd
import time

def get_url(i):
    """Build the comments-page URL whose listing starts at offset ``i``."""
    return ("https://movie.douban.com/subject/26413293/comments"
            f"?start={i}&limit=20&status=P&sort=new_score")

def parse_comments(text):
    """Parse one comments-page HTML string into a DataFrame.

    Each row holds the comment timestamp (the ``title`` attribute of the
    comment-time span, "YYYY-MM-DD HH:MM:SS"), the star rating (1-5, or
    None when the user did not rate) and the short-comment text.

    Parsing is done per comment chunk so a missing rating cannot shift
    the columns: the previous three parallel ``re.findall`` lists could
    end up with different lengths, which makes the ``pd.DataFrame``
    constructor raise ValueError.
    """
    times, rates, shorts = [], [], []
    for chunk in text.split('<div class="comment-item"')[1:]:
        short = re.search(r'<span class="short">(.*?)</span>', chunk, re.S)
        if short is None:
            continue  # chunk without a short-comment span is not a comment item
        when = re.search(r'<span class="comment-time " title="(.*?)">', chunk)
        stars = re.search(r'<span class="allstar(\d+) rating"', chunk)
        shorts.append(short.group(1))
        times.append(when.group(1).strip() if when else "")
        # Douban encodes 1-5 stars as allstar10..allstar50.
        rates.append(int(stars.group(1)) // 10 if stars else None)
    return pd.DataFrame({"评论时间": times,
                         "评论星级": rates,
                         "评论内容": shorts})


def get_info(url):
    """Download one comments page from Douban and return it as a DataFrame.

    NOTE(review): the Cookie below is a hard-coded logged-in session and
    will expire — refresh it before reusing this crawler.
    """
    headers = {
    "Accept":"application/json, text/plain, */*",
    "Accept-Language":"zh-CN,zh;q=0.9",
    "Connection":"keep-alive",
    "Host":"movie.douban.com",
    "User-Agent":'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36',
    "Cookie":'bid=JLUTfxJxeh0; douban-fav-remind=1; __gads=ID=54e7875daaa536ba-224ad6f387c40009:T=1604323575:RT=1604323575:S=ALNI_Mb5QhIHQXkzu7X2EOs3Lq_Nvrr0dg; ll="108304"; __utmc=30149280; __utmc=223695111; __yadk_uid=R2F6MMSKKBJNPwZ7lwanORBKVYKxeWVu; _vwo_uuid_v2=DA80373BF7AB42C66DF02341A27121638|656c126dc6ca53b780ef0c0554838a74; dbcl2="228470775:M8fp0/PBDzE"; ck=DqpQ; push_noty_num=0; push_doumail_num=0; __utma=30149280.554006038.1604323549.1608111816.1608126511.6; __utmz=30149280.1608126511.6.6.utmcsr=accounts.douban.com|utmccn=(referral)|utmcmd=referral|utmcct=/; __utmt=1; __utmv=30149280.22847; __utmb=30149280.2.10.1608126511; _pk_ref.100001.4cf6=%5B%22%22%2C%22%22%2C1608126532%2C%22http%3A%2F%2Flocalhost%3A8888%2F%22%5D; _pk_ses.100001.4cf6=*; __utma=223695111.1042259513.1607995061.1608111816.1608126532.5; __utmb=223695111.0.10.1608126532; __utmz=223695111.1608126532.5.5.utmcsr=localhost:8888|utmccn=(referral)|utmcmd=referral|utmcct=/; _pk_id.100001.4cf6=77da7558ba6afb50.1607995061.5.1608126562.1608111821'
    }
    response = requests.get(url,headers=headers)  # send the request, get the response
    response.encoding = "utf-8"
    # response.text is the decoded HTML; parsing is delegated to the helper.
    return parse_comments(response.text)
    
if __name__ == "__main__":
    # Crawl offsets 0, 20, ..., 1000 (51 pages), pausing between requests,
    # then dump everything to one CSV (utf_8_sig so Excel opens it cleanly).
    df = pd.DataFrame(columns=['评论时间','评论星级','评论内容'])
    for page, start in enumerate(range(0, 1001, 20), 1):
        page_url = get_url(start)
        print(f"正在打印第{page}页")
        page_df = get_info(page_url)
        df = pd.concat([df, page_df])
        time.sleep(5)  # be polite to the server
    df.to_csv('final_all_comment.csv', encoding='utf_8_sig')

豆瓣大秦赋评论数据分析可视化

import pandas as pd
import matplotlib.pyplot as plt
import jieba
from wordcloud import WordCloud
from imageio import imread
# 忽略警告
import warnings
warnings.filterwarnings("ignore")

# Chinese-capable font so matplotlib can render the Chinese labels.
plt.rcParams["font.family"] = "SimHei"
# Default font size.
plt.rcParams["font.size"] = 16
# Font style.
plt.rcParams["font.style"] = "italic"
df = pd.read_csv("final_all_comment.csv",index_col=0)
df.head(10)  # no-op outside a notebook; kept from the original flow
print("删除之前的记录数:",df.shape)
# Drop exact duplicates (same timestamp and same text) saved by the crawler.
df.drop_duplicates(subset=['评论时间','评论内容'],inplace=True,keep='first')
# BUG FIX: this print originally repeated "删除之前的记录数" (count *before*
# deletion); it reports the shape after deduplication.
print("删除之后的记录数:",df.shape)
# Slice day-of-month (chars 8-9) and hour (chars 11-12) out of the timestamp
# — assumes every value has the "YYYY-MM-DD HH:MM:SS" shape; TODO confirm.
df["评论天数"] = df["评论时间"].str[8:-9].astype(int)
df["小时"] = df["评论时间"].str[11:-6].astype(int)
def func(st):
    """Collapse immediately repeated substrings in ``st``.

    For every window width from 1 up to half of the original length, scan
    the string; wherever a window is immediately followed by an identical
    window, every consecutive repetition is dropped and a single copy is
    kept (e.g. "哈哈哈哈哈" becomes "哈").
    """
    for width in range(1, int(len(st) / 2) + 1):
        for start in range(len(st)):
            # A run begins here when the next `width` chars repeat in place.
            if st[start:start + width] == st[start + width:start + 2 * width]:
                end = start + width
                # Swallow every further consecutive copy of the window.
                while st[end:end + width] == st[end + width:end + 2 * width] and end < len(st):
                    end = end + width
                st = st[:start] + st[end:]
    return st


# Quick demonstration on a string full of repeated fragments.
st = "我爱你我爱你我爱你好你好你好哈哈哈哈哈"
func(st)
def func(st):
    """Collapse immediately repeated substrings in ``st``.

    Identical re-definition of the ``func`` demonstrated earlier in this
    file (notebook-cell residue); this copy is the one applied to the
    DataFrame below. For each window width, wherever a window is
    immediately followed by identical windows, all consecutive
    repetitions are dropped, keeping one copy ("哈哈哈哈" -> "哈").
    """
    for i in range(1,int(len(st)/2)+1):
        for j in range(len(st)):
            if st[j:j+i] == st[j+i:j+2*i]:
                k = j + i
                # Swallow every further consecutive copy of the window.
                while st[k:k+i] == st[k+i:k+2*i] and k<len(st):   
                    k = k + i
                st = st[:j] + st[k:]    
    return st

# Clean in-comment repetitions ("哈哈哈哈" -> "哈") before any text analysis.
df["评论内容"] = df["评论内容"].apply(func)
df.head()  # no-op outside a notebook; kept from the original flow
# Comment volume per day-of-month.
comment_day = df.groupby("评论天数")["评论天数"].count()
comment_day.plot(color="r",linewidth=3)
plt.ylabel("每一天的评论数")
# NOTE(review): assumes the data spans exactly 16 distinct days — confirm.
plt.xticks(range(1,17),comment_day.index)
plt.savefig("评论数随时间的变化趋势",dpi=300)
# Comment volume per hour of the day.
comment_hour = df.groupby("小时")["小时"].count()
comment_hour.plot(color="blue",linewidth=3)
plt.ylabel("二十四小时内的评论数")
plt.xticks(range(0,24),comment_hour.index)
plt.savefig("二十四小时内的评论数的变化趋势",dpi=300)
# Star-rating distribution as a pie chart (share of each star level).
comment_rate = df.groupby("评论星级")["评论星级"].count()
percentage = comment_rate / df["评论星级"].shape[0]
percentage.plot(kind="pie")
plt.axis("equal")
plt.savefig("星级评分的饼图",dpi=300)
import jieba
from wordcloud import WordCloud
from imageio import imread
# 1) Tokenize the comment text.
jieba.add_word("大秦帝国")  # keep this title as one token instead of splitting it
df["切分后评论"] = df["评论内容"].apply(jieba.lcut)

# 2) Load the stop-word list and filter stop words out of every comment.
with open("stopword.txt","r",encoding="gbk") as f:
    stop = f.read()  # returns one big string
# split() cuts on spaces, \n and \t, so we add those back as stop words too.
stop = stop.split()
stop = stop + [" ","\n","\t"]
df_after = df["切分后评论"].apply(lambda x: [i for i in x if i not in stop])

# 3) Word-frequency statistics over all remaining tokens.
all_words = []
for i in df_after:
    all_words.extend(i)

word_count = pd.Series(all_words).value_counts()

# 4) Draw the word cloud.
# I. Read the background (mask) image.
back_picture = imread("alice_color.png")
# II. Configure the word-cloud renderer.
wc = WordCloud(font_path="simhei.ttf",
               background_color="white",
               max_words=2000,
               mask=back_picture,
               max_font_size=200,
               random_state=42
              )
wc2 = wc.fit_words(word_count)
# III. Render and save the word cloud.
plt.figure(figsize=(20,10))
plt.imshow(wc2)
plt.axis("off")
plt.show()
wc.to_file("ciyun.png")

评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包

打赏作者

须知少时凌云志,曾许人间第一流

你的鼓励将是我创作的最大动力

¥1 ¥2 ¥4 ¥6 ¥10 ¥20
扫码支付:¥1
获取中
扫码支付

您的余额不足,请更换扫码支付或充值

打赏作者

实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值