# 1. The data visualization analysis is only partial; the main crawling part is complete.
# Import the required libraries
import requests
import json
import time
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator
import csv
import jieba
from os import path
def get_danmu_one_page(url_dm):
    """
    :param url_dm: danmu (bullet-comment) URL for the video
    :return: one page of danmu data as a DataFrame
    """
    # Request headers.
    # The cookie below may have expired; update it if requests fail. The user-agent
    # and cookie can both be copied from the browser's developer tools.
    headers = {
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.105 Safari/537.36',
        'cookie': 'pgv_pvid=2656626432; pgv_pvi=9245538304; RK=O0SkBkcHwP; ptcz=d1cd7a9b7adfaf71f46b1feb5e216cc619972f4d0cfb44fbbbb2ecbc66874947; eas_sid=C1f5Y9q6f8O6y4S1Y8E420g7S7; tvfe_boss_uuid=ecad4d09a5a1d39c; video_guid=0e25824eb8017313; video_platform=2; pac_uid=1_3421355804; iip=0; o_cookie=3421355804; luin=o3421355804; lskey=000100009827634a0e80c1e971cf338f9c9f93c2cc6c18ac77cf82c87c4ef92500b2409b1f6e44d5b85286e2; main_login=qq; vuserid=1673668035; vusession=BhyKiwvBgoSkRsHSFU1HJg..; login_time_init=1605589774; _video_qq_version=1.1; _video_qq_main_login=qq; _video_qq_appid=3000501; _video_qq_vuserid=1673668035; _video_qq_vusession=BhyKiwvBgoSkRsHSFU1HJg..; _video_qq_login_time_init=1605589774; pgv_info=ssid=s6331875463; next_refresh_time=6587; _video_qq_next_refresh_time=6587; login_time_last=2020-11-17 13:9:45',
        'referer': 'https://v.qq.com/x/cover/mzc00200mu6m8yo.html',
    }
    # Send the request; retry once after a short sleep on failure
    try:
        r = requests.get(url_dm, headers=headers, timeout=3)
    except Exception as e:
        print(e)
        time.sleep(3)
        r = requests.get(url_dm, headers=headers, timeout=3)
    # Parse the JSON response
    data = json.loads(r.text, strict=False)['comments']
    # Comment ID
    comment_id = [i.get('commentid') for i in data]
    # User name
    oper_name = [i.get('opername') for i in data]
    # VIP level
    vip_degree = [i.get('uservip_degree') for i in data]
    # Comment content
    content = [i.get('content') for i in data]
    # Time point of the comment within the video
    time_point = [i.get('timepoint') for i in data]
    # Like (up-vote) count
    up_count = [i.get('upcount') for i in data]
    # Collect everything into a DataFrame
    df_one = pd.DataFrame({
        'comment_id': comment_id,
        'oper_name': oper_name,
        'vip_degree': vip_degree,
        'content': content,
        'time_point': time_point,
        'up_count': up_count
    })
    print(df_one)
    return df_one
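# For reference, each element of the 'comments' list returned by the API is a dict
# carrying the fields read above. A purely illustrative (made-up) example, using only
# the field names that get_danmu_one_page accesses; the values are hypothetical:
#
# {
#     "commentid": "123456789",      # hypothetical value
#     "opername": "",                # often empty for anonymous viewers
#     "uservip_degree": 0,
#     "content": "...",
#     "timepoint": 15,               # position within the video
#     "upcount": 3
# }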
def get_danmu_all_page(target_id, vid):
    """
    :param target_id: target_id of the episode (from packet capture)
    :param vid: vid of the episode (from packet capture)
    :return: all pages of danmu as one DataFrame
    """
    df_all = pd.DataFrame()
    # Page counter
    step = 1
    # The danmu API is paged by timestamp in 30-second steps; raise the upper bound
    # (here 45, i.e. a single page) to a value larger than the video length in seconds
    # to fetch every page.
    for time_stamp in range(15, 45, 30):
        try:  # exception handling
            # Build the request URL
            url_dm = 'https://mfm.video.qq.com/danmu?target_id={}&vid={}&timestamp={}'.format(
                target_id, vid, time_stamp)
            # Fetch one page
            df = get_danmu_one_page(url_dm)
            # Stop when a page comes back empty
            if df.shape[0] == 0:
                print('No data!')
                break
            else:
                df_all = pd.concat([df_all, df], ignore_index=True)
                # Print progress
                print(f'Fetching page {step}')
                step += 1
                # Sleep one second between requests
                time.sleep(1)
        except Exception as e:
            print(e)
            continue
    print(f'Crawler stopped, {df_all.shape[0]} danmu collected in total!')
    return df_all
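# Rough guide for the loop bound in get_danmu_all_page: the API pages danmu in
# 30-second windows, so an episode of about 90 minutes (~5400 s) needs roughly
# 5400 / 30 = 180 requests, e.g. for time_stamp in range(15, 5430, 30).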
# target_id and vid are obtained by manually capturing the danmu requests in the
# browser's developer tools (Network panel).
# To crawl "Episode 1, part 2" instead, add # in front of the "Episode 1, part 1"
# lines below and remove the # in front of the "Episode 1, part 2" lines.
# Episode 1, part 1
df_1 = get_danmu_all_page(target_id='5979258600', vid='f0034cqed1f')
df_1.insert(0, 'episodes', '第一期上')
df_1.to_csv('第一期上集.csv', index=True)
# Episode 1, part 2
# df_2 = get_danmu_all_page(target_id='5977891317', vid='y00340t6k8g')
# df_2.insert(0, 'episodes', '第一期下')
# # Episode 2, part 1
# df_3 = get_danmu_all_page(target_id='6012064484', vid='v0034wu6i8c')
# df_3.insert(0, 'episodes', '第二期上')
#
# # Episode 2, part 2
# df_4 = get_danmu_all_page(target_id='6013331442', vid='m0034bdzygg')
# df_4.insert(0, 'episodes', '第二期下')
#
# # Episode 3, part 1
# df_5 = get_danmu_all_page(target_id='6045300272', vid='w0034sa0mnj')
# df_5.insert(0, 'episodes', '第三期上')
#
# # Episode 3, part 2
# df_6 = get_danmu_all_page(target_id='6045532740', vid='r0034sclogn')
# df_6.insert(0, 'episodes', '第三期下')
#
# # Episode 4, part 1
# df_7 = get_danmu_all_page(target_id='6078160126', vid='s0034dat3nf')
# df_7.insert(0, 'episodes', '第四期上')
#
# # Episode 4, part 2
# df_8 = get_danmu_all_page(target_id='6078222925', vid='u0034wv3xvg')
# df_8.insert(0, 'episodes', '第四期下')
#
# # Episode 5, part 1
# df_9 = get_danmu_all_page(target_id='6111483265', vid='t003495xitz')
# df_9.insert(0, 'episodes', '第五期上')
#
# # Episode 5, part 2
# df_10 = get_danmu_all_page(target_id='6111485603', vid='i0034tigcf1')
# df_10.insert(0, 'episodes', '第五期下')
# Store the DataFrames in a list and write them out in a loop
# df_list = [df_1, df_2]
# # df_1, df_2, df_3, df_4, df_5, df_6, df_7, df_8,
# for df_name in df_list:
#     # Episode label of this DataFrame
#     epi_num = df_name['episodes'][0]
#     print(f'Writing out the data for {epi_num}')
#     df_name.to_csv(f'./datas/{epi_num}集.csv', index=True)
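# A more compact way to crawl several episodes, sketched as a helper function.
# It simply wraps the per-episode pattern used above; the (label, target_id, vid)
# triples are the ones listed in the commented-out blocks.
def crawl_episodes(episode_list, out_dir='.'):
    """Crawl each (label, target_id, vid) triple and write one CSV per episode."""
    frames = []
    for label, target_id, vid in episode_list:
        df = get_danmu_all_page(target_id=target_id, vid=vid)
        df.insert(0, 'episodes', label)
        df.to_csv(f'{out_dir}/{label}集.csv', index=True)
        frames.append(df)
    # Return everything merged as well, which is convenient for cross-episode analysis
    return pd.concat(frames, ignore_index=True)

# Example call (commented out so the script's behaviour is unchanged):
# df_all_eps = crawl_episodes([('第一期上', '5979258600', 'f0034cqed1f'),
#                              ('第一期下', '5977891317', 'y00340t6k8g')])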
ds = pd.read_csv("C:/Users/Admin/Desktop/第一期上集.csv")
print(ds.head(30))  # show the first 30 crawled rows
# Read the saved danmu file back in as plain text for the word cloud
with open('C:/Users/Admin/Desktop/第一期上集.csv', 'r', encoding='utf8') as file:
    t = file.read()
# Tokenize the text with jieba and join the tokens with spaces
ls = jieba.lcut(t)
txt = " ".join(ls)
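# Note: wordcloud's STOPWORDS is an English list, so it removes almost nothing from
# Chinese danmu. A minimal sketch of Chinese stop-word filtering, assuming a local
# one-word-per-line file named stopwords_zh.txt (hypothetical filename):
if path.exists('stopwords_zh.txt'):
    with open('stopwords_zh.txt', encoding='utf8') as f:
        zh_stopwords = set(f.read().split())
    ls = [w for w in ls if w.strip() and w not in zh_stopwords]
    txt = " ".join(ls)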
# Load a custom picture; it is used below as the colour source for the word cloud
backgroud_Image = plt.imread('C:/Users/Admin/Desktop/蜡笔小新.jpg')
print('Image loaded successfully!')
w = WordCloud(
    # Font that can render Chinese characters
    font_path="msyh.ttc",
    # Canvas width
    width=1000,
    # Canvas height
    height=800,
    # Background colour
    background_color="white",
    # Stop words
    stopwords=STOPWORDS,
    # Maximum font size
    max_font_size=150,
)
w.generate(txt)
print('Text loaded into the word cloud')
# Recolour the words with the colours of the loaded picture
# (the picture must be at least as large as the word-cloud canvas)
img_colors = ImageColorGenerator(backgroud_Image)
w.recolor(color_func=img_colors)
# Show the word cloud
plt.imshow(w)
# Hide the x/y axes
plt.axis('off')
# Display the figure
plt.show()
# Directory this script lives in
d = path.dirname(__file__)
# Optionally save the word cloud to disk, e.g.:
# w.to_file("C:/Users/Admin/Desktop/wordcloud.jpg")
print('Word cloud generated successfully!')
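# PIL.Image and numpy are imported above but never used; they are typically needed
# when the picture is also used as a mask. A minimal sketch of that variant,
# commented out and assuming the same picture file:
# mask_array = np.array(Image.open('C:/Users/Admin/Desktop/蜡笔小新.jpg'))
# w_masked = WordCloud(font_path="msyh.ttc", background_color="white",
#                      mask=mask_array, max_font_size=150)
# w_masked.generate(txt)
# w_masked.recolor(color_func=ImageColorGenerator(mask_array))
# w_masked.to_file('C:/Users/Admin/Desktop/wordcloud_masked.jpg')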
with open('C:/Users/Admin/Desktop/第一期上集.csv', "r", encoding="utf-8") as exampleFile:
    # Use a font that can display Chinese labels in matplotlib
    plt.rcParams['font.sans-serif'] = ['SimHei']
    plt.rcParams['axes.unicode_minus'] = False
    # Read the csv file
    exampleReader = csv.reader(exampleFile)
    # Convert the csv rows into a list
    exampleData = list(exampleReader)
    # Number of rows
    length_zu = len(exampleData)
    # Number of columns per row
    length_yuan = len(exampleData[0])
    # for i in range(1, length_zu):
    #     print(exampleData[i])
    x = list()
    y = list()
    # Take the content (column 5) and like count (column 7) of the first five danmu;
    # row 0 is the header, so start at row 1
    for i in range(1, 6):
        x.append(exampleData[i][5])  # danmu content
        y.append(exampleData[i][7])  # like count
plt.plot(x, y)  # line plot: content on the x axis, like count on the y axis
plt.show()  # show the plot
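# The plot above only uses the first five rows of the file. A short sketch, using the
# DataFrame already loaded as ds, of the five danmu with the highest like counts:
top5 = ds.sort_values('up_count', ascending=False).head(5)
print(top5[['content', 'up_count']])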
# Count how many danmu were posted at each VIP level
time0_on = [] #VIP 0
time1_on = [] #VIP 1
time2_on = [] #VIP 2
time3_on = [] #VIP 3
time4_on = [] #VIP 4
time5_on = [] #VIP 5
time6_on = [] #VIP 6
time7_on = [] #VIP 7
time8_on = [] #VIP 8
rd = pd.read_csv('C:/Users/Admin/Desktop/第一期上集.csv',encoding="utf-8")
for i in rd['vip_degree']:
if i ==0:
time0_on.append(i)
elif i ==1:
time1_on.append(i)
elif i == 2:
time2_on.append(i)
elif i ==3:
time3_on.append(i)
elif i ==4:
time4_on.append(i)
elif i ==5:
time5_on.append(i)
elif i ==6:
time6_on.append(i)
elif i == 7:
time7_on.append(i)
else:
time8_on.append(i)
values1_on = [len(time0_on),len(time1_on), len(time2_on), len(time3_on), len(time4_on), len(time5_on),len(time6_on), len(time7_on), len(time8_on)]
values2_on = ['VIP0','VIP1','VIP2','VIP3','VIP4','VIP5','VIP6','VIP7','VIP8']
plt.plot(values2_on, values1_on, label='danmu count per VIP level', color='y', linewidth=3.0)
plt.legend()
plt.show()
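# A more direct way to compute the same counts, sketched with pandas value_counts and
# drawn as a bar chart (categorical VIP levels usually read better as bars than lines):
vip_counts = rd['vip_degree'].value_counts().sort_index()
vip_counts.plot(kind='bar', color='y')
plt.xlabel('VIP level')
plt.ylabel('danmu count')
plt.show()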