首先要爬取哔哩哔哩网站的数据，第一步就是要分析网页，找到网页上ajax异步加载的数据即json文件，找到后还要对其爬取，这里我选用正则表达式解析爬取的网页，
其次是 对爬取的数据进行清洗，防止脏数据对生成的词云造成影响，将清洗完的数据存入MongoDB数据库中
最后是 利用wordcloud这个库 对弹幕进行词频统计，最后利用matplotlib库生成词云图片
blibli.py
import requests
import json
import re
import pymongo
import numpy as np
import pandas as pd
from config import *
# Connect to MongoDB; MONGO_URL / MONGO_DB are supplied by config.py (not visible here).
client = pymongo.MongoClient(MONGO_URL)
db = client[MONGO_DB]
# Fetch one page of the bilibili anime ranking index.
def get_page(i):
    """Return the raw JSON text of page *i* from the bilibili season index API.

    Bug fix: the query string previously contained ``©right=-1`` — the
    ``&copyright`` parameter had been mangled by HTML-entity decoding of
    ``&copy`` — which corrupted the request. Restored to ``&copyright=-1``.
    """
    url = (
        "https://api.bilibili.com/pgc/season/index/result?season_version=-1"
        "&area=-1&is_finish=-1&copyright=-1&season_status=-1&season_month=-1"
        "&year=-1&style_id=-1&order=4&st=1&sort=0&page=" + str(i) +
        "&season_type=1&pagesize=20&type=1"
    )
    # timeout so a stalled connection cannot hang the crawl forever
    response = requests.get(url, timeout=10)
    response.encoding = 'UTF-8'
    return response.text
# Extract the detail-page link for each listed video.
def parse_page_url(js):
    """Yield the link of every non-VIP entry found in a ranking-page JSON blob."""
    payload = json.loads(js)
    if "data" not in payload:
        return
    for entry in payload["data"].get('list'):
        # Skip member-exclusive ("会员专享") videos: their pages require a login.
        if entry.get('badge') != "会员专享":
            yield entry.get('link')
# Scrape one video detail page with a regex over the inline <script> JSON.
def parse_page_detail(url):
    """Fetch a video detail page and return its stats plus today's danmaku list.

    Returns a dict whose Chinese keys form the MongoDB document schema.
    Raises ValueError when the page markup no longer matches the regex,
    instead of crashing with an opaque AttributeError on ``.group()``.
    """
    response = requests.get(url, timeout=10)
    response.encoding = 'UTF-8'
    content = response.text
    # The stats are embedded in a <script> JSON blob; one regex pulls out the
    # rating, the "stat" object, the title and the cid in a single pass.
    pattern = re.compile(
        '<h4>(.*?)</h4>.*?"mediaInfo".*?"stat":(.*?),"id".*?"title":"(.*?)","jpTitle".*?"epList":.*?"cid":(.*?),"from"',
        re.S)
    content_detail = re.search(pattern, content)
    if content_detail is None:
        # Bug fix: the original dereferenced a failed match and died with
        # AttributeError; fail loudly with context instead.
        raise ValueError('unexpected page layout: %s' % url)
    # group(2) is the "stat" JSON object with view/danmaku/coin counters
    item = json.loads(content_detail.group(2))
    return {
        '标题': content_detail.group(3),                 # title
        '综合评分': content_detail.group(1),             # overall rating
        '播放次数': item.get('views'),                   # play count
        '弹幕历史总数': item.get('danmakus'),            # historical danmaku total
        '硬币': item.get('coins'),                       # coins received
        '弹幕': get_danmakus(content_detail.group(4))    # danmaku texts for this cid
    }
# Danmaku comments are loaded asynchronously; fetch today's batch by cid.
def get_danmakus(cid):
    """Download the danmaku XML feed for *cid* and return the comment texts."""
    api = "https://api.bilibili.com/x/v1/dm/list.so?oid=" + cid
    resp = requests.get(api)
    resp.encoding = 'UTF-8'
    # Each comment sits inside a <d p="..."> ... </d> element of the XML feed.
    return re.findall(re.compile('<d p.*?>(.*?)</d>', re.S), resp.text)
# Persist one crawled document to MongoDB.
def save_to_mongo(result):
    """Insert *result* into the configured collection; return True on success.

    Bug fix: ``Collection.insert`` is deprecated since pymongo 3 and removed
    in pymongo 4 — use ``insert_one`` (its InsertOneResult is truthy).
    """
    if db[MONGO_TABLE].insert_one(result):
        print('存储成功', result)
        return True
    return False
if __name__ == '__main__':
    # Crawl pages 1-9 of the ranking list and store every non-VIP entry.
    for page in range(1, 10):
        listing = get_page(page)
        for link in parse_page_url(listing):
            save_to_mongo(parse_page_detail(link))
worldCloud.py
from wordcloud import WordCloud
import pymongo
from config import *
import matplotlib.pyplot as plt
# Connect to MongoDB. NOTE(review): the original comment said a False flag
# (presumably connect=False) was needed to avoid errors under multiprocessing,
# but no such argument is actually passed here — confirm intent.
client = pymongo.MongoClient(MONGO_URL)
db = client[MONGO_DB]
# Load stored documents from the database.
def read_from_mongoDB(title):
    """Return a cursor over every stored document whose 标题 equals *title*."""
    # Project only the fields the word-cloud step actually consumes.
    wanted = ('标题', '综合评分', '播放次数', '弹幕历史总数', '硬币', '弹幕')
    projection = {name: True for name in wanted}
    return db[MONGO_TABLE].find({"标题": title}, projection=projection)
# Generate a word cloud per document.
def show_woldCould(content, font='/usr/share/fonts/opentype/noto/NotoSerifCJK-SemiBold.ttc'):
    """Render, display and save a danmaku word cloud for each document.

    Parameters:
        content: iterable of MongoDB documents carrying '标题' (title) and
            '弹幕' (list of danmaku strings).
        font: path to a CJK-capable font file. Previously hard-coded to this
            Linux system path; now overridable by the caller.
    """
    import os  # local import: only used to ensure the output directory exists
    for item in content:
        title = item.get('标题')
        # WordCloud.generate wants one text blob, not a list of strings.
        text = " ".join(item.get('弹幕'))
        wc = WordCloud(collocations=False, font_path=font, width=1000,
                       height=700, margin=2).generate(text)
        # Display the cloud on screen.
        plt.imshow(wc)
        plt.axis("off")
        plt.show()
        url = './image/{0}.png'.format(title)
        print(url)
        # Bug fix: to_file fails if ./image does not exist yet.
        os.makedirs('./image', exist_ok=True)
        wc.to_file(url)
if __name__ == '__main__':
    # Ask which stored title to visualise, then render its word cloud.
    wanted_title = input("请输入要生成词云的标题:")
    show_woldCould(read_from_mongoDB(wanted_title))