import pandas as pd
import jieba
from wordcloud import WordCloud
import matplotlib.pyplot as plt
def word_cloud(words):
"""
3、将文本文件分词,将排在前 100 且有意义的中文词做成词云图
4、将排在前 100 且有意义的中文词及其出现的次数,生成 WORD 文档。
:return:
"""
#导入停用词
text = ""
for t in words:
text = text + t
with open(r"stop_words.txt", 'r', encoding='UTF-8') as file:
stop_words = file.read().split("\n")
#统计词频
keywords = jieba.lcut(text)
counts = {}
cloud_data = []
for word in keywords:
if len(word) == 1: # 不使用单字作为关键词
continue
elif word in stop_words:
continue
else:
rword = word
counts[rword] = counts.get(rword, 0) + 1
items = list(counts.items())
items.sort(key=lambda x: x[1], reverse=True)
cut_text = jieba.cut(text)
result = ' '.join(cut_text)
word_cloud = WordCloud(font_path="simsun.ttc", # 设置词云字体
background_color="white", width=1000, height=880, # 词云图的背景颜色、高度和宽度
max_words=100,
stopwords=stop_words
) # 去掉的停词
word_cloud.generate(result)
plt.subplots(figsize=(10, 8))
plt.imshow(word_cloud)
plt.axis("off")
plt.show()
if __name__ == '__main__':
df = pd.read_csv('douban_comment.csv', names=['用户名', '评分', '评论时间', '地区', '评论内容'])
df = df.drop_duplicates() # 去重
words = df['评论内容'].tolist()
word_cloud(words)
stop_words.txt