import jieba  # jieba Chinese word segmentation library
with open('test.txt', 'r', encoding='UTF-8') as novelFile:
    novel = novelFile.read()
# print(novel)
stopwords = [line.strip() for line in open('stop.txt', 'r', encoding='UTF-8').readlines()]
# line.strip() removes leading/trailing whitespace (including the newline) from each stopword line
novelList = list(jieba.lcut(novel))  # precise-mode segmentation: split the text exactly, with no redundant tokens
novelDict = {}
# Build a word-frequency dictionary
for word in novelList:
    if word not in stopwords:
        # Skip single-character words
        if len(word) == 1:
            continue
        else:
            novelDict[word] = novelDict.get(word, 0) + 1
# Sort by frequency
novelListSorted = list(novelDict.items())
novelListSorted.sort(key=lambda e: e[1], reverse=True)  # sort by the second element of each (word, count) tuple, descending
# Print the 10 most frequent words
for topWordTup in novelListSorted[:10]:
    print(topWordTup)
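# An equivalent, more compact way to count words and take the top 10 is the
# standard-library collections.Counter; shown as a commented-out sketch so the
# dictionary-based version above stays in effect:
# from collections import Counter
# novelCounter = Counter(w for w in novelList if w not in stopwords and len(w) > 1)
# for topWordTup in novelCounter.most_common(10):
#     print(topWordTup)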
from matplotlib import pyplot as plt
x = [c for c,v in novelListSorted]
y = [v for c,v in novelListSorted]
plt.plot(x[:10],y[:10],color='r')
plt.show()
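# Note: the x-axis labels are Chinese words, so matplotlib needs a font that
# covers Chinese glyphs or the tick labels render as empty boxes. A minimal
# sketch, assuming a font such as Microsoft YaHei is installed:
# plt.rcParams['font.sans-serif'] = ['Microsoft YaHei']
# plt.rcParams['axes.unicode_minus'] = False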
import jieba  # jieba Chinese word segmentation library
with open('data/data131368/test.txt', 'r', encoding='UTF-8') as novelFile:
    novel = novelFile.read()
# print(novel)
stopwords = [line.strip() for line in open('/home/aistudio/data/data131368/stop.txt', 'r', encoding='UTF-8').readlines()]
novelList = list(jieba.lcut(novel))
novelDict = {}
# Build a word-frequency dictionary
for word in novelList:
    if word not in stopwords:
        # Skip single-character words
        if len(word) == 1:
            continue
        else:
            novelDict[word] = novelDict.get(word, 0) + 1
from wordcloud import WordCloud, ImageColorGenerator
import jieba
import matplotlib.pyplot as plt
from imageio import imread
# Read in a background image (not actually done here; see the mask sketch below)
# Generate the word cloud image
wordcloud = WordCloud(background_color='white',
                      scale=1.5,
                      font_path='data/data131368/msyh.ttc').generate(' '.join(novelDict.keys()))
plt.imshow(wordcloud)
plt.axis('off')
plt.show()
# Save the image
wordcloud.to_file('父亲1.jpg')
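# Because generate() receives only novelDict.keys(), every word ends up with the
# same weight; WordCloud.generate_from_frequencies(novelDict) would size words by
# their counts instead. The ImageColorGenerator / imread imports above are meant
# for shaping and coloring the cloud with a mask image. A minimal sketch, assuming
# a hypothetical mask file 'mask.png':
# maskImage = imread('mask.png')
# wordcloud = WordCloud(background_color='white', mask=maskImage,
#                       font_path='data/data131368/msyh.ttc').generate_from_frequencies(novelDict)
# wordcloud.recolor(color_func=ImageColorGenerator(maskImage))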
# wordcloud usage reference: https://www.bbsmax.com/A/8Bz89m4LJx/