1. 词云WordCloud——续
①Python中使用open内置函数进行文件读取
②利用函数jieba.lcut(words)进行分词
③过滤重复词和无关词
④按出现次数对人物词频进行排序,取出排名前十的人物
⑤输出图片
示例一:三国TOP10人物分析
import jieba
from wordcloud import WordCloud
# 1. Load the entire novel text from disk in one read.
with open('./novel/threekingdom.txt', 'r', encoding='utf-8') as f:
    words = f.read()

# token -> frequency mapping, e.g. {'曹操': 234, '回寨': 56}
counts = {}

# High-frequency but irrelevant tokens (titles, stock phrases) plus alias
# spellings that get merged into canonical names later, all to be dropped
# from the final ranking.
excludes = set(
    "将军 却说 丞相 二人 不可 荆州 不能 如此 商议 "
    "如何 主公 军士 军马 左右 次日 引兵 大喜 天下 "
    "东吴 于是 今日 不敢 魏兵 陛下 都督 人马 不知 "
    "孔明曰 玄德曰 刘备 云长".split()
)
# 2. Tokenize the raw text with jieba (precise mode, returns a list).
words_list = jieba.lcut(words)
print(words_list)

# Tally every token longer than one character; single-character tokens
# (particles, punctuation fragments) are skipped as noise.
for token in words_list:
    if len(token) <= 1:
        continue
    # dict.get(token, 0) yields 0 on first occurrence instead of raising
    # KeyError the way a plain counts[token] lookup would.
    counts[token] = counts.get(token, 0) + 1
print(counts)
# 3. Merge alias spellings into one canonical name per character, then
# delete the irrelevant / duplicate entries listed in `excludes`.
# Using dict.get(..., 0) keeps the merge safe even when an alias never
# occurs in the text — the original direct indexing raised KeyError there.
counts['孔明'] = counts.get('孔明', 0) + counts.get('孔明曰', 0)
counts['玄德'] = counts.get('玄德', 0) + counts.get('玄德曰', 0) + counts.get('刘备', 0)
counts['关公'] = counts.get('关公', 0) + counts.get('云长', 0)
for word in excludes:
    # pop with a default: removing a word that never appeared is a no-op
    # instead of a KeyError (del counts[word] would crash).
    counts.pop(word, None)
# 4. Materialize the (word, count) pairs so they can be sorted,
#    e.g. [('孔明', 1226), ('玄德', 975), ...].
items = [*counts.items()]
print(items)
def sort_by_count(pair):
    """Sort key: return the frequency (second element) of a (word, count) pair."""
    return pair[1]
# Sort in place, most frequent first. (Removed the commented-out duplicate
# of this call that used a named key function — dead code.)
items.sort(key=lambda pair: pair[1], reverse=True)
li = [] # top-10 character names in rank order, e.g. ['孔明', '玄德', ...]
# iterate over the ten highest-frequency entries
for i in range(10):
# 序列解包
role, c