import jieba

# Segmentation tokens that look like names but are not people
# (titles, places, common phrases) — removed from the final counts.
excludes = {"将军","却说","荆州","二人","不可","不能","如此","丞相","商议","主公",
"如何","军士","左右","军马","引兵","次日","大喜","天下","东吴","于是",
"今日","不敢","魏兵","陛下","一人","都督","人马","不知"}

# Read the full text of the novel; the context manager guarantees the
# file handle is closed (the original left it open).
with open("../resources/threekingdoms.txt", "r", encoding="utf-8") as f:
    txt = f.read()

# Segment the text into a word list.
words = jieba.lcut(txt)

# Count word frequencies, merging known aliases of the same person.
counts = {}
for word in words:
    if len(word) == 1:
        # Single characters are almost never personal names — skip them.
        continue
    elif word == "诸葛亮" or word == "孔明曰":
        rword = "孔明"
    elif word == "关公" or word == "云长":
        rword = "关羽"
    elif word == "玄德" or word == "玄德曰":
        # BUG FIX: "玄德" is Liu Bei's courtesy name; the original code
        # wrongly merged it into "曹操" (Cao Cao). Merge into "刘备".
        rword = "刘备"
    else:
        rword = word
    counts[rword] = counts.get(rword, 0) + 1

# Remove the non-name words. pop(..., None) avoids the KeyError that
# `del counts[word]` raised when an excluded word never appeared.
for word in excludes:
    counts.pop(word, None)

# Sort by frequency, descending, and print the top 10; the slice is safe
# even if fewer than 10 distinct words remain (range(10) would crash).
items = sorted(counts.items(), key=lambda kv: kv[1], reverse=True)
for word, count in items[:10]:
    print("{0:<10}{1:>5}".format(word, count))
# Source: "jieba Chinese segmentation — Three Kingdoms character name frequency (Python)"
# First published 2023-10-10 15:20:41