import jieba
# Word-frequency analysis of "Dream of the Red Chamber" (红楼梦):
# segment the novel with jieba, drop stopwords and single-character tokens,
# merge known aliases of the main characters onto one canonical name, then
# print and append to result.csv every name/word occurring at least 40 times.

# Raw strings: "E:\KKK\..." only worked because \K happens not to be a
# recognized escape; that is a DeprecationWarning on modern Python.
f = r"E:\KKK\红楼梦2.txt"   # novel text (UTF-8)
sf = r"E:\KKK\停用词2.txt"  # stopword list, one word per line

# Context managers close the files even if reading/segmenting raises.
with open(f, 'r', encoding='utf-8') as f1:
    txt = jieba.lcut(f1.read())  # full segmentation of the novel

# Stopwords as a set: O(1) membership tests instead of an O(n) list scan
# per token. strip() (vs the original line[:-1]) also handles a final line
# that has no trailing newline without chopping its last character.
with open(sf, 'r', encoding='utf-8') as f2:
    ty = {line.strip() for line in f2}

# Aliases / nicknames of the major characters, mapped to a canonical name
# so one character's mentions are counted together.
aliases = {
    '凤姐儿': '凤姐', '凤丫头': '凤姐',
    '二爷': '宝玉', '宝二爷': '宝玉',
    '颦儿': '黛玉', '林妹妹': '黛玉', '黛玉道': '黛玉',
    '宝丫头': '宝钗',
    '老祖宗': '贾母',
    '袭人道': '袭人',
    '贾政道': '贾政',
    '琏二爷': '贾琏',
}

d = {}  # canonical word -> occurrence count
for word in txt:
    # Single characters are mostly punctuation/particles; skip them along
    # with stopwords before counting (same net filter as the original
    # two-pass version, done in one pass).
    if len(word) <= 1 or word in ty:
        continue
    rword = aliases.get(word, word)
    d[rword] = d.get(rword, 0) + 1

# Most frequent first.
ls = sorted(d.items(), key=lambda kv: kv[1], reverse=True)

# NOTE(review): 'a' appends across runs, so re-running duplicates rows in
# result.csv — confirm whether 'w' (overwrite) was intended.
with open(r'result.csv', 'a', encoding='utf-8') as fo:
    for name, count in ls:
        if count >= 40:  # report only frequent words
            print("{},{}".format(name, count))
            fo.write("{},{}\n".format(name, count))
# jieba分词 红楼梦
# 最新推荐文章于 2024-07-07 18:55:48 发布