import jieba
import jieba.analyse
from stylecloud import gen_stylecloud
# 1. Read the article to analyze.
with open("ox.txt", "r", encoding="utf-8") as f1:
    txt = f1.read()
print(txt)

# 2. Segment the text into words; jieba.lcut returns a list of tokens
#    (jieba.cut would return a generator instead).
list1 = jieba.lcut(txt)
print(list1)
# 3. Remove stop words.
# One stop word per line in stopw.txt; build a set for O(1) membership
# tests instead of the original O(n) list scan per token.
with open("stopw.txt", "r", encoding="utf-8") as f2:
    stopwords = {line.strip('\n') for line in f2}

# Keep only tokens that are neither stop words nor bare newlines.
good = [a for a in list1 if a not in stopwords and a != '\n']
print(len(good))
print(len(list1))
str1 = "".join(good)
print(str1)
# 4. Extract keywords: jieba.analyse.textrank(text, topK=n) runs TextRank
#    on the string and returns a list of at most topK keywords.
keylist = jieba.analyse.textrank(str1, topK=10)
print(keylist)
str2 = " ".join(keylist)

# 5. Generate the word cloud; gen_stylecloud takes the space-separated
#    keyword string as its text input.
gen_stylecloud(str2,
               icon_name='fas fa-globe',    # cloud shape; the 'fas fa-' prefix is fixed Font Awesome syntax
               background_color='white',    # background color
               # NOTE(review): stylecloud always writes PNG data, so the
               # '.jpg' extension is misleading — kept to preserve behavior.
               output_name='66.jpg',        # output image file name
               font_path='msyh.ttc')        # font with CJK glyph coverage (Microsoft YaHei)