代码以及释义如下:
import jieba
import jieba.analyse
import wordcloud
from PIL import Image, ImageSequence
import numpy as np
import matplotlib.pyplot as plt
from wordcloud import WordCloud, ImageColorGenerator
import jieba
import jieba.analyse
## ============ Word segmentation ============
# Load the stopword list (one word per line), read the crawled novel text,
# and tokenize it with jieba. Binds module-level names: stopwords (list[str]),
# path (str), file_in (str), words (list[str]).

# Use a context manager so the file handle is closed (the original leaked it);
# a file object iterates its lines directly, so readlines() is unnecessary.
with open('chineseStopWords.txt', encoding='utf-8') as stopword_file:
    stopwords = [lines.strip() for lines in stopword_file]
stopwords.append('')  # also filter out empty tokens produced by segmentation

path = '《三国演义》罗贯中.txt'  # path where the step-1 crawler stored its output
with open(path, 'r', encoding='utf-8') as text_file:  # close the handle after reading
    file_in = text_file.read()

# Remove unwanted entries from jieba's dictionary so they are not emitted as tokens.
jieba.del_word("却说")
jieba.del_word("二人")
jieba.del_word("荆州")

words = jieba.lcut(file_in)  # segment the full text into a list of words
# cut_text=" ".join(words)
## ============ Word frequency statistics ============