# Read an online e-book (Project Gutenberg UTF-8 HTML edition).
from urllib.request import urlopen

url = 'https://www.gutenberg.org/cache/epub/25606/pg25606-images.html'
# Use a context manager so the HTTP connection is closed promptly
# (the original left the response object open).
with urlopen(url) as response:
    html = response.read().decode('utf-8')

from nltk.book import *  # provides FreqDist used below
from nltk.corpus import PlaintextCorpusReader
import nltk
# Extract Chinese characters from the HTML.
# BUG FIX: the upper bound was '\uefa5', a typo for '\u9fa5' — the CJK
# Unified Ideographs block is U+4E00..U+9FA5, so the typo let many
# non-Chinese code points through.
htmlls = [ch for ch in html if '\u4e00' <= ch <= '\u9fa5']

fdist = FreqDist(htmlls)
print(fdist.most_common(5))  # print the 5 most frequent characters

# Lexical dispersion plot
import matplotlib
matplotlib.rcParams['font.sans-serif'] = 'SimHei'  # render CJK glyphs in plot labels
# Plot against the extracted character list, not the raw HTML (which
# includes markup). NOTE(review): tokens here are single characters, so
# multi-character targets like '八月' can never match — consider
# segmenting with jieba first if those matters.
nltk.draw.dispersion.dispersion_plot(htmlls, ['八月', '兵', '人力', '天下', '今日'])
# Read a local e-book.
# Raw string avoids accidental backslash escapes in the Windows path, and
# the encoding is explicit instead of locale-dependent.
# NOTE(review): assumes the file is UTF-8 — use encoding='gbk' if it is not.
with open(r"E:\围城.txt", 'r', encoding='utf-8') as f:
    txt = f.read()

# Keep only Chinese characters (regex), then segment into words with jieba.
import jieba
import re
cleaned_txt = ''.join(re.findall(r'[\u4e00-\u9fa5]', txt))
wordlist = jieba.lcut(cleaned_txt)
text = nltk.Text(wordlist)
# BUG FIX: the distribution must be over the segmented words, not the raw
# file text — FreqDist(txt) counted individual characters (punctuation,
# markup and all), which is inconsistent with the word cloud below.
fdist = FreqDist(wordlist)
words = ['方鸿渐', '赵新楣', '孙柔嘉', '苏文纨', '唐晓芙']
nltk.draw.dispersion.dispersion_plot(text, words, title='词汇离散图')
# Draw a word cloud from the word frequencies.
from wordcloud import WordCloud
import matplotlib.pyplot as plt

fdist_dict = dict(FreqDist(text))
wc = WordCloud(font_path="simhei.ttf", background_color="white",
               width=800, height=600).fit_words(fdist_dict)
plt.imshow(wc)
plt.axis('off')  # hide axis ticks around the rendered image
plt.show()
# BUG FIX: the line below was bare prose in the code, which is a
# SyntaxError — it must be a comment:
# If the font raises an error, it may be a Pillow version issue; try
# upgrading the Pillow package.