1、处理文本数据
在生成词云时,wordcloud默认会以空格或标点为分隔符对目标文本进行分词处
理。对于中文文本,分词处理需要由用户来完成。一般步骤是先将文本分词处理,
然后以空格拼接,再调用wordcloud库函数
2、产生词云图片
wordcloud库的核心是WordCloud类,所有的功能都封装在WordCloud类中。使用时需要实例化一个WordCloud类的对象,并调用其generate(text)方法将text文本转化为词云
- 实验代码
"""
Created on Sat Feb 13 20:42:14 2021
@author: Cynthia
"""
import numpy as np
from wordcloud import WordCloud, ImageColorGenerator#, STOPWORDS
import matplotlib.pyplot as plt
from PIL import Image
import jieba # cutting Chinese sentences into words
def plt_imshow(x, ax=None, show=True):
    """Render *x* on a matplotlib axis with the axis frame hidden.

    Parameters
    ----------
    x : any object accepted by ``Axes.imshow`` (ndarray, WordCloud, ...)
    ax : axis to draw on; when None a fresh figure/axis pair is created
    show : when True, call ``plt.show()`` right after drawing

    Returns
    -------
    matplotlib.axes.Axes
        The axis that was drawn on (useful for saving via ``ax.figure``).
    """
    if ax is None:
        _, ax = plt.subplots()
    ax.imshow(x)
    ax.axis("off")
    if show:
        plt.show()
    return ax
def count_frequencies(word_list):
    """Count how often each word occurs in *word_list*.

    Parameters
    ----------
    word_list : iterable of hashable items (typically ``str`` words)

    Returns
    -------
    dict
        Maps each distinct word to its occurrence count.
    """
    freq = {}
    for w in word_list:
        # Single lookup with a default, instead of the LBYL
        # `if w not in freq.keys()` double-lookup pattern.
        freq[w] = freq.get(w, 0) + 1
    return freq
# In[]
if __name__ == '__main__':
    # --- input paths ---
    fname_text = 'texts/article.txt'            # article to visualize
    fname_stop = 'stopwords/hit_stopwords.txt'  # HIT Chinese stop-word list
    fname_mask = 'pictures/owl.jpeg'            # shape + color source for the cloud
    fname_font = 'SourceHanSerifK-Light.otf'    # font containing Chinese glyphs

    # Read the article; context managers close the files (the originals leaked
    # the handles returned by bare open(...).read()).
    with open(fname_text, encoding='utf8') as f:
        text = f.read()

    # Chinese stop words; a set gives O(1) membership tests in the filter below.
    with open(fname_stop, encoding='utf8') as f:
        STOPWORDS_CH = set(f.read().split())

    # Cut the text into words, dropping stop-words and single characters.
    word_list = [
        w for w in jieba.cut(text)
        if w not in STOPWORDS_CH and len(w) > 1
    ]
    freq = count_frequencies(word_list)

    # The mask image drives both the cloud's outline and its recoloring.
    im_mask = np.array(Image.open(fname_mask))
    im_colors = ImageColorGenerator(im_mask)

    # Build the cloud from word frequencies; WordCloud.generate(text) would
    # only be appropriate for whitespace-delimited (e.g. English) text.
    wcd = WordCloud(font_path=fname_font,
                    background_color='white',
                    mode="RGBA",
                    mask=im_mask,
                    )
    wcd.generate_from_frequencies(freq)
    wcd.recolor(color_func=im_colors)

    # Save the cloud alone, then side by side with the mask image.
    ax = plt_imshow(wcd)
    ax.figure.savefig('single_wcd.png', bbox_inches='tight', dpi=150)
    fig, axs = plt.subplots(1, 2)
    plt_imshow(im_mask, axs[0], show=False)
    plt_imshow(wcd, axs[1])
    fig.savefig('conbined_wcd.png', bbox_inches='tight', dpi=150)