# -*- coding: utf-8 -*-
# Import the required modules
import jieba
import matplotlib.pyplot as plt
from wordcloud import WordCloud
# Build the stop-word list from a file (one stop word per line)
def stopwordslist(filepath):
    with open(filepath, 'r', encoding='utf-8') as f:
        stopwords = [line.strip() for line in f]
    return stopwords
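# Optional tweak: returning set(stopwords) instead would give O(1) membership
# tests in the filtering loop below; the list works too, just more slowly.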
# Segment a sentence with jieba; WordCloud can tokenize on its own, but
# jieba's results are noticeably better for Chinese text
def seg_sentence(sentence):
    sentence_seged = jieba.cut(sentence.strip())
    stopwords = stopwordslist('stopwords1893.txt')  # path to the stop-word file
    outstr = []
    for word in sentence_seged:
        if word not in stopwords and word not in ('\t', ' ', '\n'):
            outstr.append(word)
    return outstr
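# Illustrative call (the exact output depends on jieba's dictionary and on
# which words the stop-word file removes; the sample result is hypothetical):
#   seg_sentence('今天天气真好')  # -> ['今天', '天气'] or similar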
# Read the text file and concatenate its lines into a single string
with open("2.txt", 'r', encoding='utf-8') as f:
    sentence = ''.join(f.readlines())
# Segment the full text with jieba
word_result_list = seg_sentence(sentence)
# Join the words with commas; WordCloud's tokenizer treats them as separators
word_result = ','.join(word_result_list)
plt.figure(figsize=(12,6))
# Path to the Chinese font file
font = r'font/SimHei.ttf'
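# NOTE: SimHei.ttf is assumed to exist at this relative path; any CJK-capable
# .ttf works. Without one, the Chinese words render as empty boxes.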
# Word-cloud parameters
wc = WordCloud(
    background_color='white',  # white background
    colormap='winter',         # 'winter' color scheme
    font_path=font,            # Chinese font
    width=1280,                # image width in pixels
    height=720,                # image height in pixels
    max_font_size=150,         # largest font size used
    max_words=200              # maximum number of words displayed
)
# Feed the text to the word cloud
wc.generate(word_result)
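# Alternative (a minimal sketch): since the text is already segmented, the
# word frequencies could be counted and passed to generate_from_frequencies(),
# bypassing WordCloud's internal tokenizer entirely:
#   from collections import Counter
#   wc.generate_from_frequencies(Counter(word_result_list))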
# Display the word cloud
plt.imshow(wc)
# "off"表示不显示轴坐标
plt.axis("off")
plt.show()
# Save the word cloud image to the current directory
wc.to_file("pict_wordcloud.jpg")
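# to_file() saves via Pillow, which infers the format from the file extension;
# a .png output would avoid JPEG compression artifacts (optional).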