from imageio import imread
import warnings
warnings.filterwarnings("ignore")
import jieba  # Chinese word segmentation package
import numpy  # numerical computation package
import codecs  # codecs.open lets us specify a file's encoding; text is decoded to unicode on read
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline  # NOTE: IPython/Jupyter magic — invalid syntax in a plain .py file; re-enable only inside a notebook
import matplotlib
from wordcloud import WordCloud, ImageColorGenerator  # word-cloud rendering + image-based coloring
# Word-cloud figure size. (The original set figsize twice — (10, 5) then (15, 15);
# only the last assignment took effect, so a single setting is kept.)
matplotlib.rcParams['figure.figsize'] = (15.0, 15.0)
def createSuperWordCloud(text_path, image_path,
                         stopwords_path="data/stopwords.txt",
                         font_path='data/simhei.ttf'):
    """Render a word cloud shaped and colored by a background image.

    Reads a UTF-8 CSV with a ``content`` text column (e.g.
    ``./data/entertainment_news.csv``), segments the text with jieba,
    filters stopwords, computes word frequencies, and draws a word cloud
    masked and re-colored by *image_path* via matplotlib.

    Parameters
    ----------
    text_path : str
        Path to a UTF-8 CSV file containing a ``content`` column.
    image_path : str
        Path to the image used both as mask shape and color source.
    stopwords_path : str, optional
        Tab-separated stopword list, one word per line (default keeps the
        original hard-coded path for backward compatibility).
    font_path : str, optional
        TrueType font capable of rendering CJK characters.
    """
    df = pd.read_csv(text_path, encoding='utf-8')
    # Drop empty rows
    df = df.dropna()
    # Turn the text column into a plain Python list
    content = df.content.values.tolist()
    segment = []
    for line in content:
        try:
            # jieba.lcut returns a list of tokens
            segs = jieba.lcut(line)
            for seg in segs:
                # Keep only multi-character tokens that are not line breaks
                if len(seg) > 1 and seg != '\r\n':
                    segment.append(seg)
        except Exception:
            # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
            # still propagate; print the offending line and keep going.
            print(line)
            continue
    words_df = pd.DataFrame({'segment': segment})
    # quoting=3 (csv.QUOTE_NONE): never treat quote characters specially
    stopwords = pd.read_csv(stopwords_path, index_col=False, quoting=3,
                            sep="\t", names=['stopword'], encoding='utf-8')
    # Drop every token that appears in the stopword list
    words_df = words_df[~words_df.segment.isin(stopwords.stopword)]
    # Word-frequency statistics: count occurrences per token, sort descending
    words_stat = words_df.groupby('segment').agg(
        计数=pd.NamedAgg(column='segment', aggfunc='size')
    ).reset_index().sort_values(by='计数', ascending=False)
    # Load the background image used as mask and color source
    bimg = imread(image_path)
    # Build the word cloud
    wordcloud = WordCloud(background_color="white", mask=bimg,
                          font_path=font_path, max_font_size=200)
    # {word: count} for the 1000 most frequent tokens
    word_frequence = {x[0]: x[1] for x in words_stat.head(1000).values}
    wordcloud = wordcloud.fit_words(word_frequence)
    # Re-color the cloud with the background image's colors
    bimgColors = ImageColorGenerator(bimg)
    # Hide the axes
    plt.axis("off")
    # Draw the re-colored cloud
    plt.imshow(wordcloud.recolor(color_func=bimgColors))
# Invoke only when run as a script, so importing this module has no side effects.
if __name__ == "__main__":
    createSuperWordCloud("./data/entertainment_news.csv", 'image/entertainment.jpeg')
# 【NLP】生成词云 ("NLP: generating word clouds" — scraped blog-post title, not code)
# 最新推荐文章于 2024-05-17 17:28:09 发布 (blog footer: "latest recommended article published 2024-05-17")