import jieba
import wordcloud
import pandas as pd
# Read the Excel file; build a word cloud from its 'content' column.
df = pd.read_excel('chinanews2.xlsx', sheet_name='news2')

# Segment each article with jieba; str() guards against NaN / non-string cells.
segmented = df['content'].apply(lambda x: jieba.lcut(str(x)))

# Flatten the per-row token lists and join with spaces so WordCloud can
# re-tokenize on whitespace. A generator avoids the quadratic list
# concatenation of Series.sum().
text = ' '.join(word for words in segmented for word in words)

# Words to exclude from the cloud (function words, date tokens, etc.).
# A set gives O(1) membership checks and drops the duplicated "但".
stopwords = {
    "的", "是", "了", "记者", "和", "在", "社", "月", "日", "日电", "说", "不",
    "而", "这", "到", "被", "就", "认为", "以及", "已经", "上", "后", "其",
    "都", "他", "来", "也", "将", "为", "没有", "从", "已", "向", "等", "年",
    "但", "有", "对", "又", "应该", "要", "我们", "与", "并",
}

# font_path must point to a CJK-capable font (msyh.ttc = Microsoft YaHei,
# found in the local font directory); an absolute path also works.
wc = wordcloud.WordCloud(font_path="msyh.ttc",
                         width=1000,
                         height=700,
                         background_color='white',
                         max_words=100,
                         stopwords=stopwords)
wc.generate(text)            # compute word frequencies and layout
wc.to_file("chinanews.png")  # save the rendered cloud
# Purpose: use Python to generate a word cloud and extract keywords from scraped news.
# (Source article last updated 2024-04-04 08:29:13.)