Dependencies
pip install jieba
pip install matplotlib
pip install wordcloud
pip install snownlp
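To verify that everything installed correctly, a quick check (a minimal sketch; importlib.metadata needs Python 3.8+):

from importlib.metadata import version

# Print the installed version of each dependency
for pkg in ("jieba", "matplotlib", "wordcloud", "snownlp"):
    print(pkg, version(pkg))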
Word Frequency Statistics
import jieba
from wordcloud import WordCloud
import matplotlib.pyplot as plt
# Load a user dictionary so domain-specific terms are kept as single tokens
jieba.load_userdict("news.txt")
corpus = "美媒称,鉴于全球石油市场过度供给的情况,中国原油需求下滑是其首要担忧之一。过量生产拉低了石油价格,但是中国过去一年左右的疲弱需求引发了缓慢的回弹。"
# jieba.cut returns a one-shot generator, so materialize the tokens once
# instead of segmenting the same text twice
words = list(jieba.cut(corpus))
text = " ".join(words)
# Tally how often each token appears
segStat = {}
for seg in words:
    if seg in segStat:
        segStat[seg] += 1
    else:
        segStat[seg] = 1
print(segStat)
# A CJK-capable font (here SimHei) is required, or the Chinese characters render as boxes
wordcloud = WordCloud(font_path="D:\\PDM\\2.1\\simhei.ttf", background_color="black").generate(text)
plt.imshow(wordcloud)
plt.axis("off")
plt.show()
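The manual dictionary tally above can be written more compactly with collections.Counter, which also makes it easy to list the most frequent tokens. A minimal sketch on the same corpus (abbreviated here):

from collections import Counter
import jieba

corpus = "美媒称,鉴于全球石油市场过度供给的情况,中国原油需求下滑是其首要担忧之一。"
counts = Counter(jieba.cut(corpus))
# Print the ten most frequent tokens and their counts
for word, freq in counts.most_common(10):
    print(word, freq)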
Keyword Extraction
# -*- coding: utf-8 -*-
import jieba.analyse
# Corpus
corpus = "美媒称,鉴于全球石油市场过度供给的情况,中国原油需求下滑是其首要担忧之一。过量生产拉低了石油价格,但是中国过去一年左右的疲弱需求引发了缓慢的回弹。"
# Register a custom stop-word list
jieba.analyse.set_stop_words("stop_words.txt")
# Extract keywords with TextRank, keeping only the listed POS tags
# (alternative: tags = jieba.analyse.extract_tags(corpus, topK=5))
tags = jieba.analyse.textrank(corpus, topK=5, withWeight=False, allowPOS=('ns', 'n', 'vn', 'v'))
print(",".join(tags))
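For comparison, the TF-IDF based extract_tags can also return each keyword's weight by passing withWeight=True; a minimal sketch:

import jieba.analyse

corpus = "美媒称,鉴于全球石油市场过度供给的情况,中国原油需求下滑是其首要担忧之一。"
# withWeight=True yields (keyword, weight) pairs instead of bare strings
for tag, weight in jieba.analyse.extract_tags(corpus, topK=5, withWeight=True):
    print(tag, weight)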
Computing Article Similarity
import jieba.analyse
from snownlp import SnowNLP
corpus = u"美媒称,鉴于全球石油市场过度供给的情况,中国原油需求下滑是其首要担忧之一。过量生产拉低了石油价格,但是中国过去一年左右的疲弱需求引发了缓慢的回弹。"
# Use the top TF-IDF keywords as the token list for the first document
tags = jieba.analyse.extract_tags(corpus, topK=5)
text1 = list(tags)
print(text1)
# SnowNLP takes a list of tokenized documents and scores a query
# document against each of them with BM25
text = [text1, [u"文章", u"doc2"], [u"这是doc3"]]
text2 = text1
s = SnowNLP(text)
print(s.sim(text2))
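The second and third documents above are only placeholders; in practice each article is segmented with jieba first, and s.sim returns one BM25 score per document in the collection. A minimal sketch of that flow (the document strings are invented for illustration):

import jieba
from snownlp import SnowNLP

docs = [
    u"中国原油需求下滑是全球石油市场的首要担忧之一。",
    u"过量生产拉低了石油价格。",
    u"这篇文章讨论其他话题。",
]
# Segment each document into a token list
tokenized = [list(jieba.cut(d)) for d in docs]
s = SnowNLP(tokenized)
# Score a segmented query against every document in the collection
query = list(jieba.cut(u"石油价格下滑"))
print(s.sim(query))  # one BM25 similarity score per document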
Summary Extraction
from snownlp import SnowNLP
text1 = u"美媒称,鉴于全球石油市场过度供给的情况,中国原油需求下滑是其首要担忧之一。过量生产拉低了石油价格,但是中国过去一年左右的疲弱需求引发了缓慢的回弹。"
s = SnowNLP(text1)
print(s.summary(3))  # the 3 most representative sentences
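summary returns a list of sentences, so it can also be printed one sentence per line; SnowNLP additionally offers a keywords method on the same text. A minimal sketch:

from snownlp import SnowNLP

text = u"美媒称,鉴于全球石油市场过度供给的情况,中国原油需求下滑是其首要担忧之一。过量生产拉低了石油价格,但是中国过去一年左右的疲弱需求引发了缓慢的回弹。"
s = SnowNLP(text)
for sentence in s.summary(2):  # the 2 most representative sentences
    print(sentence)
print(s.keywords(3))  # top 3 keywords from the same text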