# 最近在看人工智能方面的书籍。这是一个简单的基于 sklearn 的 LDA 应用。具体的 LDA 算法可以自行搜索,有很多讲解;不过要看懂 LDA 需要较多的数学知识。
# -*- coding: utf-8 -*-
import jieba.posseg as jp, jieba
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import LatentDirichletAllocation
import matplotlib.pyplot as plt
import numpy as np
import matplotlib as mpl
mpl.rcParams['font.sans-serif']=['SimHei'] # use the SimHei font so Chinese axis labels render correctly
# Six short sample documents (Chinese news headlines) used as the training
# corpus.  They cover three rough themes: finance, education, and health.
docs=[
'共促互联网金融规范发展',
'金融支持小微企业',
'创新校园安全治理机制编织大网为学生护航',
'建立从幼儿园到大学的贫困生补助体系',
'研发艾滋病病毒药物',
'自主研发抗癌新药上市'
]
# Stopwords to drop after tokenization.  The original used the bare string
# ('编织'), so `w.word not in stopwords` performed a SUBSTRING test that only
# worked by coincidence — a real set makes the membership test explicit.
stopwords = {'编织'}
# Accepted part-of-speech tags: nouns only.  ('n') is just the string 'n';
# a one-element tuple is what was intended.
flags = ('n',)
words_list = []
for text in docs:
    # Keep tokens longer than one character whose POS tag is accepted
    # and that are not stopwords.
    words = [w.word for w in jp.cut(text)
             if len(w.word) > 1 and w.flag in flags and w.word not in stopwords]
    words_list.append(words)
# sklearn's vectorizers expect whitespace-separated token strings.
corpus = [' '.join(doc) for doc in words_list]
print(corpus)
# Vectorize the corpus with TF-IDF weights.
tfidf = TfidfVectorizer()
tfidf_matrix = tfidf.fit_transform(corpus)
n_pick_topics = 3  # number of latent topics to extract
# Pass n_components by keyword: recent scikit-learn versions make estimator
# constructor parameters keyword-only, so the original positional call raises
# a TypeError there.
lda = LatentDirichletAllocation(n_components=n_pick_topics, random_state=272)
# docres[i] is the inferred topic-probability distribution of document i.
docres = lda.fit_transform(tfidf_matrix)
# Plot each document's topic distribution as a bar chart in a 3x2 grid.
color = ['red', 'green', 'blue']  # one color per grid row
f, ax = plt.subplots(3, 2)        # one subplot per document
label = ['金融', '教育', '健康']   # topic labels: finance / education / health
x = np.array([1, 2, 3])           # one bar per topic
for i in range(len(docs)):
    # Two documents per grid row; divmod replaces the int(i/2)/int(i%2)
    # float-division idiom with integer arithmetic.
    k, j = divmod(i, 2)
    ax[k][j].bar(x, docres[i], color=color[k], tick_label=label)
plt.show()
def print_top_words(model, feature_names, top_words):
    """Print, for every topic, the `top_words` highest-weighted terms.

    model          -- fitted topic model exposing a `components_` matrix
                      (one row of term weights per topic)
    feature_names  -- index-aligned list of vocabulary terms
    top_words      -- how many top terms to print per topic
    """
    for topic_no, weights in enumerate(model.components_, start=1):
        # Indices of the heaviest terms, in descending weight order.
        best = weights.argsort()[::-1][:top_words]
        print("Topic #" + str(topic_no))
        print(" ".join(feature_names[i] for i in best))
n_top_words = 4
# TfidfVectorizer.get_feature_names() was removed in scikit-learn 1.2;
# get_feature_names_out() (available since 1.0) is the replacement.
tf_feature_names = tfidf.get_feature_names_out()
print_top_words(lda, tf_feature_names, n_top_words)