Notes:
1. Results differ from run to run because the algorithm's starting points are chosen randomly.
2. Evaluation of the clustering quality is left for later (see the evaluation sketch after the script).
3. Selecting the best value of K is also left for later (see the K-selection sketch at the end).
import jieba
import csv
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.cluster import KMeans
def jieba_tokenize(text):
    return jieba.lcut(text)

# tokenizer=jieba_tokenize supplies the word segmenter; lowercase=True folds text to lower case
tfidf_vectorizer = TfidfVectorizer(tokenizer=jieba_tokenize, lowercase=True)
# Load the corpus of documents to be clustered
file = open('C:\\Users\\86049\\Desktop\\contract_info.csv', encoding='utf-8')
reader = csv.reader(file)
text_list = []
for row in reader:
    # Join the CSV row (a list of fields) into a single string
    text = ''.join(row)
    # Append the string to the corpus
    text_list.append(text)
# Fit the TF-IDF model on the corpus and build the document-term matrix
tfidf_matrix = tfidf_vectorizer.fit_transform(text_list)
# Cluster the documents with K-means
num_clusters = 5
# n_jobs was removed from KMeans in scikit-learn 1.0, so it is omitted here.
# Without a fixed random_state, results vary between runs (see note 1 above).
km_cluster = KMeans(n_clusters=num_clusters, max_iter=300, n_init=1, init='k-means++')
# Cluster index assigned to each document
result = km_cluster.fit_predict(tfidf_matrix)
# Count how many documents fall into each cluster
# (np.int was removed in NumPy 1.24; use the built-in int instead)
ans = np.zeros(num_clusters, dtype=int)
for i in result:
    ans[i] = ans[i] + 1
for i in range(0, num_clusters):
    print(ans[i])
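
As note 2 above says, an evaluation step is still to be added. Below is a minimal sketch using one common internal metric, scikit-learn's silhouette score, reusing tfidf_matrix and result from the script; the cosine metric is an assumption chosen because it suits TF-IDF vectors, not something the original specifies.

from sklearn.metrics import silhouette_score

# Silhouette score ranges from -1 to 1; higher means tighter, better-separated clusters.
# It works directly on the sparse TF-IDF matrix and the predicted labels.
score = silhouette_score(tfidf_matrix, result, metric='cosine')
print('silhouette score: %.3f' % score)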
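
For note 3, one simple way to pick K is to sweep a range of values and keep the K with the highest silhouette score. The sketch below assumes tfidf_matrix from the script above; the range 2 to 10 and random_state=42 are arbitrary illustration choices, and other criteria (for example the elbow method on inertia) would work as well.

from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score

# Try each candidate K and remember the best-scoring one.
best_k, best_score = None, -1.0
for k in range(2, 11):
    labels = KMeans(n_clusters=k, init='k-means++', n_init=10, max_iter=300,
                    random_state=42).fit_predict(tfidf_matrix)
    s = silhouette_score(tfidf_matrix, labels, metric='cosine')
    print('K=%d  silhouette=%.3f' % (k, s))
    if s > best_score:
        best_k, best_score = k, s
print('best K:', best_k)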