这是部分代码。基本逻辑是：先用 jieba 分词并制作语料库，再利用 TF-IDF 建模，最后计算相似度并给出相似度排名前十的结果。希望对你有帮助。
# --- Build the TF-IDF model from the training corpus ---
from gensim import corpora, models, similarities

# Tokenize every training document with jieba.
# NOTE(review): assumes TRAIN_LIST_1 is an iterable of raw text strings,
# defined earlier in the file — confirm against the omitted part.
train_doc_list = [list(jieba.cut(doc)) for doc in TRAIN_LIST_1]

# Build the bag-of-words dictionary from the tokenized training docs.
# BUG FIX: the original passed `list_1` here (and below), silently ignoring
# the freshly built `train_doc_list` — the tokenization loop had no effect.
dictionary = corpora.Dictionary(train_doc_list)

# doc2bow turns each token list into a sparse (token_id, count) vector.
corpus = [dictionary.doc2bow(doc) for doc in train_doc_list]

# Fit the TF-IDF model on the corpus.
# TF : frequency of a term inside document d.
# IDF: inverse document frequency — the fewer documents contain term t
#      (smaller n), the larger IDF, i.e. t discriminates documents well.
tfidf = models.TfidfModel(corpus)
# --- Query each test document against the corpus; export top-10 matches ---
# Build the similarity index ONCE: it depends only on the training corpus.
# The original rebuilt it inside the loop — loop-invariant work hoisted out.
index = similarities.SparseMatrixSimilarity(tfidf[corpus],
                                            num_features=len(dictionary))

writer = pd.ExcelWriter(r'C:\Users\zlk52\Desktop\工伤认定相似结果kan1.xlsx')
try:
    for i in range(TEST_1.shape[0]):
        # One-row DataFrame for the i-th test case (kept as a frame so it
        # can be written to Excel below).
        test_data0 = TEST_1.iloc[i:i + 1, :]
        # BUG FIX: take the cell value itself; str() of a pandas Series
        # embeds the index/dtype banner into the query text.
        test_doc = str(test_data0['Injury_AccidentSketch'].iloc[0])
        # Normalize '点' to '时' so time expressions match the training data.
        test_doc = test_doc.replace('点', '时')
        # Tokenize and keep only whitelisted tokens.
        # NOTE(review): pick_words is defined elsewhere — presumably a
        # set/list of allowed vocabulary; confirm it is a set for O(1) lookup.
        test_doc_list = [word for word in jieba.cut(test_doc)
                         if word in pick_words]
        test_doc_vec = dictionary.doc2bow(test_doc_list)
        # Similarity of the TF-IDF query vector against every training doc.
        sim = index[tfidf[test_doc_vec]]
        sim_sorted = sorted(enumerate(sim), key=lambda item: -item[1])
        # Top-k most similar training documents and their scores.
        k = 10
        top_k = sim_sorted[:k]
        k_index = [idx for idx, _ in top_k]
        k_value = [val for _, val in top_k]
        # .copy() avoids pandas' SettingWithCopyWarning (and a possibly
        # lost write) when adding the K_VALUE column to an .iloc slice.
        result = TRAIN_1.iloc[k_index, :].copy()
        result['K_VALUE'] = k_value
        test_data0.to_excel(writer,
                            sheet_name='序号为{0}的工伤认定测试文本'.format(i + 1))
        result.to_excel(writer,
                        sheet_name='序号为{0}的相似文本'.format(i + 1))
finally:
    # BUG FIX: the original never saved/closed the writer, so the xlsx
    # could be left unwritten. close() also saves on older pandas.
    writer.close()