短文本相似度计算
引用CSDN上经典的一句话:调试的错误就是编程给你最好的东西,因为在每个错误上面都标志着前进的一步。
文本相似度计算步骤如下:
1.分词;
def tokenization(self, line):
    """Segment *line* with jieba and drop stop words.

    :param line: raw text string to tokenize
    :return: list of tokens from *line* that are not in the stop-word list
    """
    # Load the stop-word list ONCE and keep it as a set: the original called
    # self.stop_word_list(...) inside the loop, re-reading the file for every
    # single token, and membership tests on a list are O(n).
    stop_words = set(self.stop_word_list('../data/stopwords.txt'))
    return [word for word in jieba.lcut(line) if word not in stop_words]
2.建模;
def train_text(self, train_data_path):
    """Build the bag-of-words corpus used for similarity queries.

    Side effect: stores the fitted vocabulary in ``self.dictionary`` so that
    later queries can be mapped into the same id space.

    :param train_data_path: path to the training text file
    :return: list of doc2bow vectors, one per document; each vector is a
             list of ``(token_id, frequency)`` pairs
    """
    corpus = self.read_data_from_text(train_data_path)
    # Dictionary assigns an integer id to every distinct token (the "bag of
    # words" vocabulary); token2id holds the id <-> token mapping.
    self.dictionary = corpora.Dictionary(corpus)
    # doc2bow turns each tokenized document into a sparse vector of
    # (token_id, frequency) pairs.
    return [self.dictionary.doc2bow(text) for text in corpus]
3.模型预测;
# Similarity scores from a TF-IDF model
def sim_cal_tfidf(self, doc_vector=None, input_file=""):
    """Score a query text against the corpus using TF-IDF cosine similarity.

    :param doc_vector: bag-of-words corpus — a list of doc2bow vectors as
                       returned by ``train_text``
    :param input_file: the query TEXT itself (despite the name, this is a raw
                       string, not a file path — it is fed to ``tokenization``)
    :return: tuple of (list of ``(doc_index, similarity)`` pairs,
             TF-IDF-weighted corpus vectors)
    """
    # Fit the TF-IDF model on the corpus and persist it for later reuse.
    # The original code also called tfidf.load(...) right after save, but
    # gensim's load() returns a NEW model object whose result was discarded,
    # so that round-trip was a no-op and has been removed.
    tfidf = models.TfidfModel(doc_vector)
    tfidf.save('../model/tfidf_model.pkl')
    # Re-weight every document vector by its TF-IDF scores.
    tfidf_vectors = tfidf[doc_vector]
    # Map the query into the same vocabulary as the training corpus.
    query = self.tokenization(input_file)
    query_bow = self.dictionary.doc2bow(query)
    # NOTE(review): the query is compared in raw bag-of-words space while the
    # index holds TF-IDF vectors; gensim examples usually query with
    # tfidf[query_bow] — confirm whether raw BoW is intentional here.
    index = similarities.MatrixSimilarity(tfidf_vectors)
    sims = index[query_bow]
    return list(enumerate(sims)), tfidf_vectors
参考文章:
https://blog.csdn.net/xc_zhou/article/details/80952460
https://blog.csdn.net/kevinelstri/article/details/70139797