Short-text similarity in Python: encoding text with word2vec, computing distance with an LSTM

import numpy as np
import gensim
import jieba
from keras import backend as K
from keras.preprocessing.sequence import pad_sequences
from keras.layers import Input, Embedding, LSTM, Lambda
from keras.models import Model
from keras.optimizers import Adadelta

path = './data/qa_test.txt'  # path to the sample sentences
path_word2vec = '/home/ruben/data/nlp/word2vec_wx'  # path to the pretrained word2vec model

# Fabricate training pairs: pair every sentence with every other one,
# giving n*(n-1) ordered pairs for n sentences (a line is never paired with itself)
fake_data = open(path, 'r').readlines()
tain_data_l = []  # left-hand sentences
tain_data_r = []  # right-hand sentences
for line in fake_data:
    for line2 in fake_data:
        if line is not line2:
            print(line.replace('\n', ''), line2.replace('\n', ''))
            tain_data_l.append(line.replace('\n', ''))
            tain_data_r.append(line2.replace('\n', ''))
print('left length:', len(tain_data_l))
print('right length:', len(tain_data_r))
# Build the vocabulary; index 0 is reserved for padding and unknown words (UNK)
dict_word = {}
tain_data_l_n = []  # index sequences for the left LSTM
tain_data_r_n = []  # index sequences for the right LSTM

for data in [tain_data_l, tain_data_r]:
    for line in data:
        for word in jieba.cut(line):
            if word not in dict_word:
                dict_word[word] = len(dict_word) + 1  # indices start at 1; 0 is padding/UNK
print(dict_word)  # vocabulary complete
id2w = {dict_word[w]: w for w in dict_word}  # reverse lookup: index -> word
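# For reference, jieba's default accurate mode splits a sentence into words,
# e.g. list(jieba.cut('我来到北京清华大学')) -> ['我', '来到', '北京', '清华大学']
# (the example from the jieba README).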
embedding_size = 256
# Embedding matrix: one row per vocabulary word plus an all-zero row 0 for padding
embedding_arry = np.random.randn(len(dict_word) + 1, embedding_size)
embedding_arry[0] = 0
word2vector = gensim.models.Word2Vec.load(path_word2vec)
# Copy each pretrained vector into the row given by the word's vocabulary index
for word, index in dict_word.items():
    if word in word2vector.wv.vocab:
        embedding_arry[index] = word2vector.wv.word_vec(word)
print('embedding_arry shape:', embedding_arry.shape)
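# Optional sanity check (my addition, not in the original): report how many
# vocabulary words actually received a pretrained vector; words missing from
# word2vec_wx keep their random initialisation.
covered = sum(1 for w in dict_word if w in word2vector.wv.vocab)
print('word2vec coverage:', covered, 'of', len(dict_word), 'words')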
del word2vector
# Convert each sentence into its sequence of vocabulary indices
for line in tain_data_l:
    tain_data_l_n.append([dict_word[word] for word in jieba.cut(line)])
print('tain_data_l_n length:', len(tain_data_l_n))
y_train = np.ones((len(tain_data_l_n),))  # fake labels: every pair is marked fully similar (1.0)
for line in tain_data_r:
    tain_data_r_n.append([dict_word[word] for word in jieba.cut(line)])
print('tain_data_r_n length:', len(tain_data_r_n))
# Find the longest sentence in the corpus (checking both sides)
max_length = 0
for line in tain_data_l_n + tain_data_r_n:
    if max_length < len(line):
        max_length = len(line)
print('max length:', max_length)

# Pad every index sequence to the same length
tain_data_l_n = pad_sequences(tain_data_l_n, maxlen=max_length)
tain_data_r_n = pad_sequences(tain_data_r_n, maxlen=max_length)
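# Note: pad_sequences pre-pads with 0 by default, e.g. (hypothetical indices)
# pad_sequences([[3, 7]], maxlen=4) -> array([[0, 0, 3, 7]]). Index 0 looks up
# the all-zero row we reserved in embedding_arry; pass mask_zero=True to the
# Embedding layer if the LSTM should skip those padded steps entirely.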

# Model hyper-parameters
n_hidden = 50                  # LSTM units
gradient_clipping_norm = 1.25  # clip gradients to this norm
batch_size = 5
n_epoch = 15

# Similarity score: exp(-||left - right||_1) maps the Manhattan distance
# between the two sentence encodings into (0, 1]
def exponent_neg_manhattan_distance(left, right):
    return K.exp(-K.sum(K.abs(left - right), axis=1, keepdims=True))
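# Worked example (illustrative numbers, not from the original post): for
# left = [1, 2] and right = [1, 2] the L1 distance is 0, so the score is
# exp(0) = 1.0; for right = [2, 4] the distance is 1 + 2 = 3 and the score
# drops to exp(-3), roughly 0.05. Identical encodings score exactly 1, and
# the score decays toward 0 as the encodings diverge.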


# Input layers: each branch takes a padded sequence of word indices
left_input = Input(shape=(max_length,), dtype='int32')
right_input = Input(shape=(max_length,), dtype='int32')
# Frozen embedding layer initialised with the pretrained word2vec weights
embedding_layer = Embedding(len(embedding_arry), embedding_size, weights=[embedding_arry],
                            input_length=max_length, trainable=False)
# Embed both sentences
encoded_left = embedding_layer(left_input)
encoded_right = embedding_layer(right_input)
# The two branches share a single LSTM (Siamese architecture)
shared_lstm = LSTM(n_hidden)
left_output = shared_lstm(encoded_left)
right_output = shared_lstm(encoded_right)
# Keras 1.x wrote this with Merge(mode=...); Lambda is the Keras 2 equivalent
malstm_distance = Lambda(lambda x: exponent_neg_manhattan_distance(x[0], x[1]),
                         output_shape=lambda x: (x[0][0], 1))([left_output, right_output])

# Assemble and compile the Siamese model
malstm = Model([left_input, right_input], [malstm_distance])

optimizer = Adadelta(clipnorm=gradient_clipping_norm)

malstm.compile(loss='mean_squared_error', optimizer=optimizer, metrics=['accuracy'])

# Train; with this fabricated data the validation set is simply the training set
malstm.fit(x=[np.asarray(tain_data_l_n), np.asarray(tain_data_r_n)], y=y_train,
           batch_size=batch_size, epochs=n_epoch,
           validation_data=([np.asarray(tain_data_l_n), np.asarray(tain_data_r_n)], y_train))
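
After training, the same preprocessing can be reused to score a fresh sentence pair. The sketch below is my own addition, not part of the original post: it segments two hypothetical sentences with jieba, maps words through dict_word (words never seen during vocabulary building fall back to index 0, the padding/UNK row), pads to max_length, and calls malstm.predict to get the exp(-L1) similarity in (0, 1].

def sentence_to_ids(sentence):
    # Segment, map to vocabulary indices (unseen words -> 0), and pad
    ids = [dict_word.get(w, 0) for w in jieba.cut(sentence)]
    return pad_sequences([ids], maxlen=max_length)

s1 = '今天天气很好'  # hypothetical query sentences
s2 = '今天天气不错'
score = malstm.predict([sentence_to_ids(s1), sentence_to_ids(s2)])
print('similarity:', float(score[0][0]))  # closer to 1 means more similar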