This post records some of my experiments with text matching. I am still using the Taobao dialogue data from earlier posts, and the accuracy is 74%.
A previous post already explained how sentence sequences work, so here we recast the text classification problem as a question-answer matching problem. The input now consists of two sentences, while the output is still a class label. Both sentences pass through the same RNN to produce their final sentence vectors. How do we match the two sentence vectors? I use what is essentially an element-wise product: the two vectors are multiplied position by position, but without the final summation step of a true dot product. The resulting match vector therefore has length equal to the hidden layer unit size, the same dimension as a single sentence vector. It is then fed into an ordinary feed-forward layer with output size 2, since this is binary classification (see the sketch below).
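To make the matching step concrete, here is a minimal sketch of that head, assuming TensorFlow 1.x; the names q_state and a_state are my own placeholders standing in for the final RNN states of the question and the answer:

import tensorflow as tf

n_hidden_units = 128  # hidden layer unit size
n_classes = 2         # binary classification

# Final RNN states of the two sentences (hypothetical placeholder names).
q_state = tf.placeholder(tf.float32, [None, n_hidden_units])
a_state = tf.placeholder(tf.float32, [None, n_hidden_units])

# Element-wise product: multiply position by position and skip the summation,
# so the match vector keeps length n_hidden_units.
match = q_state * a_state

# Ordinary feed-forward layer mapping the match vector to 2 classes.
W = tf.Variable(tf.random_normal([n_hidden_units, n_classes]))
b = tf.Variable(tf.constant(0.1, shape=[n_classes]))
logits = tf.matmul(match, W) + b

The full data-preparation code follows.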
import tensorflow as tf
import nltk
import pandas as pd
from collections import Counter
import numpy as np
import time
max_pair = 200000

def get_pair(number, dialogue):
    pairs = []
    for conversation in dialogue:
        # Drop the leading flag and tab, then split into utterances.
        utterances = conversation[2:].strip('\n').split('\t')
        for i, utterance in enumerate(utterances):
            # Even indices are customer questions; pair each with the
            # agent answer that follows it.
            if i % 2 != 0:
                continue
            if i + 1 >= len(utterances):
                break  # odd number of utterances: the last question has no answer
            pairs.append([utterances[i], utterances[i + 1]])
            if len(pairs) >= number:
                return pairs
    return pairs
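For illustration, here is a made-up line in the tab-separated format the code expects: a flag character, a tab, then alternating customer/agent utterances.

sample = ['1\thow much is shipping\tfree over 99 yuan\tthanks\tyou are welcome\n']
print(get_pair(2, sample))
# [['how much is shipping', 'free over 99 yuan'], ['thanks', 'you are welcome']]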
def convert_dialogue_to_pair():
    # Keep only conversation lines flagged with a leading '1'.
    with open('dialogue_alibaba2.txt', encoding='utf-8', mode='r') as f:
        dialogue = [p for p in f.readlines() if p.startswith('1')]
    print(len(dialogue))
    pairs = get_pair(max_pair, dialogue)
    data = []
    # Positive examples: each real (question, answer) pair gets label 1.
    for p in pairs:
        data.append([p[0], p[1], 1])
    # Negative examples: pair each question with the answer 8 positions
    # later (wrapping around), labelled 0.
    for i, p in enumerate(pairs):
        data.append([p[0], pairs[(i + 8) % len(pairs)][1], 0])
    df = pd.DataFrame(data, columns=['sentence_q', 'sentence_a', 'label'])
    print(len(data))
    return df
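The offset of 8 presumably just needs to be large enough that a question is unlikely to be paired with its own answer; a toy run with three pairs shows the wrap-around:

pairs = [['q0', 'a0'], ['q1', 'a1'], ['q2', 'a2']]
negatives = [[p[0], pairs[(i + 8) % len(pairs)][1], 0] for i, p in enumerate(pairs)]
# negatives == [['q0', 'a2', 0], ['q1', 'a0', 0], ['q2', 'a1', 0]]

Since every pair contributes one positive and one negative row, the resulting DataFrame is balanced between the two classes.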
MAX_FEATURES = 150
MAX_SENTENCE_LENGTH = 100

# hyperparameters
lr = 0.001
training_iters = 100000
batch_size = 700
vocab_size = 200
embedding_size = 300
n_inputs = embedding_size      # one word embedding per time step
n_steps = MAX_SENTENCE_LENGTH  # time steps = max sentence length
n_hidden_units = 128           # neurons in the RNN hidden layer
n_classes = 2                  # binary classification: match / no match
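Concretely, these settings mean each sentence is padded or truncated to 100 tokens and each token becomes a 300-dimensional embedding, so the RNN sees tensors of shape [batch_size, n_steps, n_inputs]. One plausible reading of vocab_size and embedding_size is an in-graph embedding lookup over token ids; a sketch under that assumption (the names x_q, x_a, y, and embedding are my own):

# Token-id inputs, padded to MAX_SENTENCE_LENGTH (hypothetical names).
x_q = tf.placeholder(tf.int32, [None, n_steps])   # question token ids
x_a = tf.placeholder(tf.int32, [None, n_steps])   # answer token ids
y = tf.placeholder(tf.float32, [None, n_classes]) # one-hot labels

# Trainable embedding table; lookup yields [None, n_steps, n_inputs].
embedding = tf.Variable(tf.random_uniform([vocab_size, embedding_size], -1.0, 1.0))
x_q_emb = tf.nn.embedding_lookup(embedding, x_q)
x_a_emb = tf.nn.embedding_lookup(embedding, x_a)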
def get_sentiment_data():
    df_sentiment = convert_dialogue_to_pair()
    print('=========finish convert ========')
    df_sentiment = df_sentiment.sample(frac=0.9)
    sentenses_q = df_sentiment['sentence_q'].values
    sentenses_a = df_sentiment['sentence_a'].values
    # Concatenate the two collections as lists (numpy '+' on the arrays
    # would concatenate the strings element-wise), then lowercase.
    sentenses = [s.lower() for s in list(sentenses_q) + list(sentenses_a)]
    wordlist_sentence = [nltk.word_tokenize(s) for s in sentenses]
    ws = []