Personal blog: http://www.chenjianqu.com/
Original post: http://www.chenjianqu.com/show-40.html
In the previous article, 《聊天机器人-基于QQ聊天记录训练》, I built a word-level chatbot with a simple seq2seq model and briefly explained how seq2seq works. Here I use seq2seq for character-level translation: English -> Cantonese.
seq2seq is trained with 'teacher forcing'. Taking English-French translation as an example:
Inference does not use 'teacher forcing', as shown below:
Therefore the seq2seq models used for training and for inference are different, but they share the same layers.
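To make this concrete, here is a minimal sketch (using a made-up sentence pair, not one from the dataset) of how teacher forcing shifts the decoder sequences by one timestep:
# Hypothetical target sentence; '\t' marks the start, '\n' marks the end
target_text = '\t' + '你好' + '\n'

# Teacher forcing: the decoder input is the full ground-truth sequence,
# and the decoder target is that sequence shifted left by one step,
# so each step predicts the next character from the true previous one
decoder_input = list(target_text)       # ['\t', '你', '好', '\n']
decoder_target = list(target_text[1:])  # ['你', '好', '\n']

# At inference time there is no ground truth: the decoder starts from
# '\t' alone and feeds its own prediction back in at every step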
Architecture of the training model:
The structure drawn with TensorBoard is as follows:
The inference models:
Encoder
Decoder
Loading the dataset
Dataset source: http://www.manythings.org/anki/. Each line of the file contains an English sentence and its Cantonese translation, separated by a tab.
num_samples = 3200

input_texts = []
target_texts = []
input_characters = set()
target_characters = set()

data_path = r'D:\NLP\dataset\机器翻译\yue-eng\yue.txt'
with open(data_path, 'r', encoding='utf-8') as f:
    lines = f.read().split('\n')

for line in lines[: min(num_samples, len(lines) - 1)]:
    input_text, target_text = line.split('\t')
    # Use '\t' as the start token and '\n' as the end token
    target_text = '\t' + target_text + '\n'
    input_texts.append(input_text)    # input sentences
    target_texts.append(target_text)  # target sentences
    for char in input_text:
        if char not in input_characters:
            input_characters.add(char)  # input character set
    for char in target_text:
        if char not in target_characters:
            target_characters.add(char)  # target character set

# Sort the character sets
input_characters = sorted(list(input_characters))
target_characters = sorted(list(target_characters))

# Number of distinct characters
num_encoder_tokens = len(input_characters)
num_decoder_tokens = len(target_characters)

# Maximum sentence lengths of the inputs and outputs
max_encoder_seq_length = max([len(txt) for txt in input_texts])
max_decoder_seq_length = max([len(txt) for txt in target_texts])
Data preprocessing
Map the text to one-hot vectors.
import numpy as np

# Dictionaries: map characters to indices
input_token_index = dict([(char, i) for i, char in enumerate(input_characters)])
target_token_index = dict([(char, i) for i, char in enumerate(target_characters)])

# Dictionaries: map indices back to characters
rinput_dict = dict((i, char) for char, i in input_token_index.items())
rtarget_dict = dict((i, char) for char, i in target_token_index.items())

# One-hot encode the dataset
encoder_input_data = np.zeros((len(input_texts), max_encoder_seq_length, num_encoder_tokens), dtype='float32')
decoder_input_data = np.zeros((len(input_texts), max_decoder_seq_length, num_decoder_tokens), dtype='float32')
decoder_target_data = np.zeros((len(input_texts), max_decoder_seq_length, num_decoder_tokens), dtype='float32')

for i, (input_text, target_text) in enumerate(zip(input_texts, target_texts)):
    for t, char in enumerate(input_text):
        encoder_input_data[i, t, input_token_index[char]] = 1.
    for t, char in enumerate(target_text):
        decoder_input_data[i, t, target_token_index[char]] = 1.
        if t > 0:
            # decoder_target_data is ahead of decoder_input_data by one
            # timestep and does not include the start character
            decoder_target_data[i, t - 1, target_token_index[char]] = 1.

print(encoder_input_data.shape)
print(decoder_input_data.shape)
print(decoder_target_data.shape)
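As a quick sanity check (a hypothetical snippet, not part of the original code), the one-hot vectors can be decoded back to text with the reverse dictionary rinput_dict:
# Decode the first encoder sample back to text via argmax,
# skipping the all-zero padding timesteps
sample = encoder_input_data[0]
decoded = ''.join(rinput_dict[int(np.argmax(step))] for step in sample if step.any())
print(decoded)  # should print input_texts[0]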
Defining the model
The LSTM hidden state has 256 dimensions (latent_dim).
from keras.models import Model
from keras.layers import Input, LSTM, Dense
from keras.utils import plot_model

latent_dim = 256

# Encoder: keep only the final hidden and cell states
encoder_inputs = Input(shape=(None, num_encoder_tokens))
encoder = LSTM(latent_dim, return_state=True)
encoder_outputs, state_h, state_c = encoder(encoder_inputs)
encoder_states = [state_h, state_c]

# Decoder: return full sequences, initialized with the encoder states
decoder_inputs = Input(shape=(None, num_decoder_tokens))
decoder_lstm = LSTM(latent_dim, return_sequences=True, return_state=True)
decoder_outputs, _, _ = decoder_lstm(decoder_inputs, initial_state=encoder_states)
decoder_dense_1 = Dense(int(num_decoder_tokens / 4), activation='relu')
decoder_outputs_1 = decoder_dense_1(decoder_outputs)
decoder_dense = Dense(num_decoder_tokens, activation='softmax')
decoder_outputs = decoder_dense(decoder_outputs_1)

model = Model([encoder_inputs, decoder_inputs], decoder_outputs)
model.summary()
plot_model(model, to_file='eng2yue_model.png', show_shapes=True)  # requires pydot and graphviz
Training and saving the model
Set up callbacks so that the best model is saved during training.
import keras

batch_size = 16
epochs = 100

callbacks_list = [
    keras.callbacks.EarlyStopping(
        monitor='acc',
        patience=10,
    ),
    keras.callbacks.ModelCheckpoint(
        filepath='eng2yue_model_checkpoint.h5',
        monitor='val_loss',  # only overwrite the model file when val_loss improves
        save_best_only=True
    ),
    keras.callbacks.TensorBoard(
        log_dir='my_log_dir',
        histogram_freq=1  # record activation histograms after every epoch
    )
]

model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['acc'])
model.fit([encoder_input_data, decoder_input_data], decoder_target_data,
          batch_size=batch_size,
          epochs=epochs,
          validation_split=0.1,
          callbacks=callbacks_list
          )
model.save('eng2yue_model.h5')
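The TensorBoard logs written to my_log_dir can be inspected by running tensorboard --logdir=my_log_dir. To continue from the saved file later, the model can be restored with load_model (a minimal sketch):
from keras.models import load_model

# load_model restores both the architecture and the trained weights
model = load_model('eng2yue_model.h5')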
Training results:
The accuracy is quite low, which is due to the small size of the training set.
Defining the inference models
The encoder and decoder are built separately.
# Encoder model: maps an input sequence to its state vectors
encoder_model = Model(encoder_inputs, encoder_states)

# Decoder model: takes the previous character and the states, and returns
# the next-character distribution together with the updated states
decoder_state_input_h = Input(shape=(latent_dim,))
decoder_state_input_c = Input(shape=(latent_dim,))
decoder_states_inputs = [decoder_state_input_h, decoder_state_input_c]
decoder_outputs, state_h, state_c = decoder_lstm(
    decoder_inputs, initial_state=decoder_states_inputs)
decoder_states = [state_h, state_c]
# Apply both trained Dense layers, matching the training model
decoder_outputs = decoder_dense(decoder_dense_1(decoder_outputs))
decoder_model = Model(
    [decoder_inputs] + decoder_states_inputs,
    [decoder_outputs] + decoder_states
)
Inference
The test results are not ideal, which is also evident from the validation accuracy during training.
# Input text
text = 'I get it.'
text_seq = np.zeros((1, max_encoder_seq_length, num_encoder_tokens), dtype='float32')
for t, char in enumerate(text):
    text_seq[0, t, input_token_index[char]] = 1.

result = ''
states_value = encoder_model.predict(text_seq)  # encode the input to get the state vectors

target_seq = np.zeros((1, 1, num_decoder_tokens))
target_seq[0, 0, target_token_index['\t']] = 1.  # initialize the decoder input with the start token

stop_condition = False
while not stop_condition:
    output_tokens, h, c = decoder_model.predict([target_seq] + states_value)
    # Greedily pick the most likely next character
    sampled_token_index = np.argmax(output_tokens[0, -1, :])
    sampled_char = rtarget_dict[sampled_token_index]
    result += sampled_char
    # Exit the loop on the end token or when the result gets too long
    if (sampled_char == '\n' or len(result) > max_decoder_seq_length):
        stop_condition = True
    # Update the decoder input
    target_seq = np.zeros((1, 1, num_decoder_tokens))
    target_seq[0, 0, sampled_token_index] = 1.
    # Update the states
    states_value = [h, c]

print(result)
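For repeated use, the decoding loop above can be wrapped into a helper function (a sketch; the name translate is mine, not from the original code):
def translate(text):
    # One-hot encode the English input
    text_seq = np.zeros((1, max_encoder_seq_length, num_encoder_tokens), dtype='float32')
    for t, char in enumerate(text):
        text_seq[0, t, input_token_index[char]] = 1.
    states_value = encoder_model.predict(text_seq)
    # Start decoding from the start token
    target_seq = np.zeros((1, 1, num_decoder_tokens))
    target_seq[0, 0, target_token_index['\t']] = 1.
    result = ''
    while True:
        output_tokens, h, c = decoder_model.predict([target_seq] + states_value)
        sampled_token_index = np.argmax(output_tokens[0, -1, :])
        sampled_char = rtarget_dict[sampled_token_index]
        if sampled_char == '\n' or len(result) > max_decoder_seq_length:
            break
        result += sampled_char
        # Feed the prediction back in as the next decoder input
        target_seq = np.zeros((1, 1, num_decoder_tokens))
        target_seq[0, 0, sampled_token_index] = 1.
        states_value = [h, c]
    return result

print(translate('I get it.'))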
References
[1] Francois Chollet. A ten-minute introduction to sequence-to-sequence learning in Keras. https://blog.keras.io/a-ten-minute-introduction-to-sequence-to-sequence-learning-in-keras.html, 2017-09-29.