# Section 1: imports (一、导包)
from tensorflow.keras import models, layers, losses, activations, optimizers
from tensorflow.keras import utils
import numpy as np
# Section 2: data preprocessing (二、数据预处理)
# 完成以下对话数据 word = "Ancient Turkic people probably stuffed their dumplings with meat. But it's unclear when this
# practice began, or whether they learned the art of dumpling-making from others. However this happened, dumplings
# certainly gathered steam in ancient China. That's where they first appear in the written record: more than 1,700 years
# ago, in a mouthwatering rhapsody by scholar Shu Xi."
# ① Load the raw text (one long string; trailing backslashes join the pieces).
#    Fixed: the original concatenation produced "1,700 yearsago" (missing space).
word = "Ancient Turkic people probably stuffed their dumplings with meat. But it's unclear when this practice began," \
       " or whether they learned the art of dumpling-making from others. However this happened, dumplings certainly " \
       "gathered steam in ancient China. That's where they first appear in the written record: more than 1,700 " \
       "years ago, in a mouthwatering rhapsody by scholar Shu Xi."
# Vocabulary: the distinct characters of the text.
# NOTE(review): set iteration order is not deterministic across interpreter runs
# (string-hash randomization), so character ids can differ between processes.
list_set = list(set(word))
seq_len = 10              # input window length: predict the next char from the previous 10
char_dim = len(list_set)  # vocabulary size == one-hot dimension
# ② Build the char -> integer-id lookup table.
word2id = {j: i for i, j in enumerate(list_set)}
# ③ Build (input, target) pairs: each sample is a seq_len-char window and the
#    same window shifted right by one character (per-step next-char prediction).
x = []
y = []
for i in range(len(word) - seq_len):
    x_str = word[i:i + seq_len]
    y_str = word[i + 1:i + 1 + seq_len]
    # distinct comprehension variable so the outer loop index `i` is not shadowed
    x_id = [word2id[ch] for ch in x_str]
    y_id = [word2id[ch] for ch in y_str]
    x.append(x_id)
    y.append(y_id)
# ④ One-hot encode the id sequences and lay them out as
#    (num_samples, seq_len, char_dim) float tensors for the LSTM.
#    The reshape makes the (batch, time, vocab) layout explicit — presumably
#    to_categorical already returns that shape here, so it is a no-op; TODO confirm.
x = utils.to_categorical(x,num_classes=char_dim)
y = utils.to_categorical(y,num_classes=char_dim)
x = np.reshape(x,(-1,seq_len,char_dim))
y = np.reshape(y,(-1,seq_len,char_dim))
# Section 3: model definition (三、创建模型)
# ⑤ Character-level model: two stacked LSTM layers followed by a
#    per-timestep softmax over the vocabulary.
class LSTM(models.Model):
    """Two stacked LSTMs with a time-distributed softmax classifier head."""

    def __init__(self):
        super(LSTM, self).__init__()
        # Keep the whole stack as one Sequential sub-model; both recurrent
        # layers return the full sequence so every timestep is classified.
        stacked = [
            layers.LSTM(units=char_dim, return_sequences=True),
            layers.LSTM(units=char_dim, return_sequences=True),
            layers.TimeDistributed(
                layers.Dense(units=char_dim, activation=activations.softmax)
            ),
        ]
        self.lstm = models.Sequential(stacked)

    def call(self, inputs, training=None, mask=None):
        """Forward pass: (batch, seq_len, char_dim) -> same-shaped softmax probs."""
        return self.lstm(inputs)
# Section 4: instantiate, train, and predict (四、创建对象)
if __name__ == '__main__':
    model = LSTM()
    # Build with the known input shape so weights exist before compile/fit.
    model.build(input_shape=(None, seq_len, char_dim))
    # ① Optimizer / loss / metrics.
    # NOTE(review): learning_rate=0.1 is unusually high for Adam and may
    # diverge — kept as-is to preserve behavior; consider 1e-2 or 1e-3.
    model.compile(optimizer=optimizers.Adam(learning_rate=0.1),
                  loss=losses.categorical_crossentropy,
                  metrics=['acc'])  # metrics as a list: canonical Keras form
    # ② Train on the full windowed dataset.
    model.fit(x, y, epochs=50)
    # ③ Decode predictions: argmax over the vocab axis per timestep, then
    #    map the ids back to characters and print each reconstructed window.
    pre = model.predict(x)
    for seq_probs in pre:
        pre_id = np.argmax(seq_probs, axis=1)
        chars = [list_set[idx] for idx in pre_id]
        print(''.join(chars))