import tensorflow as tf
# For building the model
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.layers import SimpleRNN, LSTM, GRU, Dropout, Dense, Activation, Input
# Helper utilities for data processing and training callbacks
from tensorflow.keras import utils
from tensorflow.keras.callbacks import CSVLogger, ReduceLROnPlateau
class DLSTMcell(tf.keras.Model):
    """
    A single DLSTM step: a standard LSTM cell whose output gate is
    additionally conditioned on an auxiliary input d_t.
    return : h_t, c_t, h_t
    """
    def __init__(self, hiddensize=None):
        super().__init__()
        self.f = Dense(hiddensize, use_bias=True)  # forget gate
        self.i = Dense(hiddensize, use_bias=True)  # input gate
        self.c = Dense(hiddensize, use_bias=True)  # candidate cell state
        self.o = Dense(hiddensize, use_bias=True)  # output gate
        self.d = Dense(hiddensize, use_bias=True)  # projection of the auxiliary input
    def call(self, inputs, dinputs, c, h):
        xh = tf.concat([inputs, h], axis=1)  # [x_t, h_{t-1}], computed once
        # the forget gate scales the previous cell state
        c = c * tf.nn.sigmoid(self.f(xh))
        # the input gate writes the new candidate state
        c = c + tf.nn.sigmoid(self.i(xh)) * tf.nn.tanh(self.c(xh))
        # the output gate also receives the auxiliary input d_t
        o = tf.nn.sigmoid(self.o(xh) + self.d(dinputs))
        h = tf.nn.tanh(c) * o
        return h, c, h
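# A quick single-step sanity check of the cell (illustrative only; the
# batch, feature, and hidden sizes here are assumptions, not from the original):
cell = DLSTMcell(hiddensize=8)
x_t = tf.random.uniform([2, 4])   # batch of 2, feature size 4
d_t = tf.random.uniform([2, 4])   # auxiliary input, same shape as x_t
c0, h0 = tf.zeros([2, 8]), tf.zeros([2, 8])
_, c1, h1 = cell(x_t, d_t, c0, h0)
h1.shape
>>> TensorShape([2, 8])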
class DLSTM(tf.keras.Model):
    """
    Unrolls DLSTMcell over the time axis.
    return : the full hidden-state sequence if return_sequences=True,
             otherwise the last hidden state h_T
    """
    def __init__(self, batchsize, timesteps, hiddensize=None, return_sequences=True):
        super().__init__()
        self.ts = timesteps
        self.return_sequences = return_sequences
        self.hiddensize = hiddensize
        self.batchsize = batchsize  # stored for reference; state shapes come from the input
        self.cell = DLSTMcell(hiddensize)
    def call(self, inputs, dinputs):
        outx = []
        # read the batch size from the input so a smaller final batch
        # (e.g. with validation_split in fit) does not break the state shapes;
        # the states are initialized with random noise rather than zeros
        batch = tf.shape(inputs)[0]
        c = tf.random.uniform([batch, self.hiddensize])
        h = tf.random.uniform([batch, self.hiddensize])
        for i in range(self.ts):
            _, c, h = self.cell(inputs[:, i, :], dinputs[:, i, :], c, h)
            outx.append(h)
        if self.return_sequences:
            return tf.stack(outx, axis=1)  # [batch, timesteps, hiddensize]
        return outx[-1]                    # [batch, hiddensize]
batchsize = 32
timesteps = 10
featuresize = 4
units = 40
inputs = tf.random.uniform([batchsize, timesteps, featuresize])
dinputs = tf.random.uniform([batchsize, timesteps, featuresize])
model = DLSTM(batchsize, timesteps, units)
out = model(inputs, dinputs)
out.shape
>>> TensorShape([32, 10, 40])
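# For comparison (an aside, not part of the original pipeline): the built-in
# Keras LSTM with the same settings produces the same output shape
ref = LSTM(units, return_sequences=True)(inputs)
ref.shape
>>> TensorShape([32, 10, 40])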
def dlstm(batchsize, timesteps, featuresize):
    inputs = Input(shape=[timesteps, featuresize])
    dinputs = Input(shape=[timesteps, featuresize])
    # two stacked DLSTM layers return the full sequence; the last one
    # returns only the final hidden state, which feeds the dense head
    x = DLSTM(batchsize, timesteps, 40, return_sequences=True)(inputs, dinputs)
    x = DLSTM(batchsize, timesteps, 40, return_sequences=True)(x, dinputs)
    x = DLSTM(batchsize, timesteps, 10, return_sequences=False)(x, dinputs)
    x = Dense(featuresize)(x)
    x = Activation("relu")(x)
    model = Model([inputs, dinputs], x, name='DLSTMnet')
    return model
batchsize = 32
timesteps = 10
featuresize = 4
units = 40
inputs = tf.random.uniform([batchsize, timesteps, featuresize])
dinputs = tf.random.uniform([batchsize, timesteps, featuresize])
model = dlstm(batchsize, timesteps, featuresize)
model([inputs, dinputs]).shape
>>> TensorShape([32, 4])
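# The post does not show how dinputs is constructed; if it is meant to be the
# rate of change of the input signal, one plausible choice (an assumption, not
# necessarily the author's method) is a first-order difference along time:
def first_difference(x):
    # pad with a zero step so the output keeps shape [batch, timesteps, features]
    dx = x[:, 1:, :] - x[:, :-1, :]
    return tf.concat([tf.zeros_like(x[:, :1, :]), dx], axis=1)
dinputs = first_difference(inputs)
dinputs.shape
>>> TensorShape([32, 10, 4])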
def train(train_x, train_dx, train_y, test_x, test_dx, test_y):
    # derive the sequence dimensions from the training data
    _, timesteps, featuresize = train_x.shape
    model = dlstm(batchsize, timesteps, featuresize)
    adam = tf.keras.optimizers.Adam(learning_rate=0.01)
    # Compile the model: optimizer, loss function, and evaluation metric
    model.compile(loss='mse',
                  optimizer=adam,
                  metrics=['acc'])
    # Log the per-epoch metrics [epoch, acc, loss, val_acc, val_loss] to a CSV file.
    # loss/acc: training loss and accuracy; val_loss/val_acc: validation loss and accuracy
    log = CSVLogger("./log.csv", separator=",", append=False)
    # Reduce the learning rate when the monitored metric stops improving
    reduce = ReduceLROnPlateau(monitor='val_acc',
                               factor=0.5,
                               patience=3,
                               verbose=1,
                               mode='auto', min_delta=0.001, cooldown=0, min_lr=0.001)
    model.fit([train_x, train_dx],
              train_y,
              epochs=5,
              batch_size=32,
              verbose=1,
              validation_split=0.1,
              callbacks=[log, reduce])
    # Model evaluation: tf.keras's evaluate() is the counterpart of sklearn's
    # score(); it returns the loss and the metrics requested at compile time.
    # The model takes two inputs, so the test d-inputs must be passed as well.
    loss, acc = model.evaluate([test_x, test_dx], test_y, verbose=1)
    print('Loss : {}, Accuracy: {}'.format(loss, acc))
    # model.save("./model.h5")
    return model
# train_dseq / test_dseq are assumed here to hold the auxiliary d-inputs,
# prepared alongside train_seq / test_seq
model = train(train_seq, train_dseq, train_label, test_seq, test_dseq, test_label)
model.summary()
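# To render pyecharts charts offline in Jupyter, install the assets as a notebook extension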
git clone https://github.com/pyecharts/pyecharts-assets.git
# Install and enable the extension
cd pyecharts-assets
jupyter nbextension install assets
jupyter nbextension enable assets/main
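# A minimal sketch of plotting the training curves from the log.csv written
# by CSVLogger above (the column names follow the metrics set at compile time)
import pandas as pd
from pyecharts.charts import Line
from pyecharts import options as opts
from pyecharts.globals import CurrentConfig, NotebookType
CurrentConfig.NOTEBOOK_TYPE = NotebookType.JUPYTER_NOTEBOOK  # use the local assets
hist = pd.read_csv("./log.csv")
line = (
    Line()
    .add_xaxis([str(e) for e in hist["epoch"]])
    .add_yaxis("loss", hist["loss"].round(4).tolist())
    .add_yaxis("val_loss", hist["val_loss"].round(4).tolist())
    .set_global_opts(title_opts=opts.TitleOpts(title="Training curves"))
)
line.render_notebook()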