```python
import tensorflow as tf
import numpy as np
import matplotlib as mpl
# mpl.use('Agg')
from matplotlib import pyplot as plt

# Number of hidden units in each LSTM layer
HIDDEN_SIZE = 30
# Number of stacked layers in the deep RNN
NUM_LAYERS = 2
# Length of the training sequences, i.e. the number of steps the RNN is unrolled
TIME_STEPS = 10
# Number of training steps
TRAINING_STEPS = 10000
# Batch size
BATCH_SIZE = 32
# Number of training examples
TRAIN_EXAMPLES = 10000
# Number of test examples
TEST_EXAMPLES = 1000
# Sampling interval
SAMPLE_GAP = 0.01

def generate_data(seq):
    X = []
    y = []
    # Item i together with the following TIME_STEPS - 1 items forms one input,
    # and item i + TIME_STEPS is the corresponding label: the previous TIME_STEPS
    # points of the sine curve are used to predict the value at point i + TIME_STEPS.
    for i in range(len(seq) - TIME_STEPS):
        X.append([seq[i : i + TIME_STEPS]])
        y.append([seq[i + TIME_STEPS]])
    return np.array(X, dtype=np.float32), np.array(y, dtype=np.float32)
```
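A quick shape check (not part of the original listing) makes the data layout easier to follow: each sample wraps its `TIME_STEPS`-point history in an extra list, so the inputs come out with shape `[num_samples, 1, TIME_STEPS]` and the labels with shape `[num_samples, 1]`.

```python
# Hedged sanity check, assuming the constants and generate_data defined above.
demo_X, demo_y = generate_data(np.sin(np.linspace(0, 10, 200, dtype=np.float32)))
print(demo_X.shape)  # (190, 1, 10) -> [num_samples, 1, TIME_STEPS]
print(demo_y.shape)  # (190, 1)     -> the next sine value for each sample
```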
```python
def lstm_model(X, y, is_training):
    # Network construction, option 1
    lstm_cells = [tf.nn.rnn_cell.DropoutWrapper(tf.nn.rnn_cell.BasicLSTMCell(num_units=HIDDEN_SIZE))
                  for _ in range(NUM_LAYERS)]
    multi_lstm_cell = tf.nn.rnn_cell.MultiRNNCell(lstm_cells)
    # Network construction, option 2
    # multi_lstm_cell = tf.nn.rnn_cell.MultiRNNCell(
    #     [tf.nn.rnn_cell.BasicLSTMCell(HIDDEN_SIZE) for _ in range(NUM_LAYERS)])
    # Network construction, option 3
    # lstm_cell = tf.nn.rnn_cell.BasicLSTMCell
    # multi_lstm_cell = tf.nn.rnn_cell.MultiRNNCell(
    #     [lstm_cell(num_units=HIDDEN_SIZE) for _ in range(NUM_LAYERS)])
    # Wrong construction: every layer would share the same cell instance
    # lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(num_units=HIDDEN_SIZE)
    # multi_lstm_cell = tf.nn.rnn_cell.MultiRNNCell([lstm_cell for _ in range(NUM_LAYERS)])

    # Connect the stacked LSTM cells into an RNN with tf.nn.dynamic_rnn and
    # compute its forward pass.
    outputs, _ = tf.nn.dynamic_rnn(multi_lstm_cell, X, dtype=tf.float32)
    # outputs is the output of the top LSTM layer at every time step, with shape
    # [BATCH_SIZE, TIME_STEPS, HIDDEN_SIZE]; only the last time step is needed here.
    output = outputs[:, -1, :]
    # Add a fully connected layer on top of the LSTM output and compute the loss.
    pred = tf.contrib.layers.fully_connected(output, 1, activation_fn=None)
    # The loss and the optimization step are only computed during training;
    # at test time the prediction is returned directly.
    if not is_training:
        return pred, None, None
    # Compute the loss.
    loss = tf.losses.mean_squared_error(labels=y, predictions=pred)
    # Create the optimizer and the training step.
    # train_op = tf.contrib.layers.optimize_loss(
    #     loss, tf.train.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
    train_op = tf.train.AdagradOptimizer(0.1).minimize(loss)
    return pred, loss, train_op
```
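The construction marked as wrong above differs from the working ones only in when `BasicLSTMCell(...)` is called: a list comprehension over the constructor creates one cell object, and therefore one set of weights, per layer, whereas repeating a single cell object makes every layer share the same weights, which cannot fit both the raw input width and the `HIDDEN_SIZE`-wide input of the deeper layers. A minimal sketch of the difference (illustration only, not part of the model):

```python
# Each call to BasicLSTMCell(...) creates an independent cell with its own variables.
separate = [tf.nn.rnn_cell.BasicLSTMCell(HIDDEN_SIZE) for _ in range(NUM_LAYERS)]
assert separate[0] is not separate[1]

# Repeating one cell object means all layers would share a single set of variables,
# which is why the "wrong" construction fails when the stacked RNN is built.
shared = tf.nn.rnn_cell.BasicLSTMCell(HIDDEN_SIZE)
duplicated = [shared for _ in range(NUM_LAYERS)]
assert duplicated[0] is duplicated[1]
```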
```python
def train(sess, train_X, train_y):
    # Wrap the training data in a tf.data Dataset and draw shuffled batches.
    ds = tf.data.Dataset.from_tensor_slices((train_X, train_y))
    ds = ds.repeat().shuffle(1000).batch(BATCH_SIZE)
    X, y = ds.make_one_shot_iterator().get_next()
    with tf.variable_scope('model'):
        pred, loss, train_op = lstm_model(X, y, True)
    sess.run(tf.global_variables_initializer())
    for i in range(TRAINING_STEPS):
        _, l = sess.run([train_op, loss])
        if i % 100 == 0:
            print('train step: ' + str(i) + ', loss: ' + str(l))

def eval(sess, test_X, test_y):
    ds = tf.data.Dataset.from_tensor_slices((test_X, test_y))
    ds = ds.batch(1)
    X, y = ds.make_one_shot_iterator().get_next()
    # Call the model to get predictions; the real y values are not needed as input,
    # so a dummy label of [0.0] is passed in.
    with tf.variable_scope('model', reuse=True):
        pred, _, _ = lstm_model(X, [0.0], False)
    # Collect the predictions and the true values.
    preds = []
    labels = []
    for i in range(TEST_EXAMPLES):
        p, l = sess.run([pred, y])
        preds.append(p)
        labels.append(l)
    preds = np.array(preds).squeeze()
    labels = np.array(labels).squeeze()
    rmse = np.sqrt(((preds - labels) ** 2).mean(axis=0))
    print("root mean square error is: %f" % rmse)
    plt.figure()
    plt.plot(preds, label='predicted')
    plt.plot(labels, label='real_sin')
    plt.legend()
    plt.show()

# Generate training data on [0, test_start] and test data on [test_start, test_end].
test_start = (TRAIN_EXAMPLES + TIME_STEPS) * SAMPLE_GAP
test_end = test_start + (TEST_EXAMPLES + TIME_STEPS) * SAMPLE_GAP
train_X, train_y = generate_data(np.sin(np.linspace(
    0, test_start, TRAIN_EXAMPLES + TIME_STEPS, dtype=np.float32)))
test_X, test_y = generate_data(np.sin(np.linspace(
    test_start, test_end, TEST_EXAMPLES + TIME_STEPS, dtype=np.float32)))

with tf.Session() as sess:
    train(sess, train_X, train_y)
    # train(sess, test_X, test_y)
    # Building the training graph a second time like this raises:
    # ValueError: Variable model/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/kernel
    # already exists, disallowed. Did you mean to set reuse=True or
    # reuse=tf.AUTO_REUSE in VarScope? Originally defined at: ...
    eval(sess, test_X, test_y)
```
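The error quoted in the comment above comes from building variables in the same scope twice without reuse. If the training graph really does need to be constructed a second time in the same process, the error message itself points at a fix; a hedged sketch of the variant scope (replacing the one inside `train()`, everything else unchanged):

```python
# Sketch only: with reuse=tf.AUTO_REUSE the second construction reuses the
# variables created by the first instead of raising "already exists".
with tf.variable_scope('model', reuse=tf.AUTO_REUSE):
    pred, loss, train_op = lstm_model(X, y, True)
```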
![ellipse](https://img-blog.csdnimg.cn/20201116152152615.png?x-oss-process=image/watermark,type_ZmFuZ3poZW5naGVpdGk,shadow_10,text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L2xpdWdhbjUyOA==,size_16,color_FFFFFF,t_70#pic_center)
Figure 1. Predicting the sine function with a deep recurrent neural network
Reference
Zheng Zeyu et al. TensorFlow实战Google深度学习框架 (TensorFlow: Google Deep Learning Framework in Practice), 2nd ed. Publishing House of Electronics Industry, 2018.