代码中 num_steps 可以设置为需要的序列长度；为了梯度反传（BPTT）的方便，常常将 num_steps 设置为较小的值。
def LSTM_single( name, _X, _istate, _weights, _biases):
# input shape: (batch_size, n_steps, n_input)
_X = tf.transpose(_X, [1, 0, 2]) # permute num_steps and batch_size
# Reshape to prepare input to hidden activation
_X = tf.reshape(_X, [self.num_steps * self.batch_size, self.num_input]) # (num_steps*batch_size, num_input)
# Split data because rnn cell needs a list of inputs for the RNN inner loop
_X = tf.split(axis=0, num_or_size_splits=self.num_steps, value=_X) # n_steps * (batch_size, num_input)
#print("_X: ", _X)
#cell = tf.nn.rnn_cell.LSTMCell(self.num_input, self.num_input)
cell = tf.contrib.rnn.LSTMCell(self.num_input, state_is_tuple=False)
state = _istate
output