Building an LSTM with TensorFlow

import tensorflow as tf
from tensorflow.contrib import rnn

from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
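# Note: tensorflow.contrib and the tutorials MNIST loader exist only in TensorFlow 1.x;
# this script targets the 1.x API and will not run unmodified under TensorFlow 2.x.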

learning_rate = 0.001
training_steps = 10000
batch_size = 128
display_step = 200

num_inputs = 28  # number of input features per time step
timesteps = 28   # number of time steps
num_hiddens = 64 # number of units in each LSTM cell
num_classes = 10 # number of classes
num_layers = 2   # number of stacked recurrent layers

X = tf.placeholder("float", [None, timesteps, num_inputs])
Y = tf.placeholder("float", [None, num_classes])
keep_prob = tf.placeholder(tf.float32, [])  # scalar keep probability, so dropout can be disabled at evaluation time

weights = {
    'out': tf.Variable(tf.random_normal([num_hiddens, num_classes]))
}
biases = {
    'out': tf.Variable(tf.random_normal([num_classes]))
}

def lstm(lstm_x, weights, biases, keep_prob):
    # Build the stacked (multi-layer) LSTM
    multi_cell = []
    for i in range(num_layers):
        # num_units is the dimensionality of each LSTM cell's output
        lstm_cell = rnn.BasicLSTMCell(num_units=num_hiddens, forget_bias=1.0, state_is_tuple=True)
        # For dropout, usually only output_keep_prob is set;
        # dropout should only be active during training
        lstm_cell = rnn.DropoutWrapper(cell=lstm_cell, input_keep_prob=1.0, output_keep_prob=keep_prob)
        multi_cell.append(lstm_cell)
    
    # Stack the cells into a multi-layer LSTM with MultiRNNCell
    mlstm_cell = rnn.MultiRNNCell(multi_cell, state_is_tuple=True)
    # Initialize the state with zeros
    init_state = mlstm_cell.zero_state(batch_size, dtype=tf.float32)
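    # Note: hard-coding batch_size above ties the graph to batches of exactly 128 examples.
    # A more flexible variant (an assumption, not part of the original code) infers it from the input:
    #     init_state = mlstm_cell.zero_state(tf.shape(lstm_x)[0], dtype=tf.float32)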
    # With time_major=False, outputs.shape = [batch_size, timesteps, num_hiddens]
    outputs, state = tf.nn.dynamic_rnn(mlstm_cell, inputs=lstm_x, initial_state=init_state, time_major=False)
    
    # Note: tf.nn.static_rnn instead takes a Python list of per-time-step tensors of shape
    # [batch_size, num_inputs], obtained with x = tf.unstack(x, axis=1)

    # Take the output of the last time step, shape (batch_size, num_hiddens)
    h_state = outputs[:, -1, :]
    # Alternatively: h_state = state[-1][1]
    # or transpose outputs to (timesteps, batch_size, num_hiddens) and take the last step:
    # outputs = tf.transpose(outputs, [1, 0, 2])
    # h_state = outputs[-1]
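    # A minimal static_rnn sketch of that alternative (an assumption, not part of the original graph):
    #     x_steps = tf.unstack(lstm_x, timesteps, axis=1)  # list of `timesteps` tensors, each [batch_size, num_inputs]
    #     outputs_list, state = rnn.static_rnn(mlstm_cell, x_steps, dtype=tf.float32)
    #     h_state = outputs_list[-1]  # last time step, shape (batch_size, num_hiddens)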
    
    # Fully connected output layer, shape (batch_size, num_classes)
    return tf.matmul(h_state, weights['out']) + biases['out']

logits = lstm(X, weights, biases, keep_prob)

# Define the loss and the optimizer
loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=Y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
train_op = optimizer.minimize(loss_op)
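# Note: on later TF 1.x releases softmax_cross_entropy_with_logits is deprecated; with one-hot
# labels an equivalent form (an assumption, not in the original) is:
#     loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=Y))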
# Compute the accuracy
correct_pred = tf.equal(tf.argmax(logits, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

init = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init)
    
    for step in range(1, training_steps+1):
        batch_x, batch_y = mnist.train.next_batch(batch_size)
        # Reshape each flat 784-pixel image to (batch_size, timesteps, num_inputs)
        batch_x = batch_x.reshape((batch_size, timesteps, num_inputs))
        sess.run(train_op, feed_dict={X: batch_x, Y: batch_y, keep_prob: 0.9})
        if step % display_step == 0 or step == 1:
            # Evaluate loss and accuracy on the current batch with dropout disabled
            loss, acc = sess.run([loss_op, accuracy], feed_dict={X: batch_x, Y: batch_y, keep_prob: 1.})
            print("Step " + str(step) + ", Minibatch Loss= " + \
                  "{:.4f}".format(loss) + ", Training Accuracy= " + \
                  "{:.3f}".format(acc))

    print("Optimization Finished!")

    # Calculate accuracy for 128 mnist test images
    test_len = 128
    test_data = mnist.test.images[:test_len].reshape((-1, timesteps, num_inputs))
    test_label = mnist.test.labels[:test_len]
    print("Testing Accuracy:", sess.run(accuracy, feed_dict={X: test_data, Y: test_label, keep_prob:1.}))