from __future__ import print_function
import tensorflow as tf
from tensorflow.contrib import rnn
import numpy as np
# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
Parameters
# Training Parameters
learning_rate = 0.001
training_steps = 10000
batch_size = 128
display_step = 200

# Network Parameters
num_input = 28    # MNIST data input (img shape: 28*28)
timesteps = 28    # timesteps
num_hidden = 128  # hidden layer num of features
num_classes = 10  # MNIST total classes (0-9 digits)
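The code below calls BiRNN(X, weights, biases) and feeds X and Y into the session, but the listing never defines them. Here is a minimal sketch of the missing graph inputs and output-layer variables, assuming the shapes implied by the rest of the code (the output weight matrix has 2*num_hidden rows because the forward and backward LSTM outputs are concatenated):

# tf Graph input (sketch; shapes assumed from the reshape/feed_dict calls below)
X = tf.placeholder("float", [None, timesteps, num_input])
Y = tf.placeholder("float", [None, num_classes])

# Output-layer weights: 2*num_hidden because forward + backward outputs are concatenated
weights = {
    'out': tf.Variable(tf.random_normal([2 * num_hidden, num_classes]))
}
biases = {
    'out': tf.Variable(tf.random_normal([num_classes]))
}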
def BiRNN(x, weights, biases):
    # Prepare data shape to match the `rnn` function requirements
    # Current data input shape: (batch_size, timesteps, num_input)
    # Required shape: a list of 'timesteps' tensors of shape (batch_size, num_input)

    # Unstack to get a list of 'timesteps' tensors of shape (batch_size, num_input)
    # (see the small shape check after this function)
    x = tf.unstack(x, timesteps, 1)

    # Define lstm cells with tensorflow
    # Forward direction cell
    lstm_fw_cell = rnn.BasicLSTMCell(num_hidden, forget_bias=1.0)
    # Backward direction cell
    lstm_bw_cell = rnn.BasicLSTMCell(num_hidden, forget_bias=1.0)

    # Get lstm cell output
    try:
        outputs, _, _ = rnn.static_bidirectional_rnn(lstm_fw_cell, lstm_bw_cell, x,
                                                     dtype=tf.float32)
    except Exception:  # Old TensorFlow versions only return outputs, not states
        outputs = rnn.static_bidirectional_rnn(lstm_fw_cell, lstm_bw_cell, x,
                                               dtype=tf.float32)

    # Linear activation, using the rnn inner loop's last output
    return tf.matmul(outputs[-1], weights['out']) + biases['out']
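To make the unstack comment above concrete, here is a tiny illustrative shape check (the names demo_batch and time_major_list are hypothetical, not part of the tutorial): a (batch, 28, 28) image batch unstacked along axis 1 becomes a Python list of 28 tensors of shape (batch, 28), which is the input format rnn.static_bidirectional_rnn expects.

# Illustrative only: what tf.unstack produces for one mini-batch
demo_batch = tf.zeros([4, timesteps, num_input])         # hypothetical batch of 4 images
time_major_list = tf.unstack(demo_batch, timesteps, 1)   # list of 28 tensors
print(len(time_major_list), time_major_list[0].shape)    # -> 28 (4, 28)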
Loss, optimizer, accuracy
logits = BiRNN(X, weights, biases)
prediction = tf.nn.softmax(logits)

# Define loss and optimizer
loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
    logits=logits, labels=Y))
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
train_op = optimizer.minimize(loss_op)

# Evaluate model (with test logits, for dropout to be disabled)
correct_pred = tf.equal(tf.argmax(prediction, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
Training and testing
# Initialize the variables (i.e. assign their default value)
init = tf.global_variables_initializer()
# Start training
with tf.Session() as sess:
    # Run the initializer
    sess.run(init)

    for step in range(1, training_steps + 1):
        batch_x, batch_y = mnist.train.next_batch(batch_size)
        # Reshape data to get 28 seq of 28 elements
        batch_x = batch_x.reshape((batch_size, timesteps, num_input))
        # Run optimization op (backprop)
        sess.run(train_op, feed_dict={X: batch_x, Y: batch_y})
        if step % display_step == 0 or step == 1:
            # Calculate batch loss and accuracy
            loss, acc = sess.run([loss_op, accuracy],
                                 feed_dict={X: batch_x, Y: batch_y})
            print("Step " + str(step) + ", Minibatch Loss= " +
                  "{:.4f}".format(loss) + ", Training Accuracy= " +
                  "{:.3f}".format(acc))

    print("Optimization Finished!")

    # Calculate accuracy for 128 MNIST test images
    test_len = 128
    test_data = mnist.test.images[:test_len].reshape((-1, timesteps, num_input))
    test_label = mnist.test.labels[:test_len]
    print("Testing Accuracy:",
          sess.run(accuracy, feed_dict={X: test_data, Y: test_label}))