"""Train a single-layer LSTM classifier on MNIST (TF1 compat, graph mode)."""
import os

# Silence TensorFlow's C++ info/warning log spam.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

import tensorflow.compat.v1 as tf
from tensorflow.examples.tutorials.mnist import input_data

tf.compat.v1.disable_eager_execution()

# One-hot-labelled MNIST; downloads into MNIST_data/ on first run.
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

# Each 28x28 image is fed to the LSTM as a sequence of 28 rows of 28 pixels.
n_inputs = 28    # features per time step (pixels per image row)
max_time = 28    # time steps (rows per image)
lstm_size = 100  # LSTM hidden units
n_classes = 10   # digit classes 0-9
batch_size = 64
n_batch = mnist.train.num_examples // batch_size

# Graph inputs: flattened image pixels and one-hot labels.
x = tf.placeholder(tf.float32, [None, 784])
y = tf.placeholder(tf.float32, [None, 10])

# Output projection mapping the final hidden state to class scores.
weights = tf.Variable(tf.truncated_normal([lstm_size, n_classes], stddev=0.1))
biases = tf.Variable(tf.constant(0.1, shape=[n_classes]))
def RNN(X, weights, biases):
    """Run a single-layer LSTM over the image rows and return class logits.

    Args:
        X: float32 tensor of flattened images, shape (batch, 784).
        weights: (lstm_size, n_classes) output projection matrix.
        biases: (n_classes,) output projection bias.

    Returns:
        (batch, n_classes) unnormalized class scores (logits).
    """
    # Turn each flat 784-vector into a sequence: max_time rows of n_inputs pixels.
    inputs = tf.reshape(X, [-1, max_time, n_inputs])
    lstm_cell = tf.nn.rnn_cell.LSTMCell(lstm_size)
    outputs, final_state = tf.nn.dynamic_rnn(lstm_cell, inputs, dtype=tf.float32)
    # final_state is an LSTMStateTuple (c, h); index 1 is the hidden state h
    # after the last time step.
    # BUG FIX: the original wrapped this in tf.nn.softmax, but the caller feeds
    # the result to tf.losses.softmax_cross_entropy, which expects raw logits
    # and applies softmax internally. The double softmax flattened gradients
    # and hurt training. Returning logits fixes the loss; the downstream
    # argmax-based accuracy is unchanged because softmax is monotonic.
    return tf.matmul(final_state[1], weights) + biases
# Assemble the rest of the graph: prediction, loss, optimizer, and metrics.
prediction = RNN(x, weights, biases)

# Cross-entropy between the one-hot labels and the network output.
loss = tf.losses.softmax_cross_entropy(y, prediction)

# Adam with a fixed learning rate of 1e-3.
train_step = tf.train.AdamOptimizer(1e-3).minimize(loss)

# Per-sample hit indicator: does the top-scoring class match the label?
hits = tf.equal(tf.argmax(y, 1), tf.argmax(prediction, 1))
accuracy = tf.reduce_mean(tf.cast(hits, tf.float32))

init = tf.global_variables_initializer()
# Train for 11 epochs, reporting held-out test accuracy after each one.
with tf.Session() as sess:
    sess.run(init)
    for epoch in range(11):
        # One full pass over the training set in mini-batches.
        for _ in range(n_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            sess.run(train_step, feed_dict={x: batch_xs, y: batch_ys})
        # Evaluate on the entire test set in a single run.
        test_feed = {x: mnist.test.images, y: mnist.test.labels}
        acc = sess.run(accuracy, feed_dict=test_feed)
        print("Iter " + str(epoch) + ", Testing Accuracy= " + str(acc))