import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets("C:/PythonWork/Datasets/MNIST", one_hot=True)
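
# Hyperparameters: 100 examples per mini-batch; n_batch is the number of batches per epoch.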
batch_size = 100
n_batch = mnist.train.num_examples // batch_size
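
# Input placeholder: flattened 28x28 grayscale images, reshaped to NHWC for conv2d.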
x = tf.placeholder(tf.float32, [None, 784])
x_image = tf.reshape(x, [-1, 28, 28, 1])
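
# Conv layer 1: 5x5 kernels, 1 input channel -> 32 feature maps, followed by 2x2 max pooling.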
w_conv1 = tf.Variable(tf.truncated_normal([5, 5, 1, 32], stddev=0.1))
b_conv1 = tf.Variable(tf.zeros([32]) + 0.1)
o_conv1 = tf.add(tf.nn.conv2d(x_image, w_conv1, strides=[1, 1, 1, 1], padding='SAME'), b_conv1)
activate_conv1 = tf.nn.relu(o_conv1)
o_pool1 = tf.nn.max_pool(activate_conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
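
# Conv layer 2: 32 -> 64 feature maps, again followed by 2x2 max pooling.
# Two rounds of 2x2 pooling shrink 28x28 to 7x7, giving 7*7*64 features to flatten.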
w_conv2 = tf.Variable(tf.truncated_normal([5, 5, 32, 64], stddev=0.1))
b_conv2 = tf.Variable(tf.zeros([64]) + 0.1)
o_conv2 = tf.add(tf.nn.conv2d(o_pool1, w_conv2, strides=[1, 1, 1, 1], padding='SAME'), b_conv2)
activate_conv2 = tf.nn.relu(o_conv2)
o_pool2 = tf.nn.max_pool(activate_conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
o_pool2_flat = tf.reshape(o_pool2, [-1, 7 * 7 * 64])
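
# Fully connected layer: 7*7*64 flattened features -> 1024 units.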
w_fc1 = tf.Variable(tf.truncated_normal([7 * 7 * 64, 1024], stddev=0.1))
b_fc1 = tf.Variable(tf.zeros([1024]) + 0.1)
o_fc1 = tf.add(tf.matmul(o_pool2_flat, w_fc1), b_fc1)
activate_fc1 = tf.nn.relu(o_fc1)
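
# Dropout: in TF1, tf.nn.dropout's second argument is the *keep* probability,
# so 0.7 keeps 70% of activations during training and 1.0 disables dropout at test time.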
keep_prob = tf.placeholder(tf.float32)
activate_fc1_drop = tf.nn.dropout(activate_fc1, keep_prob)
w_fc2 = tf.Variable(tf.truncated_normal([1024, 10], stddev=0.1))
b_fc2 = tf.Variable(tf.zeros([10]) + 0.1)
logits = tf.add(tf.matmul(activate_fc1_drop, w_fc2), b_fc2)
prediction = tf.nn.softmax(logits)
labels = tf.placeholder(tf.float32, [None, 10])
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits))
optimizer = tf.train.AdamOptimizer(1e-4)
train = optimizer.minimize(loss)
accuracy_rate = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(prediction, 1), tf.argmax(labels, 1)), tf.float32))
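
# Train for 21 epochs, evaluating on the full test set after each epoch.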
with tf.Session() as session:
    session.run(tf.global_variables_initializer())
    for i in range(21):
        for j in range(n_batch):
            batch_x, batch_labels = mnist.train.next_batch(batch_size)
            session.run(train, feed_dict={x: batch_x, labels: batch_labels, keep_prob: 0.7})
        test_acc, l = session.run([accuracy_rate, loss],
                                  feed_dict={x: mnist.test.images, labels: mnist.test.labels, keep_prob: 1.0})
        print("Train " + str(i) + " Times: Testing Accuracy Rate = " + str(test_acc) + ' loss:' + str(l))
import tensorflow as tf
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data
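
# Each input_fn builds a tf.data pipeline and returns a ({'images': batch}, labels) pair,
# which is the (features, labels) contract expected by tf.estimator.Estimator.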
def train_input_fn(batch_size, mnist):
    images = mnist.train.images
    images = tf.reshape(images, [-1, 28, 28, 1])
    labels = mnist.train.labels
    images_ds = tf.data.Dataset.from_tensor_slices(images)
    labels_ds = tf.data.Dataset.from_tensor_slices(labels)
    images_ds = images_ds.repeat()
    labels_ds = labels_ds.repeat()
    images_ds = images_ds.batch(batch_size)
    labels_ds = labels_ds.batch(batch_size)
    images_it = images_ds.make_one_shot_iterator()
    labels_it = labels_ds.make_one_shot_iterator()
    images = images_it.get_next()
    labels = labels_it.get_next()
    return {'images': images}, labels
def test_input_fn(batch_size, mnist):
    images = mnist.test.images
    images = tf.reshape(images, [-1, 28, 28, 1])
    labels = mnist.test.labels
    images_ds = tf.data.Dataset.from_tensor_slices(images)
    labels_ds = tf.data.Dataset.from_tensor_slices(labels)
    images_ds = images_ds.repeat()
    labels_ds = labels_ds.repeat()
    images_ds = images_ds.batch(batch_size)
    labels_ds = labels_ds.batch(batch_size)
    images_it = images_ds.make_one_shot_iterator()
    labels_it = labels_ds.make_one_shot_iterator()
    images = images_it.get_next()
    labels = labels_it.get_next()
    return {'images': images}, labels
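
# Prediction input: a single test example. Slicing with [0:1] below keeps the leading
# batch dimension, so the label stays a one-hot vector rather than ten scalars.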
def predict_input_fn(batch_size, mnist):
    images = mnist.test.images[0:1]
    images = tf.reshape(images, [-1, 28, 28, 1])
    labels = mnist.test.labels[0:1]
    images_ds = tf.data.Dataset.from_tensor_slices(images)
    labels_ds = tf.data.Dataset.from_tensor_slices(labels)
    images_ds = images_ds.batch(batch_size)
    labels_ds = labels_ds.batch(batch_size)
    images_it = images_ds.make_one_shot_iterator()
    labels_it = labels_ds.make_one_shot_iterator()
    images = images_it.get_next()
    labels = labels_it.get_next()
    return {'images': images}, labels
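
# model_fn builds the same CNN with tf.layers and returns a mode-specific
# EstimatorSpec for TRAIN, EVAL, and PREDICT.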
def model_fn(features, labels, mode, params):
    def create_model(features, is_training):
        net = features['images']
        # Conv/pool stack mirrors the session version above.
        net = tf.layers.conv2d(inputs=net, filters=32, kernel_size=[5, 5], strides=(1, 1),
                               padding='same', activation=tf.nn.relu,
                               kernel_initializer=tf.truncated_normal_initializer(stddev=0.1),
                               bias_initializer=tf.constant_initializer(value=0.1),
                               trainable=is_training)
        net = tf.layers.max_pooling2d(inputs=net, pool_size=[2, 2], strides=[2, 2], padding='same')
        net = tf.layers.conv2d(inputs=net, filters=64, kernel_size=[5, 5], strides=(1, 1),
                               padding='same', activation=tf.nn.relu,
                               kernel_initializer=tf.truncated_normal_initializer(stddev=0.1),
                               bias_initializer=tf.constant_initializer(value=0.1),
                               trainable=is_training)
        net = tf.layers.max_pooling2d(inputs=net, pool_size=[2, 2], strides=[2, 2], padding='same')
        net = tf.layers.flatten(inputs=net)
        net = tf.layers.dense(inputs=net, units=1024, activation=tf.nn.relu,
                              kernel_initializer=tf.truncated_normal_initializer(stddev=0.1),
                              bias_initializer=tf.constant_initializer(value=0.1),
                              trainable=is_training)
        # tf.layers.dropout takes the *drop* rate (not a keep probability):
        # rate=0.3 matches the keep_prob of 0.7 used in the session version.
        net = tf.layers.dropout(inputs=net, rate=0.3, training=is_training)
        logits = tf.layers.dense(inputs=net, units=10,
                                 kernel_initializer=tf.truncated_normal_initializer(stddev=0.1),
                                 bias_initializer=tf.constant_initializer(value=0.1),
                                 trainable=is_training)
        return logits

    logits = create_model(features, mode == tf.estimator.ModeKeys.TRAIN)
    if mode == tf.estimator.ModeKeys.TRAIN:
        loss = tf.losses.softmax_cross_entropy(onehot_labels=labels, logits=logits)
        tf.identity(loss, name='loss')  # expose the loss under a stable name for LoggingTensorHook
        optimizer = tf.train.AdamOptimizer(1e-4)
        train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())
        return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
    if mode == tf.estimator.ModeKeys.EVAL:
        loss = tf.losses.softmax_cross_entropy(onehot_labels=labels, logits=logits)
        accuracy = tf.metrics.accuracy(labels=tf.argmax(labels, 1), predictions=tf.argmax(logits, 1), name='acc_op')
        metrics = {'accuracy': accuracy}
        # tf.metrics.accuracy returns a (value, update_op) pair; summarize the update_op tensor.
        tf.summary.scalar('accuracy', accuracy[1])
        return tf.estimator.EstimatorSpec(mode, loss=loss, eval_metric_ops=metrics)
    if mode == tf.estimator.ModeKeys.PREDICT:
        predictions = {
            'logits': logits,
            'probabilities': tf.nn.softmax(logits),
            'class_ids': tf.argmax(logits, 1)
        }
        return tf.estimator.EstimatorSpec(mode, predictions=predictions)
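
# Assemble the Estimator: checkpoints and event files are written to model_dir.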
classifier = tf.estimator.Estimator(
    model_fn=model_fn,
    model_dir='C:/Users/VCC/work/CNN_output',
    config=tf.estimator.RunConfig(save_checkpoints_steps=1000, keep_checkpoint_max=1),
    params={}
)
mnist = input_data.read_data_sets("C:/PythonWork/Datasets/MNIST", one_hot=True)
tf.logging.set_verbosity(tf.logging.INFO)
tensors_to_log = {"loss": 'loss'}
logging_hook = tf.train.LoggingTensorHook(tensors=tensors_to_log, every_n_iter=300)
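
# 11000 steps at batch size 100 is 1,100,000 examples, i.e. 20 passes over the
# 55,000-image MNIST training set -- comparable to the 21 epochs of the session version.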
classifier.train(input_fn=lambda: train_input_fn(100, mnist), steps=11000, hooks=[logging_hook])
evaluation = classifier.evaluate(input_fn=lambda: test_input_fn(10000, mnist), steps=1)
print(evaluation)
predictions = classifier.predict(input_fn=lambda: predict_input_fn(1, mnist))
for p in predictions:
    print(p)
    print(p['class_ids'])
    print(p['probabilities'][p['class_ids']])
    print(p['logits'][p['class_ids']])