github博客传送门 博客园传送门
本章所需知识:
没有基础的请先观看深度学习系列视频:TensorFlow、Python 基础
资料下载链接:
深度学习基础网络模型(mnist手写体识别数据集)
MNIST数据集手写体识别(CNN实现)
# TensorFlow 1.x APIs (tf.placeholder / tf.Session) — this script predates TF 2.
import tensorflow as tf
# input_data downloads/caches MNIST and exposes train/test batch iterators.
import tensorflow. examples. tutorials. mnist. input_data as input_data
# one_hot=True: labels come back as length-10 one-hot vectors (matches input_y below).
mnist = input_data. read_data_sets( '../MNIST_data/' , one_hot= True )
class CNNNet:
    """Minimal two-conv-layer CNN for MNIST classification (TensorFlow 1.x graph mode).

    Call order expected by users: __init__() builds the variables, forward()
    builds the inference graph, backward() the loss/optimizer, acc() the
    accuracy op.  Attribute names (x, y, cost, opt, output, accaracy) are the
    public interface read by the training script.
    """

    def __init__(self):
        # Graph inputs: NHWC images and one-hot labels.
        self.x = tf.placeholder(dtype=tf.float32, shape=[None, 28, 28, 1], name='input_x')
        self.y = tf.placeholder(dtype=tf.float32, shape=[None, 10], name='input_y')
        # NOTE(fix): the name= kwarg was originally passed to the initializer
        # ops (tf.truncated_normal / tf.zeros), which left the tf.Variable
        # objects with default names; it now names the variables themselves
        # so checkpoints/TensorBoard show 'w1', 'b1', ...
        self.w1 = tf.Variable(
            tf.truncated_normal(shape=[3, 3, 1, 16], dtype=tf.float32, stddev=tf.sqrt(1 / 16)),
            name='w1')
        self.b1 = tf.Variable(tf.zeros(shape=[16], dtype=tf.float32), name='b1')
        self.w2 = tf.Variable(
            tf.truncated_normal(shape=[3, 3, 16, 32], dtype=tf.float32, stddev=tf.sqrt(1 / 32)),
            name='w2')
        self.b2 = tf.Variable(tf.zeros(shape=[32], dtype=tf.float32), name='b2')
        # Both convs use SAME padding and stride 1, so the spatial size stays
        # 28x28; the flattened feature size is therefore 28*28*32.
        self.fc_w1 = tf.Variable(
            tf.truncated_normal(shape=[28 * 28 * 32, 128], dtype=tf.float32, stddev=tf.sqrt(1 / 128)),
            name='fc_w1')
        self.fc_b1 = tf.Variable(tf.zeros(shape=[128], dtype=tf.float32), name='fc_b1')
        self.fc_w2 = tf.Variable(
            tf.truncated_normal(shape=[128, 10], dtype=tf.float32, stddev=tf.sqrt(1 / 10)),
            name='fc_w2')
        self.fc_b2 = tf.Variable(tf.zeros(shape=[10], dtype=tf.float32), name='fc_b2')

    def forward(self):
        """Build inference: conv-relu x2 -> flatten -> fc-relu -> fc -> softmax."""
        self.conv1 = tf.nn.relu(
            tf.nn.conv2d(self.x, self.w1, strides=[1, 1, 1, 1], padding='SAME', name='conv1')
            + self.b1)
        self.conv2 = tf.nn.relu(
            tf.nn.conv2d(self.conv1, self.w2, strides=[1, 1, 1, 1], padding='SAME', name='conv2')
            + self.b2)
        self.flat = tf.reshape(self.conv2, [-1, 28 * 28 * 32])
        self.fc1 = tf.nn.relu(tf.matmul(self.flat, self.fc_w1) + self.fc_b1)
        # fc2 stays pre-softmax: the loss in backward() expects raw logits.
        self.fc2 = tf.matmul(self.fc1, self.fc_w2) + self.fc_b2
        self.output = tf.nn.softmax(self.fc2)

    def backward(self):
        """Build the cross-entropy loss (on logits) and an Adam training op."""
        self.cost = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(logits=self.fc2, labels=self.y))
        self.opt = tf.train.AdamOptimizer().minimize(self.cost)

    def acc(self):
        """Build the batch-accuracy op (attribute name kept as-is for callers)."""
        self.acc2 = tf.equal(tf.argmax(self.output, 1), tf.argmax(self.y, 1))
        self.accaracy = tf.reduce_mean(tf.cast(self.acc2, dtype=tf.float32))
if __name__ == '__main__':
    # Build the full graph once, before the session starts.
    net = CNNNet()
    net.forward()
    net.backward()
    net.acc()
    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init)
        for step in range(10000):
            ax, ay = mnist.train.next_batch(100)
            # Batches arrive as flat 784-vectors; reshape to NHWC for conv2d.
            ax_batch = ax.reshape([-1, 28, 28, 1])
            loss, output, accaracy, _ = sess.run(
                fetches=[net.cost, net.output, net.accaracy, net.opt],
                feed_dict={net.x: ax_batch, net.y: ay})
            if step % 10 == 0:
                test_ax, test_ay = mnist.test.next_batch(100)
                test_ax_batch = test_ax.reshape([-1, 28, 28, 1])
                # FIX: reuse the graph's existing accuracy op instead of
                # creating fresh tf.argmax/tf.equal/tf.reduce_mean nodes on
                # every evaluation — the original grew the graph without
                # bound inside the loop (a TF1 memory leak).
                test_acc = sess.run(
                    net.accaracy,
                    feed_dict={net.x: test_ax_batch, net.y: test_ay})
                print(test_acc)
最后附上训练截图: