TensorFlow VGG网络构建手写数字识别系统设计
1、建立VGG16模型
class VGG16_Mode():
    """Create a VGG16-style network using TensorFlow.

    Network structure as built by this class:
        (conv 3x3 64)  => (conv 3x3 64, pool/2)
        (conv 3x3 128) => (conv 3x3 128, pool/2)
        (conv 3x3 256) => (conv 3x3 256) => (conv 3x3 256) => (conv 3x3 256, pool/2)
        (conv 3x3 512) => (conv 3x3 512) => (conv 3x3 512) => (conv 3x3 512, pool/2)
        (fc 4096) => (fc 4096) => (fc classes)

    NOTE(review): this differs from the canonical VGG16 (five conv blocks of
    2/2/3/3/3 layers with five pools); the structure above has four blocks of
    2/2/4/4 — confirm this is intentional.
    """
def variable_summaries(self, var, name):
    """Attach TensorBoard summaries (histogram, mean, stddev) to a tensor.

    Args:
        var: tensor/variable to summarize.
        name: tag prefix used for the summary ops.
    """
    with tf.name_scope('summaries'):
        tf.summary.histogram(name, var)
        avg = tf.reduce_mean(var)
        tf.summary.scalar('mean/' + name, avg)
        # Standard deviation computed manually as sqrt(E[(x - mean)^2]).
        spread = tf.sqrt(tf.reduce_mean(tf.square(var - avg)))
        tf.summary.scalar('stddev/' + name, spread)
def conv_layer(self, data, ksize, stride, name, w_biases=False, padding="SAME"):
    """Build a 2-D convolution layer with TensorBoard summaries.

    Args:
        data: input 4-D tensor (presumably NHWC — TODO confirm with callers).
        ksize: filter shape [height, width, in_channels, out_channels].
        stride: strides argument passed straight to tf.nn.conv2d.
        name: variable/name scope for this layer.
        w_biases: when True, add a per-output-channel bias to the result.
        padding: conv2d padding mode, "SAME" or "VALID".

    Returns:
        The convolution output tensor (no activation is applied here).
    """
    with tf.variable_scope(name) as scope:
        with tf.name_scope(name):
            with tf.name_scope("weights"):
                w_init = tf.contrib.layers.xavier_initializer()
                w = tf.get_variable(shape=ksize, initializer=w_init, name='w')
                self.variable_summaries(w, name + "/weights")
            with tf.name_scope("biases"):
                # BUG FIX: 'biases' was passed as tf.Variable's second
                # positional argument, which is `trainable`, not `name`.
                biases = tf.Variable(tf.constant(0.0, shape=[ksize[3]], dtype=tf.float32), name='biases')
                self.variable_summaries(biases, name + "/biases")
            with tf.name_scope("Wx_plus_b"):
                # BUG FIX: the biased branch used the invalid keyword
                # `stride=`; tf.nn.conv2d expects `strides=`, so any call
                # with w_biases=True raised a TypeError. Convolve once and
                # add the bias conditionally instead of duplicating the call.
                cov = tf.nn.conv2d(input=data, filter=w, strides=stride, padding=padding)
                if w_biases:
                    cov = cov + biases
            # Record the distribution of the layer output. Note: contrary to
            # the original comment, no activation function is applied here.
            tf.summary.histogram(name + '/activations', cov)
            return cov
def pool_layer(self, data, ksize, stride, name, padding='VALID'):
    """Max-pooling layer: wraps tf.nn.max_pool inside a variable scope.

    Args:
        data: input 4-D tensor.
        ksize: pooling window size, passed to tf.nn.max_pool.
        stride: pooling strides, passed to tf.nn.max_pool.
        name: variable scope for the op.
        padding: padding mode, 'VALID' or 'SAME'.

    Returns:
        The pooled tensor.
    """
    with tf.variable_scope(name):
        return tf.nn.max_pool(value=data, ksize=ksize, strides=stride, padding=padding)
def flatten(self, data):
    """Collapse a 4-D (batch, h, w, c) tensor into a 2-D (batch, h*w*c) one."""
    # First dimension (batch) is left dynamic via -1 in the reshape.
    _, dim1, dim2, dim3 = data.shape
    flat_width = dim1 * dim2 * dim3
    return tf.reshape(data, [-1, flat_width])
def fc_layer(self, data, name, fc_dims):
    """Fully-connected layer with ReLU activation and TensorBoard summaries.

    Args:
        data: 2-D input tensor of shape (batch, features).
        name: variable/name scope for this layer.
        fc_dims: number of output units.

    Returns:
        The ReLU-activated output tensor of shape (batch, fc_dims).
    """
    with tf.variable_scope(name) as scope:
        with tf.name_scope(name):
            with tf.name_scope("weights"):
                w_init = tf.contrib.layers.xavier_initializer()
                w = tf.get_variable(shape=[data.shape[1], fc_dims], name='w', initializer=w_init)
                self.variable_summaries(w, name + "/weights")
            with tf.name_scope("biases"):
                # BUG FIX: 'biases' was passed as tf.Variable's second
                # positional argument, which is `trainable`, not `name`.
                biases = tf.Variable(tf.constant(0.0, shape=[fc_dims], dtype=tf.float32), name='biases')
                self.variable_summaries(biases, name + "/biases")
            with tf.name_scope("Wx_plus_b"):
                # Build the matmul+add subgraph once and reuse it; the
                # original constructed it twice (once for the ReLU, once
                # for the pre-activation histogram).
                pre_activation = tf.matmul(data, w) + biases
                tf.summary.histogram(name + '/pre_activations', pre_activation)
                fc = tf.nn.relu(pre_activation)
            # Distribution of the layer output after the ReLU activation.
            tf.summary.histogram(name + '/activations', fc)
            return fc
def finlaout_layer(self,data,name,fc_dims):
with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
with tf.name_scope(