# Average pooling layer (平均池化层)
def avgpool(x_tensor, pool_ksize, pool_strides):
    """Average-pool `x_tensor` with VALID padding.

    Args:
        x_tensor: 4-D input tensor; NHWC layout assumed — TODO confirm
            against callers.
        pool_ksize: (height, width) of the pooling window.
        pool_strides: (height, width) stride of the window.

    Returns:
        The average-pooled tensor.
    """
    window = [1, pool_ksize[0], pool_ksize[1], 1]
    steps = [1, pool_strides[0], pool_strides[1], 1]
    return tf.nn.avg_pool(x_tensor, ksize=window, strides=steps,
                          padding='VALID')
def conv2d(x_tensor, conv_num_outputs, conv_ksize, conv_strides, name):
    """Pre-activation convolution block: batch-norm -> ReLU -> conv2d.

    Kernel weights are created inside variable scope `name`, initialized
    with stddev = sqrt(2 / (kh * kw * out_channels)) (He-style) and given
    an L2 regularizer of scale 1e-4. `batch_norm` and `relu` are helpers
    defined elsewhere in this file.

    Args:
        x_tensor: 4-D input tensor; NHWC layout assumed — TODO confirm.
        conv_num_outputs: number of output channels.
        conv_ksize: (kernel_height, kernel_width).
        conv_strides: (stride_height, stride_width).
        name: variable scope under which the weights live.

    Returns:
        The convolved tensor (SAME padding).
    """
    x_shape = x_tensor.get_shape().as_list()
    regularizer = tf.contrib.layers.l2_regularizer(scale=0.0001)
    fan = conv_ksize[0] * conv_ksize[1] * conv_num_outputs
    with tf.variable_scope(name):
        weights = tf.get_variable(
            'conv_weights',
            shape=[conv_ksize[0], conv_ksize[1], x_shape[3],
                   conv_num_outputs],
            initializer=tf.random_normal_initializer(
                stddev=np.sqrt(2.0 / fan)),
            regularizer=regularizer)
        # Pre-activation ordering: normalize and activate the input
        # before convolving it.
        out = batch_norm(x_tensor, 'bn')
        out = relu(out)
        out = tf.nn.conv2d(
            out, weights,
            strides=[1, conv_strides[0], conv_strides[1], 1],
            padding='SAME')
        return out
def conv2d_nobn(x_tensor, conv_num_outputs, conv_ksize, conv_strides, name):
    """Plain convolution without batch-norm or activation.

    Same weight setup as `conv2d` (He-style init, L2 regularizer of scale
    1e-4, variables scoped under `name`) but applies `tf.nn.conv2d`
    directly to the input.

    Args:
        x_tensor: 4-D input tensor; NHWC layout assumed — TODO confirm.
        conv_num_outputs: number of output channels.
        conv_ksize: (kernel_height, kernel_width).
        conv_strides: (stride_height, stride_width).
        name: variable scope under which the weights live.

    Returns:
        The convolved tensor (SAME padding).
    """
    x_shape = x_tensor.get_shape().as_list()
    regularizer = tf.contrib.layers.l2_regularizer(scale=0.0001)
    fan = conv_ksize[0] * conv_ksize[1] * conv_num_outputs
    with tf.variable_scope(name):
        weights = tf.get_variable(
            'conv_weights',
            shape=[conv_ksize[0], conv_ksize[1], x_shape[3],
                   conv_num_outputs],
            initializer=tf.random_normal_initializer(
                stddev=np.sqrt(2.0 / fan)),
            regularizer=regularizer)
        return tf.nn.conv2d(
            x_tensor, weights,
            strides=[1, conv_strides[0], conv_strides[1], 1],
            padding='SAME')
def output(x_tensor, num_outputs):
    """Fully-connected output layer: x @ W + b.

    The weight matrix uses `tf.uniform_unit_scaling_initializer` with an
    L2 regularizer of scale 1e-4; the bias starts at zero.

    NOTE(review): `tf.get_variable('weight', ...)` is created without a
    variable scope, so calling this function twice in one graph should
    raise a variable-reuse error — confirm it is only called once.

    Args:
        x_tensor: 2-D input tensor of shape (batch, features) —
            presumably already flattened; verify against callers.
        num_outputs: number of output units.

    Returns:
        The (batch, num_outputs) logits tensor.
    """
    x_shape = x_tensor.get_shape().as_list()
    regularizer = tf.contrib.layers.l2_regularizer(scale=0.0001)
    weights = tf.get_variable(
        'weight',
        shape=[x_shape[1], num_outputs],
        initializer=tf.uniform_unit_scaling_initializer(factor=1.0),
        regularizer=regularizer)
    bias = tf.Variable(tf.zeros([num_outputs]))
    return tf.add(tf.matmul(x_tensor, weights), bias)