# 5x5 convolution with 64 output channels, stride [1,1], SAME padding, ReLU
# activation. The layer creates and initializes its own weights and bias
# (input channel count is inferred from x_image automatically).
h_conv1 = tf.contrib.layers.conv2d(x_image,64,[5,5],[1,1],'SAME',activation_fn=tf.nn.relu)
等同于
# Manual equivalent of the tf.contrib.layers.conv2d call above: explicitly
# create the filter weights and bias, run the convolution, then apply ReLU.
# (Original had all statements fused onto one line, which is not valid Python;
# line breaks restored and comments translated.)
filter_shape = [5, 5, 1, 64]  # [filter_height, filter_width, in_channels, out_channels]
W = tf.Variable(tf.truncated_normal(filter_shape, stddev=0.1), name="W")
b = tf.Variable(tf.constant(0.1, shape=[64]), name="b")
# Convolution layer
conv = tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding="SAME", name="conv")
# Activation function
h = tf.nn.relu(tf.nn.bias_add(conv, b), name="relu")
def convolution(
inputs,输入数据
num_outputs,输出维度,不需要指定输入维度,函数会自动得到inputs中的输入维度
kernel_size,卷积核大小,[a,b],如果a,b相同,则直接写a即可
stride=1, 步长,格式同上
padding='SAME',padding
data_format=None,
rate=1, 空洞卷积(dilation)比率
activation_fn=nn.relu,激活函数
normalizer_fn=None,
normalizer_params=None,
weights_initializer=initializers.xavier_initializer(),权重初始化
weights_regularizer=None,正则化项
biases_initializer=init_ops.zeros_initializer(),
biases_regularizer=None,
reuse=None,
variables_collections=None,
outputs_collections=None,
trainable=True,是否可训练
scope=None):