# Network parameters (TF1-style graph variables), truncated-normal init.
# Conv kernel shape: [filter_height, filter_width, in_channels, out_channels].
layer1_weights = tf.Variable(tf.truncated_normal(
    [patch_size, patch_size, num_channels, depth], stddev=0.1))
layer1_biases = tf.Variable(tf.zeros([depth]))
layer2_weights = tf.Variable(tf.truncated_normal(
    [patch_size, patch_size, depth, depth], stddev=0.1))
# Biases are 1-D, one per output channel (see tf.nn.conv2d docs).
layer2_biases = tf.Variable(tf.constant(1.0, shape=[depth]))
# First fully connected layer: two stride-2 convs shrink height and width by
# 4x each, so the flattened feature size is (image_size/4)^2 * depth.
# NOTE: parentheses added — the original `image_size // 4 * image_size // 4`
# parsed as `((image_size // 4) * image_size) // 4`, which only matches the
# intended value when image_size is divisible by 4.
layer3_weights = tf.Variable(tf.truncated_normal(
    [(image_size // 4) * (image_size // 4) * depth, num_hidden], stddev=0.1))
layer3_biases = tf.Variable(tf.constant(1.0, shape=[num_hidden]))
# Output (logits) layer.
layer4_weights = tf.Variable(tf.truncated_normal(
    [num_hidden, num_labels], stddev=0.1))
layer4_biases = tf.Variable(tf.constant(1.0, shape=[num_labels]))
# Model.
def model(data):
    """Build the forward pass: two stride-2 conv+ReLU layers, then two FC layers.

    Args:
        data: 4-D input batch tensor; the [1, 2, 2, 1] strides assume
            [batch, height, width, channels] (NHWC) layout — TODO confirm
            against the feed placeholders.

    Returns:
        Unscaled logits tensor of shape [batch, num_labels].
    """
    # Stride [1, 2, 2, 1]: one entry per input dimension — move by 2 along
    # height and width, by 1 along batch and channels.
    conv = tf.nn.conv2d(data, layer1_weights, [1, 2, 2, 1], padding='SAME')
    hidden = tf.nn.relu(conv + layer1_biases)
    conv = tf.nn.conv2d(hidden, layer2_weights, [1, 2, 2, 1], padding='SAME')
    hidden = tf.nn.relu(conv + layer2_biases)
    # Flatten to [batch, features] before the fully connected layers.
    # get_shape() is the *static* shape, known at graph-construction time
    # (printing it here happens once, during graph building, not per run).
    shape = hidden.get_shape().as_list()
    # Use -1 so TF infers the batch dimension; the original shape[0] hardcoded
    # the static batch size and breaks when it is dynamic/None.
    reshape = tf.reshape(hidden, [-1, shape[1] * shape[2] * shape[3]])
    hidden = tf.nn.relu(tf.matmul(reshape, layer3_weights) + layer3_biases)
    return tf.matmul(hidden, layer4_weights) + layer4_biases