I am doing max pooling in TensorFlow, but I don't want to use the built-in tf.nn.max_pool(); instead I have to use tf.reduce_max(). However, it gives this error:

ValueError: Shape must be rank 4 but is rank 5 for 'conv2_1/Conv2D' (op: 'Conv2D') with input shapes: [1,?,1,224,64], [3,3,64,128].
Here is the code:

with tf.name_scope('conv1_2') as scope:
    kernel = tf.Variable(tf.truncated_normal([3, 3, 64, 64], dtype=tf.float32,
                                              stddev=1e-1), name='weights')
    conv = tf.nn.conv2d(self.conv1_1, kernel, [1, 1, 1, 1], padding='SAME')
    biases = tf.Variable(tf.constant(0.0, shape=[64], dtype=tf.float32),
                         trainable=True, name='biases')
    out = tf.nn.bias_add(conv, biases)
    self.conv1_2 = tf.nn.relu(out, name=scope)
    self.parameters += [kernel, biases]

self.pool1 = tf.reduce_max(self.conv1_2, reduction_indices=[1], keep_dims=True)
# conv2_1
with tf.name_scope('conv2_1') as scope:
    kernel = tf.Variable(tf.truncated_normal([3, 3, 64, 128], dtype=tf.float32,
                                              stddev=1e-1), name='weights')
    sess = tf.InteractiveSession()
    tf.Print(self.pool1, [self.pool1], message="hellow fatima")
    # the ValueError above is raised by this conv2d call ('conv2_1/Conv2D')
    conv = tf.nn.conv2d([self.pool1], kernel, [1, 1, 1, 1], padding='SAME')
    biases = tf.Variable(tf.constant(0.0, shape=[128], dtype=tf.float32),
                         trainable=True, name='biases')
    out = tf.nn.bias_add(conv, biases)
    self.conv2_1 = tf.nn.relu(out, name=scope)
    self.parameters += [kernel, biases]
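
For reference, a minimal standalone sketch of the shapes involved (TensorFlow 1.x; the conv1_2 placeholder and its [1, 224, 224, 64] shape are assumptions inferred from the VGG-style layer names and the 224/64 in the error message):

import tensorflow as tf

# Hypothetical stand-in for self.conv1_2 (assumed shape [1, 224, 224, 64]).
conv1_2 = tf.placeholder(tf.float32, shape=[1, 224, 224, 64])

# tf.reduce_max over axis 1 with keep_dims=True keeps the tensor rank 4.
pool1 = tf.reduce_max(conv1_2, reduction_indices=[1], keep_dims=True)
print(pool1.get_shape())    # (1, 1, 224, 64) -- rank 4

# Wrapping the tensor in a Python list, as in tf.nn.conv2d([self.pool1], ...),
# makes TensorFlow stack it along a new leading axis, producing rank 5.
stacked = tf.convert_to_tensor([pool1])
print(stacked.get_shape())  # (1, 1, 1, 224, 64) -- same rank-5 pattern as [1, ?, 1, 224, 64] in the error

If that is what happens here, the extra dimension would come from the list wrapping rather than from tf.reduce_max itself, whose keep_dims=True output is still rank 4.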