点这里查看上篇文章
Github:kobiso/CBAM-tensorflow
下面这个代码在通道注意力模块中,将平均池化和最大池化后得到的描述子分别输入MLP,从代码中也可以看到两个描述子的MLP权重共享。
def cbam_block(input_feature, index, reduction_ratio=8):
    """Apply a CBAM (Convolutional Block Attention Module) block.

    Runs channel attention followed by spatial attention on the input
    feature map, as in the CBAM paper (Woo et al., ECCV 2018).

    Args:
        input_feature: Input feature-map tensor. Presumably NHWC with the
            channel axis last — TODO confirm against `channel_attention`.
        index: Identifier used to build a unique variable scope
            ('cbam_<index>') so the block can be instantiated repeatedly.
        reduction_ratio: Bottleneck reduction ratio for the shared MLP in
            the channel-attention sub-module (default 8).

    Returns:
        The attention-refined feature map, same shape as `input_feature`.
    """
    with tf.variable_scope('cbam_%s' % index):
        # Channel attention first, then spatial attention — the ordering
        # recommended by the CBAM paper.
        attention_feature = channel_attention(input_feature, index, reduction_ratio)
        attention_feature = spatial_attention(attention_feature, index)
        # NOTE: removed leftover debug `print("hello CBAM")` that fired on
        # every call and served no diagnostic purpose.
        return attention_feature
def channel_attention(input_feature, index, reduction_ratio=8):
kernel_initializer = tf.contrib.layers.variance_scaling_initializer()
bias_initializer = tf.constant_initializer(value=0.0)
with tf.variable_scope('ch_attention_%s' % index):
feature_map_shape = input_feature.get_shape()
channel = input_feature.get_shape()[-1]
avg_pool =<