# Attention-module implementations: SE, CBAM, and ECA blocks.
def seBlock(input_feature, reduction=8):
    """Squeeze-and-Excitation (SE) channel-attention block.

    Squeezes spatial information with global average pooling, learns a
    per-channel gate through a two-layer bottleneck, and rescales the
    input feature map channel-wise.

    Args:
        input_feature: 4-D tensor (batch, height, width, channels).
        reduction: bottleneck reduction ratio of the first Dense layer.
            Defaults to 8, matching the previously hard-coded value.

    Returns:
        Tensor of the same shape as ``input_feature``, rescaled per channel.
    """
    channels = input_feature.shape[-1]
    # Squeeze: (B, H, W, C) -> (B, C).
    x = layers.GlobalAveragePooling2D()(input_feature)
    # Excitation: bottleneck MLP. Bias is omitted because BatchNorm follows
    # and would absorb it anyway.
    x = layers.Dense(units=channels // reduction, use_bias=False)(x)
    x = layers.BatchNormalization()(x)
    x = layers.Activation('relu')(x)
    x = layers.Dense(units=channels, use_bias=False)(x)
    x = layers.BatchNormalization()(x)
    x = layers.Activation('sigmoid')(x)
    # Bug fix: reshape the gate to (B, 1, 1, C) so it broadcasts over the
    # spatial dimensions. Multiplying a (B, C) tensor with (B, H, W, C)
    # right-aligns the shapes and tries to match B against W, which fails
    # for general input sizes.
    x = layers.Reshape((1, 1, channels))(x)
    return layers.Multiply()([input_feature, x])
def cbamBlock(input_feature, reduction=16):
    """CBAM channel attention (Woo et al., ECCV 2018).

    Computes channel attention from both average- and max-pooled
    descriptors of the input, passes each through one SHARED two-layer
    MLP, sums the two logits, applies a single sigmoid, and rescales the
    input channel-wise.

    Fixes versus the previous version:
      * both pooling branches now read ``input_feature`` (the max branch
        previously pooled the already-rescaled tensor);
      * the two branches share one MLP, as the paper prescribes, instead
        of using two independent Dense stacks;
      * the branch logits are added before a single sigmoid, instead of
        adding two independently sigmoid-gated feature maps (which
        doubled the output magnitude).

    NOTE(review): the spatial-attention half of CBAM is not implemented
    here (it needs channel-wise reduce ops not available via the
    ``layers`` names visible in this file).

    Args:
        input_feature: 4-D tensor (batch, height, width, channels).
        reduction: reduction ratio of the shared channel-attention MLP.
            Defaults to 16.

    Returns:
        Tensor of the same shape as ``input_feature``.
    """
    channels = input_feature.shape[-1]

    # One shared MLP, instantiated once and applied to both descriptors.
    # No activation on the second layer: the sigmoid is applied after the
    # two branch logits are summed.
    shared_hidden = layers.Dense(channels // reduction, activation='relu',
                                 kernel_initializer='he_normal')
    shared_out = layers.Dense(channels, kernel_initializer='he_normal')

    avg_logits = shared_out(shared_hidden(
        layers.GlobalAveragePooling2D()(input_feature)))
    max_logits = shared_out(shared_hidden(
        layers.GlobalMaxPooling2D()(input_feature)))

    gate = layers.Activation('sigmoid')(layers.Add()([avg_logits, max_logits]))
    # (B, C) -> (B, 1, 1, C) so the gate broadcasts over H and W.
    gate = layers.Reshape((1, 1, channels))(gate)
    return layers.Multiply()([input_feature, gate])
def ecaBlock(input_feature, b=1, gamma=2):
    """Efficient Channel Attention (ECA) block.

    Derives an adaptive 1-D convolution kernel size from the channel
    count, runs that convolution across the channels of the globally
    average-pooled descriptor, and rescales the input feature map with
    the resulting sigmoid gate.

    Args:
        input_feature: 4-D tensor (batch, height, width, channels).
        b: offset term in the adaptive kernel-size formula. Defaults to 1.
        gamma: scaling term controlling how fast the kernel size grows
            with the channel count. Defaults to 2.

    Returns:
        Tensor of the same shape as ``input_feature``.
    """
    num_channels = input_feature.shape[-1]
    # Adaptive kernel size k = |(log2(C) + b) / gamma|; "| 1" forces it
    # odd (equivalent to the paper's "t + 1 if t is even" rule).
    kernel = int(abs((math.log2(num_channels) + b) / gamma)) | 1
    squeezed = layers.GlobalAveragePooling2D(
        data_format='channels_last')(input_feature)
    # Treat the (B, C) descriptor as a length-C sequence so Conv1D mixes
    # information between neighbouring channels.
    gate = layers.Reshape((-1, 1))(squeezed)
    gate = layers.Conv1D(filters=1, kernel_size=kernel,
                         padding="same", use_bias=False)(gate)
    gate = layers.Activation('sigmoid')(gate)
    # Back to (B, 1, 1, C) so the gate broadcasts over H and W.
    gate = layers.Reshape((1, 1, -1))(gate)
    return layers.multiply([input_feature, gate])