一、提出的方法框架
FS:融合策略
二、训练的框架
nb_filter = [64, 112, 160, 208, 256]
三、融合策略
(1)基于注意力模型的融合策略:
def attention_fusion_weight(tensor1, tensor2, p_type):
    """Fuse two feature tensors by averaging channel- and spatial-attention fusion.

    p_type selects the channel pooling strategy ('avg', 'max', or 'nuclear' —
    interpreted by channel_attention; see channel_fusion).
    Returns the element-wise mean of the two fused results.
    """
    fused_by_channel = channel_fusion(tensor1, tensor2, p_type)  # channel-attention fusion
    fused_by_spatial = spatial_fusion(tensor1, tensor2)          # spatial-attention fusion
    return (fused_by_channel + fused_by_spatial) / 2
(2)基于空间注意的融合策略:
def spatial_fusion(tensor1, tensor2, spatial_type='mean'):
    """Fuse two feature tensors with soft-max spatial attention weights.

    Each input is reduced to a spatial attention map by ``spatial_attention``
    (presumably shape (N, 1, H, W) given the channel-wise repeat below —
    TODO confirm), the two maps are soft-maxed against each other per pixel,
    and the resulting weight maps blend the inputs.
    """
    shape = tensor1.size()
    # Per-input spatial attention maps (e.g. the spatial mean when
    # spatial_type == 'mean').
    spatial1 = spatial_attention(tensor1, spatial_type)
    spatial2 = spatial_attention(tensor2, spatial_type)
    # Soft-max weight maps. The exponentials and the shared denominator are
    # computed once (the original recomputed them for each weight); EPSILON
    # guards against division by zero.
    exp1 = torch.exp(spatial1)
    exp2 = torch.exp(spatial2)
    denom = exp1 + exp2 + EPSILON
    spatial_w1 = exp1 / denom
    spatial_w2 = exp2 / denom
    # Replicate the single-channel weight maps across all feature channels.
    spatial_w1 = spatial_w1.repeat(1, shape[1], 1, 1)
    spatial_w2 = spatial_w2.repeat(1, shape[1], 1, 1)
    # Weighted blend of the two inputs.
    tensor_f = spatial_w1 * tensor1 + spatial_w2 * tensor2
    return tensor_f
(3)通道注意力融合策略:
def channel_fusion(tensor1, tensor2, p_type):
    """Fuse two feature tensors with channel-attention weights.

    ``channel_attention`` globally pools each input per channel according to
    p_type (presumably producing shape (N, C, 1, 1), given the spatial repeat
    below — TODO confirm); the pooled vectors are normalized against each
    other to form per-channel blend weights.
    """
    shape = tensor1.size()
    # Per-channel global pooling of each input.
    global_p1 = channel_attention(tensor1, p_type)
    global_p2 = channel_attention(tensor2, p_type)
    # Normalized per-channel weights. The shared denominator is computed once
    # (the original recomputed it for each weight); EPSILON guards against
    # division by zero.
    denom = global_p1 + global_p2 + EPSILON
    global_p_w1 = global_p1 / denom
    global_p_w2 = global_p2 / denom
    # Replicate the per-channel weights across the spatial dimensions.
    global_p_w1 = global_p_w1.repeat(1, 1, shape[2], shape[3])
    global_p_w2 = global_p_w2.repeat(1, 1, shape[2], shape[3])
    # Weighted blend of the two inputs.
    tensor_f = global_p_w1 * tensor1 + global_p_w2 * tensor2
    return tensor_f
#global+soft-max