NCM Module

import torch
import torch.nn as nn


class NCM(nn.Module):
    def __init__(self, in_ch, out_ch):
        super(NCM, self).__init__()
        # 1x1 projection shared by the query/key/value branches (in_ch -> out_ch)
        self.con1x1 = nn.Sequential(nn.Conv2d(in_ch, out_ch, kernel_size=(1, 1), stride=(1, 1), padding=(0, 0)),
                                    nn.BatchNorm2d(out_ch),
                                    nn.ReLU(inplace=True))
        # 1x1 projection back to the input channel count (out_ch -> in_ch)
        self.con1_1 = nn.Sequential(nn.Conv2d(out_ch, in_ch, kernel_size=(1, 1), stride=(1, 1), padding=(0, 0)),
                                    nn.BatchNorm2d(in_ch),
                                    nn.ReLU(inplace=True))
        # channel attention: global average pooling followed by a shared MLP (SE-style)
        self.SE = nn.AdaptiveAvgPool2d(1)
        mid_ch = in_ch // 16
        self.shared_MLP = nn.Sequential(
            nn.Linear(in_ch, mid_ch),
            nn.ReLU(),
            nn.Linear(mid_ch, in_ch)
        )
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        # query/key/value all pass through the same shared 1x1 projection
        x1 = self.con1x1(x)
        b, c, h, w = x1.shape
        x2 = self.con1x1(x)
        x3 = self.con1x1(x)

        # channel-attention weights: GAP -> MLP -> sigmoid -> 1x1 projection to out_ch
        x4 = self.shared_MLP(self.SE(x).view(b, -1)).unsqueeze(2).unsqueeze(3)
        x4 = self.sigmoid(x4)
        x4 = self.con1x1(x4)

        # spatial self-attention, computed per sample (batched matmul keeps
        # samples separate instead of flattening the whole batch together)
        mul = torch.matmul(x2.view(b, c, -1).permute(0, 2, 1), x3.view(b, c, -1))  # (b, hw, hw)
        mul = torch.softmax(mul, dim=-1)
        resh = torch.matmul(x1.view(b, c, -1), mul)  # (b, c, hw)
        resh = resh.view(b, c, h, w)

        # fuse the two attentions, project back to in_ch and add the residual
        final = resh * x4
        final = self.con1_1(final)
        final = x + final

        return final


if __name__ == '__main__':
    model = NCM(in_ch=512, out_ch=256)
    x = torch.randn(16, 512, 24, 24)
    y = model(x)
    print(y.shape)
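
With the toy input above, the printed shape should be torch.Size([16, 512, 24, 24]): the final 1x1 projection and the residual connection keep the output at the input channel count, so the module is shape-preserving.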
[Figure: structure diagram of the combined channel-attention and self-attention module]
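
In outline, the forward pass mixes two branches: an SE-style channel-attention branch (adaptive average pooling, the shared MLP, and a sigmoid) and a spatial self-attention branch (softmax over pairwise similarities of the 1x1-projected features). Their product is projected back to in_ch channels and added to the input. Below is a minimal sketch of how the block might be inserted into a small network; ToyNet and its layer names are hypothetical and only illustrate placement, assuming the NCM class defined above is in scope.

import torch
import torch.nn as nn


class ToyNet(nn.Module):
    """Hypothetical two-stage backbone, used only to show where NCM could sit."""

    def __init__(self):
        super().__init__()
        self.stem = nn.Sequential(
            nn.Conv2d(3, 512, kernel_size=3, stride=2, padding=1),
            nn.BatchNorm2d(512),
            nn.ReLU(inplace=True),
        )
        # NCM returns a tensor with the same shape as its input,
        # so it can sit between stages without any adapter layers.
        self.ncm = NCM(in_ch=512, out_ch=256)
        self.head = nn.Conv2d(512, 10, kernel_size=1)

    def forward(self, x):
        x = self.stem(x)
        x = self.ncm(x)  # attention-refined features, shape unchanged
        return self.head(x)


net = ToyNet()
out = net(torch.randn(2, 3, 48, 48))
print(out.shape)  # torch.Size([2, 10, 24, 24])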
