[Draft] Multi-head output

import torch.nn as nn
import torch.nn.functional as F

# model_dict maps a backbone name to (constructor, feature dimension); it is defined elsewhere in the repo.

class ConResNet(nn.Module):
    """backbone + projection head"""
    def __init__(self, name='resnet50', head='mlp', feat_dim=128, selfcon_pos=[False,False,False], selfcon_arch='resnet', selfcon_size='same', dataset=''):
        super(ConResNet, self).__init__()
        model_fun, dim_in = model_dict[name]
        # the encoder returns (list of sub-network features, final backbone feature)
        self.encoder = model_fun(selfcon_pos=selfcon_pos, selfcon_arch=selfcon_arch, selfcon_size=selfcon_size, dataset=dataset)
        if head == 'linear':
            self.head = nn.Linear(dim_in, feat_dim)
            
            # one sub-head per position where selfcon_pos is True;
            # wrap them in nn.ModuleList so their parameters are registered with the model
            sub_heads = []
            for pos in selfcon_pos:
                if pos:
                    sub_heads.append(nn.Linear(dim_in, feat_dim))
            self.sub_heads = nn.ModuleList(sub_heads)
        elif head == 'mlp':
            self.head = nn.Sequential(
                nn.Linear(dim_in, dim_in),
                nn.ReLU(inplace=True),
                nn.Linear(dim_in, feat_dim)
            )
            
            heads = []
            for pos in selfcon_pos:
                if pos:
                    heads.append(nn.Sequential(
                        nn.Linear(dim_in, dim_in),
                        nn.ReLU(inplace=True),
                        nn.Linear(dim_in, feat_dim)
                    ))
            self.sub_heads = nn.ModuleList(heads)
        else:
            raise NotImplementedError(
                'head not supported: {}'.format(head))

    def forward(self, x):
        sub_feat, feat = self.encoder(x)
        # debug prints: inspect the raw sub-network features and the final backbone feature
        print(sub_feat, feat)
        print("******")
        
        # project each sub-network feature through its own head and L2-normalize it
        sh_feat = []
        for sf, sub_head in zip(sub_feat, self.sub_heads):
            print(sf, sub_head, sub_feat, self.sub_heads)
            sh_feat.append(F.normalize(sub_head(sf), dim=1))
        print("***")
        print(sh_feat)
        print("*************************")
        input()  # pause here so the printed tensors can be inspected
        
        # project and L2-normalize the final feature as well
        feat = F.normalize(self.head(feat), dim=1)
        return sh_feat, feat
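
For reference, here is a minimal, self-contained sketch of the same multi-head pattern. StubEncoder is a stand-in I am using for the real backbone (which returns a list of sub-network features plus the final feature); the 2048/128 dimensions match the printed output further below.

import torch
import torch.nn as nn
import torch.nn.functional as F

class StubEncoder(nn.Module):
    """Stand-in backbone: returns one sub-network feature and the final feature."""
    def __init__(self, dim_in=2048):
        super().__init__()
        self.dim_in = dim_in

    def forward(self, x):
        n = x.size(0)
        sub_feat = [torch.randn(n, self.dim_in)]  # intermediate (sub-network) feature
        feat = torch.randn(n, self.dim_in)        # final backbone feature
        return sub_feat, feat

dim_in, feat_dim = 2048, 128
encoder = StubEncoder(dim_in)
head = nn.Sequential(nn.Linear(dim_in, dim_in), nn.ReLU(inplace=True), nn.Linear(dim_in, feat_dim))
sub_heads = nn.ModuleList([nn.Sequential(nn.Linear(dim_in, dim_in), nn.ReLU(inplace=True), nn.Linear(dim_in, feat_dim))])

x = torch.randn(8, 3, 32, 32)
sub_feat, feat = encoder(x)
sh_feat = [F.normalize(h(sf), dim=1) for sf, h in zip(sub_feat, sub_heads)]
feat = F.normalize(head(feat), dim=1)
print([t.shape for t in sh_feat], feat.shape)  # [torch.Size([8, 128])] torch.Size([8, 128])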

Core code (the forward pass):

sub_feat, feat = self.encoder(x)
print(sub_feat,feat)
print("******")

sh_feat = []
for sf, sub_head in zip(sub_feat, self.sub_heads):
    print(sf,sub_head,sub_feat,self.sub_heads)
    sh_feat.append(F.normalize(sub_head(sf), dim=1))
print("***")
print(sh_feat)
print("*************************")
input()

feat = F.normalize(self.head(feat), dim=1)
return sh_feat, feat
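
The only non-obvious step here is F.normalize(..., dim=1), which rescales every row of the projected feature to unit L2 norm (this is what produces the grad_fn=<DivBackward0> tensors in the output below). A quick check on a toy tensor, unrelated to the model:

import torch
import torch.nn.functional as F

z = torch.tensor([[3.0, 4.0],
                  [0.6, 0.8]])
z_n = F.normalize(z, dim=1)   # divide each row by its L2 norm
print(z_n)                    # tensor([[0.6000, 0.8000], [0.6000, 0.8000]])
print(z_n.norm(dim=1))        # tensor([1., 1.])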


Sample output from one forward pass (the first line is torchvision's dataset message):

Files already downloaded and verified
[tensor([[0.3709, 0.4060, 0.4357,  ..., 0.4369, 0.7116, 0.6154],
        [0.3255, 0.8886, 0.4587,  ..., 0.3097, 0.6247, 0.6515],
        [0.3265, 0.5572, 0.5465,  ..., 0.1894, 0.5648, 0.7019],
        ...,
        [0.3285, 0.7454, 0.3656,  ..., 0.4739, 0.4305, 0.3940],
        [4.2099, 2.0158, 2.8063,  ..., 3.5441, 2.7423, 1.8907],
        [5.0028, 4.0435, 6.1597,  ..., 6.2176, 2.4172, 1.5914]],
       device='cuda:0', grad_fn=<ReshapeAliasBackward0>)] tensor([[0.5324, 0.5882, 0.8503,  ..., 0.7225, 0.8470, 0.3471],
        [0.7275, 0.6198, 0.7865,  ..., 0.7751, 0.9618, 0.8199],
        [0.7426, 0.7385, 0.9845,  ..., 0.4950, 0.7898, 0.4606],
        ...,
        [0.6489, 0.7440, 1.0151,  ..., 0.5916, 1.0089, 0.2878],
        [0.6675, 1.0148, 1.7714,  ..., 1.4871, 0.8820, 5.0314],
        [0.7732, 0.9582, 2.3896,  ..., 1.7296, 0.9024, 5.8410]],
       device='cuda:0', grad_fn=<ReshapeAliasBackward0>)
******
tensor([[0.3709, 0.4060, 0.4357,  ..., 0.4369, 0.7116, 0.6154],
        [0.3255, 0.8886, 0.4587,  ..., 0.3097, 0.6247, 0.6515],
        [0.3265, 0.5572, 0.5465,  ..., 0.1894, 0.5648, 0.7019],
        ...,
        [0.3285, 0.7454, 0.3656,  ..., 0.4739, 0.4305, 0.3940],
        [4.2099, 2.0158, 2.8063,  ..., 3.5441, 2.7423, 1.8907],
        [5.0028, 4.0435, 6.1597,  ..., 6.2176, 2.4172, 1.5914]],
       device='cuda:0', grad_fn=<ReshapeAliasBackward0>) Sequential(
  (0): Linear(in_features=2048, out_features=2048, bias=True)
  (1): ReLU(inplace=True)
  (2): Linear(in_features=2048, out_features=128, bias=True)
) [tensor([[0.3709, 0.4060, 0.4357,  ..., 0.4369, 0.7116, 0.6154],
        [0.3255, 0.8886, 0.4587,  ..., 0.3097, 0.6247, 0.6515],
        [0.3265, 0.5572, 0.5465,  ..., 0.1894, 0.5648, 0.7019],
        ...,
        [0.3285, 0.7454, 0.3656,  ..., 0.4739, 0.4305, 0.3940],
        [4.2099, 2.0158, 2.8063,  ..., 3.5441, 2.7423, 1.8907],
        [5.0028, 4.0435, 6.1597,  ..., 6.2176, 2.4172, 1.5914]],
       device='cuda:0', grad_fn=<ReshapeAliasBackward0>)] ModuleList(
  (0): Sequential(
    (0): Linear(in_features=2048, out_features=2048, bias=True)
    (1): ReLU(inplace=True)
    (2): Linear(in_features=2048, out_features=128, bias=True)
  )
)
***
[tensor([[ 0.0400,  0.0202,  0.2806,  ..., -0.0758,  0.0742, -0.0301],
        [ 0.0687,  0.0602,  0.2580,  ..., -0.0850,  0.0880, -0.0687],
        [ 0.0607, -0.0127,  0.2398,  ..., -0.0618,  0.0845, -0.0610],
        ...,
        [ 0.0440,  0.0067,  0.2463,  ..., -0.0795,  0.0996, -0.0472],
        [ 0.1166,  0.0371,  0.1152,  ..., -0.1231, -0.0744,  0.0168],
        [ 0.0630,  0.0231,  0.1265,  ..., -0.1107, -0.0084, -0.0108]],
       device='cuda:0', grad_fn=<DivBackward0>)]
*************************

Note that sub_feat, self.sub_heads, and sh_feat each contain a single element here because only one entry of selfcon_pos is True, so only one sub-head was built.