In pyskl's `RGBPoseConv3D` (resnet3d_slowfast.py), the pose pathway is built as:

```python
self.pose_path = ResNet3dPathway(**pose_pathway)
```

To check the lateral-connection parameters, build one instance and print the attributes that drive them:

```python
pose_path = ResNet3dPathway(**pose_pathway)

print(pose_path.lateral)
print(pose_path.lateral_activate[0] == 1)
print(pose_path.stage_blocks)
print(len(pose_path.stage_blocks))
print(pose_path.base_channels * 2**0)
print(pose_path.block.expansion)

print(pose_path.lateral_activate[1])
print(pose_path.lateral_inv)
print(pose_path.channel_ratio)
print(pose_path.lateral_infl)
print(pose_path.lateral_connections)
```

The prints give:

```
True                                  # lateral
False                                 # lateral_activate[0] == 1
(4, 6, 3)                             # stage_blocks
3                                     # len(stage_blocks)
32                                    # base_channels * 2**0
4                                     # block.expansion
1                                     # lateral_activate[1]
True                                  # lateral_inv
4                                     # channel_ratio
16                                    # lateral_infl
['layer1_lateral', 'layer2_lateral']  # lateral_connections
```
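These values pin down which lateral connections get built. Below is a minimal sketch of the selection logic as I read it from the stage loop in resnet3d_slowfast.py; the full `lateral_activate` list is inferred from the two printed entries, and the code is a paraphrase, not the verbatim source:

```python
# Sketch of ResNet3dPathway's lateral-connection bookkeeping,
# using the attribute values printed above.
lateral = True
lateral_activate = [0, 1, 1]   # inferred: [0] == 1 printed False, [1] printed 1
stage_blocks = (4, 6, 3)
num_stages = len(stage_blocks)

# conv1_lateral is only built when lateral_activate[0] == 1, so it is
# skipped here; every stage except the last can then get a lateral.
lateral_connections = []
for i in range(num_stages):
    if lateral and i != num_stages - 1 and lateral_activate[i + 1]:
        lateral_connections.append(f'layer{i + 1}_lateral')

print(lateral_connections)  # ['layer1_lateral', 'layer2_lateral']
```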

Printing the module itself shows the lateral layers that were built:

```python
print(pose_path)
```

The relevant excerpt of the output:

```
  (layer1_lateral): DeConvModule(
    (conv): ConvTranspose3d(512, 32, kernel_size=(7, 1, 1), stride=(4, 1, 1), padding=(3, 0, 0), bias=False)
    (bn): BatchNorm3d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    (relu): ReLU()
  )
  (layer2_lateral): DeConvModule(
    (conv): ConvTranspose3d(1024, 64, kernel_size=(7, 1, 1), stride=(4, 1, 1), padding=(3, 0, 0), bias=False)
    (bn): BatchNorm3d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    (relu): ReLU()
  )
```

Comparing these values with the module printout confirms that the lateral-connection parameters match my own manual calculation.
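For reference, here is that manual calculation as a runnable sketch. The arithmetic paraphrases the `DeConvModule` construction in resnet3d_slowfast.py: the lateral input width is the stage's output width times `channel_ratio`, and the output width divides that by `lateral_infl`. `fusion_kernel = 7` and `speed_ratio = 4` are inferred from the printed kernel and stride, since they were not printed above:

```python
# Manual check of the DeConvModule shapes from the printed attributes.
base_channels, expansion = 32, 4      # base_channels, block.expansion
channel_ratio, lateral_infl = 4, 16
fusion_kernel, speed_ratio = 7, 4     # inferred from the module printout

for i in (0, 1):                      # the last stage gets no lateral
    inplanes = base_channels * 2**i * expansion   # stage output width
    in_ch = inplanes * channel_ratio              # lateral input width
    out_ch = in_ch // lateral_infl                # deconv output width
    print(f'layer{i + 1}_lateral: ConvTranspose3d({in_ch}, {out_ch}, '
          f'kernel_size=({fusion_kernel}, 1, 1), stride=({speed_ratio}, 1, 1), '
          f'padding=({(fusion_kernel - 1) // 2}, 0, 0))')
# layer1_lateral: ConvTranspose3d(512, 32, kernel_size=(7, 1, 1), stride=(4, 1, 1), padding=(3, 0, 0))
# layer2_lateral: ConvTranspose3d(1024, 64, kernel_size=(7, 1, 1), stride=(4, 1, 1), padding=(3, 0, 0))
```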

https://github.com/kennymckormick/pyskl/blob/main/pyskl/models/cnns/resnet3d_slowfast.py
