多段监督,可堆叠多个 ResNet-50 的 Pose Estimation 检测器

这里只展示模型主要类,其它主要内容请参考pose-resnet项目代码

该项目主要改编自 pose_resnet 项目,
能够实现多段监督,堆叠多个 ResNet-50,实现特征聚合。

主要参考pose_resnet:

https://github.com/Microsoft/human-pose-estimation.pytorch

PoseResNet

可堆叠多个Resnet50 修改参数 stacks 即可
模型主函数代码

class PoseResNet(nn.Module):
    """Multi-stage, stacked-backbone pose estimation network.

    Stacks ``stacks`` ResNet backbones, each with its own deconvolution
    head and final 1x1 heatmap layer. Intermediate features of stage i
    are transformed by small bottleneck blocks and added into the
    corresponding layers of stage i+1 (feature aggregation), and every
    stage emits its own heatmap prediction so all stages can be
    supervised (multi-stage / intermediate supervision).

    Adapted from Microsoft's pose_resnet:
    https://github.com/Microsoft/human-pose-estimation.pytorch
    """

    def __init__(self, block, layers, cfg, stacks=3, **kwargs):
        """
        Args:
            block: residual block class (e.g. ``Bottleneck``); must expose
                a class attribute ``expansion``.
            layers: number of residual blocks per ResNet stage, e.g.
                ``[3, 4, 6, 3]`` for ResNet-50.
            cfg: config object; reads ``cfg.MODEL.EXTRA`` (deconv layout)
                and ``cfg.MODEL.NUM_JOINTS`` (heatmap channels).
            stacks: number of stacked backbone stages.
        """
        self.inplanes = 64
        self.temp_inplanes = 64
        self.stacks = stacks
        extra = cfg.MODEL.EXTRA
        self.deconv_with_bias = extra.DECONV_WITH_BIAS
        super(PoseResNet, self).__init__()

        # Shared stem: 7x7/2 conv + 3x3/2 maxpool -> 1/4 input resolution.
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64, momentum=BN_MOMENTUM)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

        # Per-stage module lists. Attribute names (including the original
        # spellings ``*_feate`` / ``secore_`` / ``fc_``) are kept unchanged
        # so existing checkpoints / state_dicts continue to load.
        layer1, layer2, layer3, layer4 = [], [], [], []
        deconv_, final, fc_, secore_ = [], [], [], []
        L1_feate, L2_feate, L3_feate, L4_feate = [], [], [], []
        for i in range(self.stacks):
            # _make_layer mutates self.inplanes; reset so every stage
            # builds an identical backbone starting from the 64-ch stem.
            self.inplanes = 64
            layer1.append(self._make_layer(block, 64, layers[0]))
            layer2.append(self._make_layer(block, 128, layers[1], stride=2))
            layer3.append(self._make_layer(block, 256, layers[2], stride=2))
            layer4.append(self._make_layer(block, 512, layers[3], stride=2))
            deconv_.append(self._make_deconv_layer(
                extra.NUM_DECONV_LAYERS,
                extra.NUM_DECONV_FILTERS,
                extra.NUM_DECONV_KERNELS,
            ))
            # Per-stage heatmap head: one output channel per joint.
            final.append(nn.Conv2d(
                in_channels=extra.NUM_DECONV_FILTERS[-1],
                out_channels=cfg.MODEL.NUM_JOINTS,
                kernel_size=extra.FINAL_CONV_KERNEL,
                stride=1,
                padding=1 if extra.FINAL_CONV_KERNEL == 3 else 0
            ))
            if i < self.stacks - 1:
                # Aggregation blocks that feed this stage's features into
                # the matching layers of the next stage.
                L1_feate.append(Bottleneck_2(ch_in=256, ch_out=256))
                L2_feate.append(Bottleneck_2(ch_in=512, ch_out=512))
                L3_feate.append(Bottleneck_2(ch_in=1024, ch_out=1024))
                L4_feate.append(Bottleneck_2(ch_in=2048, ch_out=2048))
                fc_.append(Bottleneck_2(ch_in=256, ch_out=64))
                # Bug fix: this block consumes the stage's heatmap output,
                # so its input width must be NUM_JOINTS (was hard-coded 17
                # and broke any config with a different joint count).
                secore_.append(Bottleneck_2(ch_in=cfg.MODEL.NUM_JOINTS,
                                            ch_out=64))

        self.layer1 = nn.ModuleList(layer1)
        self.layer2 = nn.ModuleList(layer2)
        self.layer3 = nn.ModuleList(layer3)
        self.layer4 = nn.ModuleList(layer4)
        self.deconv_ = nn.ModuleList(deconv_)
        self.final = nn.ModuleList(final)
        self.L1_feate = nn.ModuleList(L1_feate)
        self.L2_feate = nn.ModuleList(L2_feate)
        self.L3_feate = nn.ModuleList(L3_feate)
        self.L4_feate = nn.ModuleList(L4_feate)
        self.fc_ = nn.ModuleList(fc_)
        self.secore_ = nn.ModuleList(secore_)

    def _make_layer(self, block, planes, blocks, stride=1):
        """Build one ResNet stage of ``blocks`` residual blocks.

        Side effect: advances ``self.inplanes`` to the stage's output
        channel count (``planes * block.expansion``).
        """
        downsample = None
        # Project the shortcut whenever resolution or channels change.
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion, momentum=BN_MOMENTUM),
            )

        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))

        return nn.Sequential(*layers)

    def _get_deconv_cfg(self, deconv_kernel, index):
        """Return (kernel, padding, output_padding) such that a stride-2
        ConvTranspose2d exactly doubles the spatial size.

        Raises:
            ValueError: if ``deconv_kernel`` is not 2, 3 or 4.
        """
        if deconv_kernel == 4:
            padding = 1
            output_padding = 0
        elif deconv_kernel == 3:
            padding = 1
            output_padding = 1
        elif deconv_kernel == 2:
            padding = 0
            output_padding = 0
        else:
            # Bug fix: previously fell through with `padding` undefined,
            # producing a confusing UnboundLocalError at the return line.
            raise ValueError(
                'unsupported deconv kernel size: {}'.format(deconv_kernel))
        return deconv_kernel, padding, output_padding

    def _make_deconv_layer(self, num_layers, num_filters, num_kernels):
        """Build the upsampling head: ``num_layers`` stride-2 transposed
        convolutions, each followed by BN + ReLU (2x upsampling per layer).

        ``self.inplanes`` is saved and restored around the build so that
        constructing a deconv head does not disturb backbone construction
        of the next stack.
        """
        assert num_layers == len(num_filters), \
            'ERROR: num_deconv_layers is different len(num_deconv_filters)'
        # Bug fix: the message previously named num_deconv_filters here.
        assert num_layers == len(num_kernels), \
            'ERROR: num_deconv_layers is different len(num_deconv_kernels)'

        layers = []
        self.temp_inplanes = self.inplanes
        for i in range(num_layers):
            kernel, padding, output_padding = \
                self._get_deconv_cfg(num_kernels[i], i)

            planes = num_filters[i]
            layers.append(
                nn.ConvTranspose2d(
                    in_channels=self.inplanes,
                    out_channels=planes,
                    kernel_size=kernel,
                    stride=2,
                    padding=padding,
                    output_padding=output_padding,
                    bias=self.deconv_with_bias))
            layers.append(nn.BatchNorm2d(planes, momentum=BN_MOMENTUM))
            layers.append(nn.ReLU(inplace=True))
            self.inplanes = planes
        self.inplanes = self.temp_inplanes
        return nn.Sequential(*layers)

    def forward(self, x):
        """Run all stages on image batch ``x``.

        Returns:
            list of heatmap tensors, one per stage (earlier entries are
            used for intermediate supervision, the last is the final
            prediction).
        """
        out = []
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        stack_in = self.maxpool(x)

        # Features handed from stage i to stage i+1 (unused when i == 0).
        L2_feature = 0
        L3_feature = 0
        L4_feature = 0
        deconv_feature = 0
        for i in range(self.stacks):
            L1_out = self.layer1[i](stack_in)
            if i > 0:
                L1_out = L1_out + L2_feature
            L2_out = self.layer2[i](L1_out)
            if i > 0:
                L2_out = L2_out + L3_feature
            L3_out = self.layer3[i](L2_out)
            if i > 0:
                L3_out = L3_out + L4_feature
            L4_out = self.layer4[i](L3_out)
            if i > 0:
                L4_out = L4_out + deconv_feature
            deconv_out = self.deconv_[i](L4_out)
            final_out = self.final[i](deconv_out)
            out.append(final_out)
            if i < self.stacks - 1:
                # Transform this stage's backbone features for injection
                # into the corresponding layers of the next stage.
                L2_feature = self.L1_feate[i](L1_out)
                L3_feature = self.L2_feate[i](L2_out)
                L4_feature = self.L3_feate[i](L3_out)
                deconv_feature = self.L4_feate[i](L4_out)
                # Next stage's input = stem features + compressed deconv
                # features + recycled heatmap features.
                deconv_ff = self.fc_[i](deconv_out)
                secore_out = self.secore_[i](final_out)
                stack_in = stack_in + deconv_ff + secore_out
        return out

一些简单block模块的定义:

def conv3x3(in_planes, out_planes, stride=1):
    """Build a bias-free 3x3 convolution whose padding of 1 preserves the
    spatial size when ``stride`` is 1."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )


class BasicBlock(nn.Module):
    """Two-conv residual block (ResNet-18/34 style)."""

    # Output channels = planes * expansion.
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        # First 3x3 conv optionally downsamples spatially via `stride`.
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
        self.relu = nn.ReLU(inplace=True)
        # Second 3x3 conv keeps resolution and channel count.
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Identity shortcut, projected when shapes differ.
        identity = x if self.downsample is None else self.downsample(x)

        y = self.relu(self.bn1(self.conv1(x)))
        y = self.bn2(self.conv2(y))

        y += identity
        return self.relu(y)


class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 bottleneck residual block (ResNet-50/101/152)."""

    # Final 1x1 conv widens channels by this factor.
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        # Reduce channels to `planes`.
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
        # 3x3 conv carries the (optional) spatial downsampling.
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
        # Expand back to planes * expansion.
        self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1,
                               bias=False)
        self.bn3 = nn.BatchNorm2d(planes * self.expansion,
                                  momentum=BN_MOMENTUM)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Identity shortcut, projected when shapes differ.
        shortcut = x if self.downsample is None else self.downsample(x)

        y = self.relu(self.bn1(self.conv1(x)))
        y = self.relu(self.bn2(self.conv2(y)))
        y = self.bn3(self.conv3(y))

        y += shortcut
        return self.relu(y)

class Bottleneck_2(nn.Module):
    """Bottleneck-style residual block with a fixed internal width of
    ``ch_out``; used for inter-stage feature aggregation.

    When ``ch_in != ch_out`` the shortcut is adapted by a plain 1x1
    projection (``conv4``, no batch norm) before the residual addition.
    """

    def __init__(self, ch_in, ch_out, stride=1, downsample=None):
        super(Bottleneck_2, self).__init__()
        self.ch_in = ch_in
        self.ch_out = ch_out
        # 1x1 entry conv maps ch_in -> ch_out.
        self.conv1 = nn.Conv2d(self.ch_in, self.ch_out, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(self.ch_out, momentum=BN_MOMENTUM)
        # 3x3 conv (stride may downsample, though callers use stride=1).
        self.conv2 = nn.Conv2d(self.ch_out, self.ch_out, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(self.ch_out, momentum=BN_MOMENTUM)
        # 1x1 exit conv keeps the channel count.
        self.conv3 = nn.Conv2d(self.ch_out, self.ch_out, kernel_size=1,
                               bias=False)
        self.bn3 = nn.BatchNorm2d(self.ch_out, momentum=BN_MOMENTUM)
        self.relu = nn.ReLU(inplace=True)
        if self.ch_in != self.ch_out:
            # Channel-matching projection for the shortcut path.
            self.conv4 = nn.Conv2d(self.ch_in, self.ch_out, kernel_size=1, bias=False)

    def forward(self, x):
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.relu(self.bn2(self.conv2(y)))
        y = self.bn3(self.conv3(y))

        shortcut = x
        if self.ch_in != self.ch_out:
            shortcut = self.conv4(shortcut)
        y += shortcut
        return self.relu(y)



  • 2
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 2
    评论
评论 2
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值