Building ResNeXt (50 and 101) Models (with optional torchvision pretrained weights)

ResNeXt is a convolutional neural network introduced by Xie et al. in the paper "Aggregated Residual Transformations for Deep Neural Networks"¹. It combines the strengths of ResNet and Inception, but unlike Inception-v4 it needs no hand-designed Inception branches: every branch shares the same topology¹.
At its core, ResNeXt is group convolution, with the number of groups controlled by a new hyperparameter called cardinality (the size of the set of transformations)¹. Compared with ResNet, cardinality is added as an essential factor alongside depth and width².
(1) ResNeXt详解 - 知乎. https://zhuanlan.zhihu.com/p/51075096.
(2) ResNeXt Explained | Papers With Code. https://paperswithcode.com/method/resnext.
(3) ResNext | PyTorch. https://pytorch.org/hub/pytorch_vision_resnext/.
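
To make cardinality concrete, here is a small added sketch (not from the original post) comparing a plain 3x3 convolution with a grouped one of the same width: with 32 groups, each output channel only sees 1/32 of the input channels, so the grouped layer has roughly 1/32 of the parameters.

import torch.nn as nn

# plain 3x3 convolution: every output channel mixes all 128 input channels
dense = nn.Conv2d(128, 128, kernel_size=3, padding=1, bias=False)
# grouped 3x3 convolution with cardinality 32: 32 independent 4-channel branches
grouped = nn.Conv2d(128, 128, kernel_size=3, padding=1, groups=32, bias=False)

print(sum(p.numel() for p in dense.parameters()))    # 147456 = 128*128*3*3
print(sum(p.numel() for p in grouped.parameters()))  # 4608   = 32*(4*4*3*3)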
The ResNeXt code below largely follows a ResNet implementation; a ResNet walkthrough is available in this post:
https://blog.csdn.net/qq_44733260/article/details/131340430

import math

import numpy as np
import torch.nn as nn
import torchvision.models as models
from torchsummary import summary

class Bottleneck(nn.Module):
    expansion = 4
    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1, base_width=4, base_channels=64):
        super(Bottleneck, self).__init__()
        # Width of the grouped 3x3 conv: planes*2 when groups==1, otherwise
        # floor(planes * base_width / base_channels) * groups, doubled.
        if groups == 1:
            width = planes * 2
        else:
            width = (math.floor(planes * (base_width / base_channels)) * groups) * 2
        # The downsampling stride is applied in the first 1x1 conv; the grouped
        # 3x3 conv always uses stride 1 and a cardinality hard-coded to 32 groups.
        self.conv1 = nn.Conv2d(inplanes, width, kernel_size=1, stride=stride, bias=False)
        self.bn1 = nn.BatchNorm2d(width)
        self.conv2 = nn.Conv2d(width, width, kernel_size=3, stride=1, padding=1, groups=32, bias=False)
        self.bn2 = nn.BatchNorm2d(width)
        self.conv3 = nn.Conv2d(width, planes * 4, kernel_size=1, stride=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
    def forward(self, x):
        residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)
        out = self.conv3(out)
        out = self.bn3(out)
        if self.downsample is not None:
            residual = self.downsample(x)
        out += residual
        out = self.relu(out)
        return out
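

# --- illustrative check (added sketch, not part of the original post) --------
# A dummy forward pass shows that one Bottleneck keeps the spatial size and
# expands the channels to planes * expansion (here 64 -> 256, internal width 128).
def _demo_bottleneck():
    import torch
    block = Bottleneck(inplanes=256, planes=64, groups=1)  # resnext50-style block
    out = block(torch.randn(1, 256, 56, 56))
    print(out.shape)  # expected: torch.Size([1, 256, 56, 56])
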
        
class Bottleneck_50(Bottleneck):
    # Defaults for the 50-layer variant (groups=1).
    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1, base_width=4, base_channels=64):
        super().__init__(inplanes, planes, stride, downsample, groups, base_width, base_channels)


class Bottleneck_101(Bottleneck):
    # Defaults for the 101-layer variant (groups=32, base_width=4).
    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=32, base_width=4, base_channels=64):
        super().__init__(inplanes, planes, stride, downsample, groups, base_width, base_channels)
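
# --- width check (added note, not in the original post) ----------------------
# For the first stage (planes = 64) the width formula gives:
#   Bottleneck_50  (groups=1):                width = 64 * 2                  = 128
#   Bottleneck_101 (groups=32, base_width=4): width = floor(64*4/64) * 32 * 2 = 256
# These are the widths of torchvision's resnext50_32x4d and resnext101_32x8d,
# so every conv/bn weight lines up with the pretrained state_dicts (only fc differs).
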
class ResNext(nn.Module):
    def __init__(self, block, layers, num_classes=1000):
        self.inplanes = 64
        super(ResNext, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1, ceil_mode=True)

        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        
        self.avgpool = nn.AvgPool2d(7)
        # For a 640x640 input the feature map entering avgpool is 21x21, so the
        # pooled output is 3x3 and the flattened vector has 512*expansion*9 features
        # (see the worked arithmetic after this class). This fc shape does not match
        # the torchvision fc and is simply skipped when pretrained weights are loaded.
        self.fc = nn.Linear(512 * block.expansion * 9, num_classes)

        # He-style initialization for convolutions; BatchNorm scales start at 1, shifts at 0.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _make_layer(self, block, planes, blocks, stride=1):
        downsample = None
        #-------------------------------------------------------------------#
        #   A downsample projection is needed on the shortcut branch whenever
        #   the stage reduces the spatial size or changes the channel count.
        #-------------------------------------------------------------------#
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample=downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)

        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)

        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        # debug print: with a 640x640 input this shows torch.Size([N, 18432]) = N x 512*4*9
        print(x.shape)
        x = self.fc(x)
        return x
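
# --- why the fc layer uses 512 * expansion * 9 (added worked example) --------
# For a 640x640 input the spatial size evolves as
#   640 -> conv1 (7x7, stride 2)                -> 320
#       -> maxpool (3x3, stride 2, ceil_mode)   -> 161
#       -> layer2 / layer3 / layer4 (stride 2)  -> 81 -> 41 -> 21
#       -> AvgPool2d(7)                         -> 3
# so the flattened vector has 2048 * 3 * 3 = 18432 = 512 * expansion * 9 features,
# which is what the debug print in forward() reports. With a 224x224 input the
# final map would be 1x1 and the factor 9 would not be needed.
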
arch_settings = {
    'resnext50': (Bottleneck_50, (3, 4, 6, 3)),
    'resnext101': (Bottleneck_101, (3, 4, 23, 3)),
}
def resnext(depth, pretrained=False):
    if depth not in arch_settings:
        raise KeyError(f'invalid depth {depth} for resnext')
    block, stage_blocks = arch_settings[depth]
    model = ResNext(block, stage_blocks)
    if pretrained:
        # Fetch the matching torchvision model and copy every weight whose name
        # and shape agree; mismatched keys (e.g. the fc layer, which is sized for
        # 640x640 inputs here) are skipped and reported.
        # Note: pretrained=True is the legacy torchvision API; newer versions
        # prefer the weights= argument.
        if depth == "resnext50":
            pretrained_dict = models.resnext50_32x4d(pretrained=True).state_dict()
        else:  # "resnext101"
            pretrained_dict = models.resnext101_32x8d(pretrained=True).state_dict()
        model_dict = model.state_dict()
        load_key, no_load_key, temp_dict = [], [], {}
        for k, v in pretrained_dict.items():
            if k in model_dict.keys() and np.shape(model_dict[k]) == np.shape(v):
                temp_dict[k] = v
                load_key.append(k)
            else:
                no_load_key.append(k)
        model_dict.update(temp_dict)
        model.load_state_dict(model_dict)
        print("load over")
        print("\nSuccessful Load Key:", str(load_key)[:500], "……\nSuccessful Load Key Num:", len(load_key))
        print("\nFail To Load Key:", str(no_load_key)[:500], "……\nFail To Load Key num:", len(no_load_key))
    return model
if __name__ == "__main__":
    model = resnext('resnext50', pretrained=False).cuda()
    summary(model, (3, 640, 640))
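
As a quick usage sketch (an added example, assuming a CUDA device is available and the torchvision weights can be downloaded), the pretrained 101 variant can be built and run on a dummy batch as follows; note that the oversized fc layer stays randomly initialized, so it still needs fine-tuning on the target task:

import torch

net = resnext('resnext101', pretrained=True).cuda().eval()
with torch.no_grad():
    logits = net(torch.randn(2, 3, 640, 640).cuda())
print(logits.shape)  # torch.Size([2, 1000])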