Building ResNet from Scratch

ResNet18

import torch
import torch.nn as nn


# Residual block (basic block: two 3x3 convolutions plus a shortcut)
class ResidualBlock(nn.Module):
    def __init__(self, in_channels, out_channels):
        super(ResidualBlock, self).__init__()
        self.channel_equal_flag = True
        if in_channels == out_channels:
            self.conv1 = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=3, padding=1,
                                   stride=1)
        else:
            ## Projection on the identity branch: when the channel count changes, the spatial
            ## resolution is also halved, so the shortcut needs a strided 1x1 convolution
            self.conv1x1 = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=1, stride=2)
            self.bn1x1 = nn.BatchNorm2d(num_features=out_channels)
            self.channel_equal_flag = False

            self.conv1 = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=3, padding=1,
                                   stride=2)

        self.bn1 = nn.BatchNorm2d(num_features=out_channels)
        self.relu = nn.ReLU(inplace=True)

        self.conv2 = nn.Conv2d(in_channels=out_channels, out_channels=out_channels, kernel_size=3, padding=1)
        self.bn2 = nn.BatchNorm2d(num_features=out_channels)

    def forward(self, x):
        identity = x
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)

        x = self.conv2(x)
        x = self.bn2(x)

        # Project the shortcut when the channel count (and resolution) changes
        if not self.channel_equal_flag:
            identity = self.conv1x1(identity)
            identity = self.bn1x1(identity)

        # Add the shortcut first, then apply ReLU, as in the original ResNet design
        out = self.relu(identity + x)
        return out


class ResNet18(nn.Module):
    def __init__(self, num_classes=1000):
        super(ResNet18, self).__init__()
        # conv1
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=64, kernel_size=7, stride=2, padding=3)
        self.bn1 = nn.BatchNorm2d(num_features=64)
        self.relu = nn.ReLU(inplace=True)

        # conv2_x
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.conv2_1 = ResidualBlock(in_channels=64, out_channels=64)
        self.conv2_2 = ResidualBlock(in_channels=64, out_channels=64)

        # conv3_x
        self.conv3_1 = ResidualBlock(in_channels=64, out_channels=128)
        self.conv3_2 = ResidualBlock(in_channels=128, out_channels=128)

        # conv4_x
        self.conv4_1 = ResidualBlock(in_channels=128, out_channels=256)
        self.conv4_2 = ResidualBlock(in_channels=256, out_channels=256)

        # conv5_x
        self.conv5_1 = ResidualBlock(in_channels=256, out_channels=512)
        self.conv5_2 = ResidualBlock(in_channels=512, out_channels=512)

        # avg_pool
        self.avg_pool = nn.AdaptiveAvgPool2d(output_size=(1, 1))  # [N, C, H, W] = [N, 512, 1, 1]

        # fc
        self.fc = nn.Linear(in_features=512, out_features=num_classes)  # [N, num_classes]

        # softmax (turns logits into class probabilities; when training with
        # nn.CrossEntropyLoss, feed it the raw fc output instead)
        self.softmax = nn.Softmax(dim=1)

    def forward(self, x):
        # conv1
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)

        # conv2_x
        x = self.maxpool(x)
        x = self.conv2_1(x)
        x = self.conv2_2(x)

        # conv3_x
        x = self.conv3_1(x)
        x = self.conv3_2(x)

        # conv4_x
        x = self.conv4_1(x)
        x = self.conv4_2(x)

        # conv5_x
        x = self.conv5_1(x)
        x = self.conv5_2(x)

        # avgpool + fc + softmax
        x = self.avg_pool(x)
        x = torch.flatten(x, 1)
        x = self.fc(x)
        x = self.softmax(x)

        return x


if __name__ == '__main__':
    model = ResNet18(num_classes=1000)
    dummy_input = torch.randn([1, 3, 224, 224])
    output = model(dummy_input)

    # Save the whole model and export it to ONNX
    torch.save(model, 'resnet18.pth')
    torch.onnx.export(model, dummy_input, 'resnet18.onnx')
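
A quick way to sanity-check the hand-written network is to compare it against torchvision's reference resnet18. The snippet below is a minimal sketch meant to run after the definitions above, assuming torchvision is installed; the parameter counts will not match exactly because the reference implementation uses bias-free convolutions in front of its BatchNorm layers.

import torchvision

custom = ResNet18(num_classes=1000)
reference = torchvision.models.resnet18(num_classes=1000)

x = torch.randn(1, 3, 224, 224)
print(custom(x).shape)     # torch.Size([1, 1000])
print(reference(x).shape)  # torch.Size([1, 1000])

# Rough size comparison (close but not identical, see note above)
def count_params(m):
    return sum(p.numel() for p in m.parameters())

print(count_params(custom), count_params(reference))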

ResNet50

import torch
import torch.nn as nn
 
class ResidualBlock(nn.Module):
    def __init__(self, in_channels, out_channels, identity_downsample=None, stride=1):
        super(ResidualBlock, self).__init__()
 
        self.expansion = 4
 
        self.conv1 = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=1, stride=1, padding=0)
        self.bn1 = nn.BatchNorm2d(out_channels)
        self.conv2 = nn.Conv2d(in_channels=out_channels, out_channels=out_channels, kernel_size=3, stride=stride,
                               padding=1)
        self.bn2 = nn.BatchNorm2d(out_channels)
        self.conv3 = nn.Conv2d(in_channels=out_channels, out_channels=out_channels * self.expansion, kernel_size=1,
                               stride=1, padding=0)
        self.bn3 = nn.BatchNorm2d(out_channels * self.expansion)
        self.relu = nn.ReLU()
        self.identity_downsample = identity_downsample
        self.stride = stride
 
    def forward(self, x):
        identity = x.clone()
 
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.conv2(x)
        x = self.bn2(x)
        x = self.relu(x)
        x = self.conv3(x)
        x = self.bn3(x)
 
        if self.identity_downsample is not None:
            identity = self.identity_downsample(identity)
 
        x += identity
        x = self.relu(x)
 
        return x
 
 
class ResNet(nn.Module):
    def __init__(self, block, layers, num_classes):
        super(ResNet, self).__init__()
 
        self.in_channels = 64
 
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU()
        self.pooling = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
 
        self.layer1 = self.res_layer(block, layers[0], out_channels=64, stride=1)   # stage 1: resolution unchanged
        self.layer2 = self.res_layer(block, layers[1], out_channels=128, stride=2)  # stage 2: resolution halved
        self.layer3 = self.res_layer(block, layers[2], out_channels=256, stride=2)  # stage 3: resolution halved
        self.layer4 = self.res_layer(block, layers[3], out_channels=512, stride=2)  # stage 4: resolution halved
 
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(512 * 4, num_classes)
 
    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.pooling(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
 
        x = x.reshape(x.shape[0], -1)
 
        x = self.fc(x)
 
        return x
 
    def res_layer(self, block, num_residual_blocks, out_channels, stride):
        identity_downsample = None
        layers = []
 
        # The first block of each stage needs special handling (projection shortcut and possible downsampling)
        if stride != 1 or self.in_channels != out_channels * 4:
            identity_downsample = nn.Sequential(
                nn.Conv2d(
                    self.in_channels,
                    out_channels * 4,
                    kernel_size=1,
                    stride=stride,
                ),
                nn.BatchNorm2d(out_channels * 4),
            )
 
        layers.append(
            block(self.in_channels, out_channels, identity_downsample, stride)
        )
 
        # Except for the first block of each stage, every block's input channel count is 4x out_channels
        # (the bottleneck expansion used by ResNet-50/101/152)
        self.in_channels = out_channels * 4  # grows after each stage: 64 -> 4*64=256 -> 4*128=512 -> 4*256=1024 -> 4*512=2048
        # e.g. in resnet50's conv2_x the channels go 256 -> 64 -> 256 inside each bottleneck
 
        for i in range(num_residual_blocks - 1):
            layers.append(block(self.in_channels, out_channels))
 
        return nn.Sequential(*layers)
 
 
def ResNet50(num_classes=1000):
    return ResNet(ResidualBlock, [3, 4, 6, 3], num_classes)
 
 
def ResNet101(num_classes=1000):
    return ResNet(ResidualBlock, [3, 4, 23, 3], num_classes)
 
 
def ResNet152(num_classes=1000):
    return ResNet(ResidualBlock, [3, 8, 36, 3], num_classes)
 
 
if __name__ == '__main__':
    dummy_input = torch.randn([1, 3, 224, 224])
    resnet50 = ResNet50(num_classes=1000)
    output = resnet50(dummy_input)

    # Save the whole model and export it to ONNX
    torch.save(resnet50, 'resnet50.pth')
    torch.onnx.export(resnet50, dummy_input, 'resnet50.onnx')
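
To verify the downsampling pattern described in the comments above (the channel count grows 256 -> 512 -> 1024 -> 2048 while layer2 through layer4 each halve the spatial resolution), you can register forward hooks on the four stages. This is a minimal sketch assuming the definitions above are in scope:

stages_model = ResNet50(num_classes=1000)

# Print each stage's output shape; expected for a 224x224 input:
# layer1 (1, 256, 56, 56), layer2 (1, 512, 28, 28),
# layer3 (1, 1024, 14, 14), layer4 (1, 2048, 7, 7)
for name in ['layer1', 'layer2', 'layer3', 'layer4']:
    getattr(stages_model, name).register_forward_hook(
        lambda module, inp, out, name=name: print(name, tuple(out.shape))
    )

with torch.no_grad():
    stages_model(torch.randn(1, 3, 224, 224))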

PyCharm is a popular Python IDE (integrated development environment) that supports many kinds of Python development, including building deep learning models. To set up a ResNet50 model in PyCharm, first make sure the required libraries are installed, such as TensorFlow or PyTorch. A basic workflow looks like this:

1. Install the dependencies:
   - Open PyCharm and make sure you have created a Python project.
   - In PyCharm's terminal, run one of the following commands to install TensorFlow or PyTorch.
     For TensorFlow:
     ```
     pip install tensorflow
     ```
     For PyTorch:
     ```
     pip install torch torchvision
     ```
2. Download the ResNet50 model:
   - Write code in PyCharm that downloads a pretrained ResNet50 through the API provided by TensorFlow or PyTorch.
     For example, in TensorFlow:
     ```python
     import tensorflow as tf
     from tensorflow.keras.applications.resnet50 import ResNet50

     resnet_model = ResNet50(weights='imagenet')
     ```
     In PyTorch:
     ```python
     import torchvision.models as models
     import torch.nn as nn

     resnet50 = models.resnet50(pretrained=True)
     ```
3. Use or modify the ResNet50 model:
   - Depending on your task, you may want to fine-tune the model on your own dataset (see the sketch after this list).
   - You can run the model for prediction, or add custom layers for a specific task such as classification or detection.
4. Write training and testing code:
   - Load and preprocess your dataset.
   - Set up the training loop with an appropriate loss function and optimizer.
   - Evaluate the model on the test set.
5. Run and debug:
   - Run the code and monitor the training process and results.
   - Use PyCharm's debugging tools to locate and fix problems in the code.
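
As a concrete illustration of step 3, the sketch below fine-tunes the torchvision ResNet50 by replacing the final fully connected layer. The class count, batch size, and optimizer settings are assumptions for illustration, not part of the original post.

```python
import torch
import torch.nn as nn
import torchvision.models as models

# Hypothetical fine-tuning sketch: 10 output classes is an assumption.
# (Newer torchvision versions use the weights= argument instead of pretrained=True.)
resnet50 = models.resnet50(pretrained=True)

# Freeze the backbone so only the new classifier head is trained.
for param in resnet50.parameters():
    param.requires_grad = False

# Replace the final fully connected layer (2048 -> num_classes).
num_classes = 10
resnet50.fc = nn.Linear(resnet50.fc.in_features, num_classes)

criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(resnet50.fc.parameters(), lr=0.01, momentum=0.9)

# One training step on a dummy batch (replace with a real DataLoader).
images = torch.randn(8, 3, 224, 224)
labels = torch.randint(0, num_classes, (8,))

resnet50.train()
optimizer.zero_grad()
loss = criterion(resnet50(images), labels)
loss.backward()
optimizer.step()
print(loss.item())
```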