[Li Mu Deep Learning] 61. ResNet Residual Block: Code Examples

  • Calling the existing interface
import torch
from torch import nn
import torchvision.models as models

class nmv_res50_centernet(nn.Module):
    def __init__(self, cls_num):
        super(nmv_res50_centernet, self).__init__()
        self.resnet50 = models.resnet50(pretrained=True)  # use resnet50 as the backbone
        # (newer torchvision versions use weights=models.ResNet50_Weights.DEFAULT instead of pretrained=True)
        # set up the other layers the model needs
        # ......

    def forward(self, x):
        # =========== backbone ============
        out = self.resnet50.conv1(x)
        out = self.resnet50.bn1(out)
        out = self.resnet50.relu(out)
        out = self.resnet50.maxpool(out)
        res2 = self.resnet50.layer1(out)
        res3 = self.resnet50.layer2(res2)
        res4 = self.resnet50.layer3(res3)
        # res4_d = res4.detach()  # stop gradients here so they do not backpropagate through this branch
        # split the feature-map tensor into 2 equal parts along the batch dim n;
        # use torch.split instead to split into parts of specified sizes
        fea_id, fea_attr_det = torch.chunk(res4, 2, dim=0)

        # other classification / detection head branches
        # ......
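As an aside on the torch.chunk call above, here is a minimal sketch contrasting torch.chunk with torch.split (the tensor sizes below are made up purely for illustration):

import torch

res4 = torch.rand(4, 256, 28, 28)  # dummy feature map: n=4, c=256, h=w=28

# chunk: 2 equal parts along the batch dimension (dim=0)
fea_id, fea_attr_det = torch.chunk(res4, 2, dim=0)
print(fea_id.shape, fea_attr_det.shape)  # torch.Size([2, 256, 28, 28]) each

# split: parts of explicitly specified sizes along dim=0
a, b = torch.split(res4, [1, 3], dim=0)
print(a.shape, b.shape)  # torch.Size([1, 256, 28, 28]) and torch.Size([3, 256, 28, 28])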
  • Implementing it from scratch
import torch
from torch import nn
from torch.nn import functional as F

'''
From-scratch implementation of the basic residual block.
input_channels : number of input channels
num_channels : number of output channels
use_1x1conv : whether the shortcut path uses a 1*1 conv
strides : stride, 1 by default; controls whether the output feature map's h,w shrink
'''
class Residual(nn.Module):
    def __init__(self, input_channels, num_channels, use_1x1conv=False, strides=1):
        super().__init__()
        self.conv1 = nn.Conv2d(input_channels, num_channels, kernel_size=3, padding=1, stride=strides)
        self.conv2 = nn.Conv2d(num_channels, num_channels, kernel_size=3, padding=1)
        if use_1x1conv: # whether the skip-connection path uses a 1*1 conv
            self.conv3 = nn.Conv2d(input_channels, num_channels, kernel_size=1, stride=strides)
        else:
            self.conv3 = None
        self.bn1 = nn.BatchNorm2d(num_channels)
        self.bn2 = nn.BatchNorm2d(num_channels)
        self.relu = nn.ReLU(inplace=True) # inplace saves memory and speeds up training (note: forward below calls F.relu directly, so this module is unused)

    def forward(self, x):
        y = F.relu(self.bn1(self.conv1(x)))
        y = self.bn2(self.conv2(y))
        if self.conv3:
            x = self.conv3(x)
        y += x # residual connection: element-wise addition, not concatenation
        return F.relu(y)
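A quick shape check of the Residual block (a minimal sketch; the input sizes are chosen only for illustration):

blk = Residual(3, 3)
x = torch.rand(4, 3, 6, 6)
print(blk(x).shape)  # torch.Size([4, 3, 6, 6]) -- shape preserved

# with a 1*1 conv on the shortcut we can halve h,w and change the channel count
blk = Residual(3, 6, use_1x1conv=True, strides=2)
print(blk(x).shape)  # torch.Size([4, 6, 3, 3])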

'''
Function that builds a (stacked) residual stage.
num_residuals : number of residual sub-blocks stacked inside the stage
'''
def resnet_block(input_channels, num_channels, num_residuals, first_block=False):
    blk = []
    for i in range(num_residuals):
        if i == 0 and not first_block:
            blk.append(Residual(input_channels, num_channels, use_1x1conv=True, strides=2)) # in stages after the first, the first sub-block halves h,w and changes the channel count
        else:
            blk.append(Residual(num_channels, num_channels)) # the remaining sub-blocks keep the feature-map shape
    return blk
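A quick sanity check of a stage built by resnet_block, unpacked into nn.Sequential (the input size below is illustrative):

stage = nn.Sequential(*resnet_block(64, 128, 2))
x = torch.rand(1, 64, 56, 56)
print(stage(x).shape)  # torch.Size([1, 128, 28, 28]) -- h,w halved, channels doubled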

if __name__ == '__main__':
    # ResNet
    b1 = nn.Sequential(
        nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3), # 7*7 stride-2 stem: 224 ---> 112
        nn.BatchNorm2d(64), nn.ReLU(),
        nn.MaxPool2d(kernel_size=3, stride=2, padding=1))

    b2 = nn.Sequential(*resnet_block(64, 64, 2, first_block=True)) # * unpacks the list into separate module arguments
    b3 = nn.Sequential(*resnet_block(64, 128, 2))
    b4 = nn.Sequential(*resnet_block(128, 256, 2))
    b5 = nn.Sequential(*resnet_block(256, 512, 2))

    # nn.AdaptiveAvgPool2d((out_h, out_w)) pools adaptively: n*c*h*w ---> n*c*out_h*out_w
    # nn.Flatten() by default flattens from dim 1 (c) through the last dim (w): n*c*h*w ---> n*(c*h*w);
    # a range can also be given, e.g. nn.Flatten(1, 2): n*c*h*w ---> n*(c*h)*w
    net = nn.Sequential(b1, b2, b3, b4, b5, nn.AdaptiveAvgPool2d((1, 1)), nn.Flatten(), nn.Linear(512, 10))

    x = torch.rand(size=(1, 1, 224, 224))
    for layer in net:
        x = layer(x)
        print(layer.__class__.__name__, 'output shape:\t', x.shape)

The output is as follows:
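For reference, the loop should print per-layer shapes like the following (derived by tracing shapes through the code above):

Sequential output shape:	 torch.Size([1, 64, 56, 56])
Sequential output shape:	 torch.Size([1, 64, 56, 56])
Sequential output shape:	 torch.Size([1, 128, 28, 28])
Sequential output shape:	 torch.Size([1, 256, 14, 14])
Sequential output shape:	 torch.Size([1, 512, 7, 7])
AdaptiveAvgPool2d output shape:	 torch.Size([1, 512, 1, 1])
Flatten output shape:	 torch.Size([1, 512])
Linear output shape:	 torch.Size([1, 10])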
