最直接的ResNet-34 PyTorch代码

最直接的ResNet-34 PyTorch代码,根据官方源码手写。名称对应关系:官方模型的参数名中,层与子层之间用点号“.”分隔;本代码去掉了层和子层之间的点号“.”。

这样做的好处在于名称和官方有一一对应关系,可以方便地用load_state_dict()装载预训练参数。下面是具体代码(代码有点长)。

# -*- coding: utf-8 -*-
import torch
from torchvision.models.resnet import resnet34
import torch.nn as nn
from collections import OrderedDict
import torch 

class MyResNetD(nn.Module):
    """ResNet-34 with flattened ("dot-free") submodule names.

    Every parameter name corresponds 1:1 to torchvision's ``resnet34``
    after the dots between layer / block / sublayer are removed, e.g.
    ``layer1.0.conv1.weight`` -> ``layer10conv1.weight``.  This makes it
    trivial to remap and load the official pretrained state dict.
    """

    # Per stage: (stage index, number of BasicBlocks, in channels, out channels).
    # Stage 1 keeps 64 channels; each later stage doubles the channels and
    # halves the spatial resolution in its first block (stride 2 + downsample).
    _CFG = [
        (1, 3, 64, 64),
        (2, 4, 64, 128),
        (3, 6, 128, 256),
        (4, 3, 256, 512),
    ]

    def __init__(self):
        super(MyResNetD, self).__init__()
        # Stem: identical to torchvision's ResNet.
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
        # BatchNorm2d defaults (eps=1e-05, momentum=0.1, affine=True,
        # track_running_stats=True) equal the values spelled out in the original.
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=False)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

        # Generate every BasicBlock.  setattr on an nn.Module registers the
        # submodule under that name, so the attribute (and state_dict) names
        # "layer{stage}{block}conv1", ... are exactly the official names with
        # the dots stripped.  Registration order matches the hand-written
        # original: conv1, bn1, relu, conv2, bn2, [downsample0, downsample1].
        self._blocks = []  # (name prefix, has_downsample) in forward order
        for stage, num_blocks, in_ch, out_ch in self._CFG:
            for b in range(num_blocks):
                prefix = 'layer%d%d' % (stage, b)
                first = (b == 0)
                # Only the first block of stages 2-4 strides and changes width.
                stride = 2 if (first and stage > 1) else 1
                block_in = in_ch if first else out_ch
                setattr(self, prefix + 'conv1',
                        nn.Conv2d(block_in, out_ch, kernel_size=3, stride=stride,
                                  padding=1, bias=False))
                setattr(self, prefix + 'bn1', nn.BatchNorm2d(out_ch))
                setattr(self, prefix + 'relu', nn.ReLU(inplace=True))
                setattr(self, prefix + 'conv2',
                        nn.Conv2d(out_ch, out_ch, kernel_size=3, stride=1,
                                  padding=1, bias=False))
                setattr(self, prefix + 'bn2', nn.BatchNorm2d(out_ch))
                has_downsample = first and stage > 1
                if has_downsample:
                    # 1x1 conv + BN to match the shortcut's shape and stride.
                    setattr(self, prefix + 'downsample0',
                            nn.Conv2d(block_in, out_ch, kernel_size=1, stride=2,
                                      bias=False))
                    setattr(self, prefix + 'downsample1', nn.BatchNorm2d(out_ch))
                self._blocks.append((prefix, has_downsample))

        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(512, 1000)

    def forward(self, x):
        # Stem.
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)

        # Residual BasicBlocks:
        #   out = relu(bn2(conv2(relu(bn1(conv1(x))))) + shortcut(x))
        # Note: each block uses its own relu; the hand-written original
        # mistakenly reused layer20relu inside block layer2-1 (harmless,
        # since all block relus are identical nn.ReLU(inplace=True)).
        for prefix, has_downsample in self._blocks:
            identity = x
            relu = getattr(self, prefix + 'relu')
            x = getattr(self, prefix + 'conv1')(x)
            x = getattr(self, prefix + 'bn1')(x)
            x = relu(x)
            x = getattr(self, prefix + 'conv2')(x)
            x = getattr(self, prefix + 'bn2')(x)
            if has_downsample:
                identity = getattr(self, prefix + 'downsample0')(identity)
                identity = getattr(self, prefix + 'downsample1')(identity)
            x = x + identity
            x = relu(x)

        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        x = self.fc(x)
        return x
from torchvision import models

# Official torchvision model with pretrained ImageNet weights (downloads on
# first use).
Onet = models.resnet34(pretrained=True)
OWeight = Onet.state_dict()

# Our re-implementation: same parameters, flattened ("dot-free") names.
net = MyResNetD()
Cweight = net.state_dict()

# Remap each official parameter name to the flattened one and copy the tensor.
for k, v in OWeight.items():
    if k in Cweight:
        # Stem / head names (conv1, bn1, fc, ...) are already identical.
        Cweight[k] = v
    if k.startswith('layer') and 'downsample' not in k:
        # 'layer1.0.conv1.weight' -> 'layer10conv1.weight' (drop first 2 dots)
        Cweight[k.replace('.', '', 2)] = v
    if 'downsample' in k:
        # 'layer2.0.downsample.0.weight' -> 'layer20downsample0.weight'
        # (drop first 3 dots)
        Cweight[k.replace('.', '', 3)] = v
net.load_state_dict(Cweight)

# Compare both networks on the same random input.  eval() freezes BatchNorm
# (running statistics instead of batch statistics) so the comparison reflects
# inference behaviour; no_grad() avoids building an unused autograd graph.
net.eval()
Onet.eval()
x = torch.rand(10, 3, 224, 224)
with torch.no_grad():
    out_mine = net(x)
    out_official = Onet(x)
print(out_mine[1, 0:10])
print(out_official[1, 0:10])
# Programmatic check instead of eyeballing the two printouts.
assert torch.allclose(out_mine, out_official, atol=1e-5), '输出不一致,改写失败'

用随机测试数据看看两个网络的输出是否一样,上述代码输出:

上面两个网络的输出结果一致,说明代码改写成功。

评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值