Preface
A previous article introduced the ResNet18 network structure; this time we implement the skip-connection ResNet18 with the PyTorch framework. The code itself comes from the post linked below; I have only added comments, so the code is not original.
PyTorch实现ResNet18_一个不想写代码的程序员的博客-CSDN博客
Code Walkthrough
The ResNet18 built here is organized into four stages of residual blocks, and every convolution is followed by a BN layer; the details are in the code. I also added a summary display of the network: just uncomment the commented-out code to enable it.
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchsummary import summary  # used only by the optional summary display below
# Residual block for the case where input and output shapes match:
# the conv stack is the same as a plain VGG-style connection, but the input
# to the final ReLU becomes x + output
class RestNetBasicBlock(nn.Module):
    def __init__(self, in_channels, out_channels, stride):
        super(RestNetBasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=stride, padding=1)
        self.bn1 = nn.BatchNorm2d(out_channels)
        # the second conv keeps stride 1 so x and output stay the same size for the add
        self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1)
        self.bn2 = nn.BatchNorm2d(out_channels)

    def forward(self, x):
        output = self.conv1(x)
        output = F.relu(self.bn1(output))
        output = self.conv2(output)
        output = self.bn2(output)
        return F.relu(x + output)  # skip connection: add the input before the final ReLU
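# A quick shape check I added (not in the original code): the basic block keeps the
# input shape unchanged, which is what makes the bare x + output addition legal.
# block = RestNetBasicBlock(64, 64, 1)
# x = torch.randn(1, 64, 56, 56)
# print(block(x).shape)  # torch.Size([1, 64, 56, 56])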
# Residual block for the case where input and output shapes change (downsampling)
class RestNetDownBlock(nn.Module):
    def __init__(self, in_channels, out_channels, stride):
        super(RestNetDownBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=stride[0], padding=1)
        self.bn1 = nn.BatchNorm2d(out_channels)
        self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=stride[1], padding=1)
        self.bn2 = nn.BatchNorm2d(out_channels)
        # the channel count and spatial size change here, so define extra (a 1x1 conv)
        # to project x to the same shape as out before the add
        self.extra = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride[0], padding=0),
            nn.BatchNorm2d(out_channels)
        )

    def forward(self, x):
        extra_x = self.extra(x)
        output = self.conv1(x)
        out = F.relu(self.bn1(output))
        out = self.conv2(out)
        out = self.bn2(out)
        return F.relu(extra_x + out)  # projected skip connection
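# A quick shape check I added (not in the original code), using the layer2 configuration:
# the 1x1 conv in extra projects x to the same channels and size as the main path.
# down = RestNetDownBlock(64, 128, [2, 1])
# x = torch.randn(1, 64, 56, 56)
# print(down(x).shape)  # torch.Size([1, 128, 28, 28])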
# Define the main network
class RestNet(nn.Module):
    def __init__(self):
        super(RestNet, self).__init__()
        # input: 224*224*3, filter: 7*7, stride: 2, padding: 3
        # 224*224*3 ---> (224-7+2*3)/2 + 1 = 112 ---> 112*112*64
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3)
        self.bn1 = nn.BatchNorm2d(64)
        # 112*112*64 ---> (112-3+2*1)/2 + 1 = 56 ---> 56*56*64
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # input: 56*56*64
        # Note: each stage below is built from two blocks:
        # RestNetBasicBlock: residual add when input and output shapes match
        # RestNetDownBlock: residual add when input and output shapes change
        # the 4 layers correspond to the 4 kinds of residual stages in the architecture diagram
        self.layer1 = nn.Sequential(RestNetBasicBlock(64, 64, 1),
                                    RestNetBasicBlock(64, 64, 1))
        self.layer2 = nn.Sequential(RestNetDownBlock(64, 128, [2, 1]),
                                    RestNetBasicBlock(128, 128, 1))
        self.layer3 = nn.Sequential(RestNetDownBlock(128, 256, [2, 1]),
                                    RestNetBasicBlock(256, 256, 1))
        self.layer4 = nn.Sequential(RestNetDownBlock(256, 512, [2, 1]),
                                    RestNetBasicBlock(512, 512, 1))
        # global average pooling to 1*1, then a fully connected classifier (10 classes here)
        self.avgpool = nn.AdaptiveAvgPool2d(output_size=(1, 1))
        self.fc = nn.Linear(512, 10)

    def forward(self, x):
        # stem: conv -> BN -> ReLU -> max pool
        out = self.conv1(x)
        out = F.relu(self.bn1(out))
        out = self.maxpool(out)
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.layer4(out)
        out = self.avgpool(out)
        out = out.reshape(x.shape[0], -1)  # flatten [N, 512, 1, 1] -> [N, 512]
        out = self.fc(out)
        return out
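# Feature-map sizes stage by stage (my annotation; dims assume a 224*224 input):
#   conv1 + bn1 + relu + maxpool: [N, 3, 224, 224] -> [N, 64, 56, 56]
#   layer1 -> [N, 64, 56, 56]   layer2 -> [N, 128, 28, 28]
#   layer3 -> [N, 256, 14, 14]  layer4 -> [N, 512, 7, 7]
#   avgpool + reshape -> [N, 512], fc -> [N, 10]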
def ResNet18():
    # uncomment the lines below (and the main block) to print the per-layer summary
    # device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # net = RestNet()
    # net = net.to(device)
    # summary(net, (3, 224, 224))
    return RestNet()

# if __name__ == '__main__':
#     ResNet18()
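To check the whole network end to end, here is a minimal usage sketch of my own (using a random tensor instead of real images):

net = ResNet18()
x = torch.randn(2, 3, 224, 224)  # a batch of 2 fake RGB images
y = net(x)
print(y.shape)  # torch.Size([2, 10]) -- one logit per class for each image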
So the ResNet18 structure is not that complicated after all. Once again I have to marvel: the masters really are impressive.