Implementing a Residual Neural Network (ResNet) in PyTorch

1. The Residual Block

The input X passes through two convolutions with a ReLU in between to produce F(X); X and F(X) are then added together and passed through one more ReLU, giving the block's final output. A residual neural network is simply a deep network built by stacking such residual blocks.
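
A minimal sketch of that computation, with f standing in for the two-convolution branch (the full version is the ResidualBlock class below):

import torch
import torch.nn as nn

x = torch.randn(1, 16, 32, 32)                   # input feature map X
f = nn.Conv2d(16, 16, kernel_size=3, padding=1)  # stand-in for the branch F
y = torch.relu(x + f(x))                         # y = ReLU(X + F(X))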

2. Code

This post is light on theory and focuses on the code: implementing a shallow residual network for classification on the CIFAR-10 dataset.

import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms

# Device configuration: use the GPU when one is available
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(torch.cuda.is_available())

# Hyper-parameters
num_epochs = 100
batch_size = 100
learning_rate = 0.001

# Image augmentation for the training set
transform = transforms.Compose([
  transforms.Pad(4),                  # 32x32 -> 40x40
  transforms.RandomHorizontalFlip(),  # random mirroring
  transforms.RandomCrop(32),          # random 32x32 crop out of the padded image
  transforms.ToTensor()
])
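
The Pad(4)/RandomCrop(32) pair shifts each image by up to 4 pixels in every direction; the test set below deliberately skips this augmentation and only converts to a tensor. A quick sanity check of the pipeline's output shape (a sketch using a blank PIL image):

from PIL import Image
img = Image.new('RGB', (32, 32))  # dummy CIFAR-sized image
print(transform(img).shape)       # torch.Size([3, 32, 32]): padded to 40x40, cropped back to 32x32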

train_dataset = torchvision.datasets.CIFAR10(root='../../data/',
                                             train=True,
                                             transform=transform,
                                             download=True)

test_dataset = torchvision.datasets.CIFAR10(root='../../data/',
                                            train=False,
                                            transform=transforms.ToTensor())

train_loader = torch.utils.data.DataLoader(
    dataset=train_dataset,
    batch_size=batch_size,
    shuffle=True
)

test_loader = torch.utils.data.DataLoader(
    dataset=test_dataset,
    batch_size=batch_size,
    shuffle=False  # no need to shuffle the test set
)

# Grab one batch to check the tensor shapes
images, labels = next(iter(train_loader))

print(images.shape)  # torch.Size([100, 3, 32, 32])

def conv3x3(in_channels, out_channels, stride=1):
  # 3x3 convolution with padding; bias is omitted because BatchNorm follows
  return nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=stride, padding=1, bias=False)
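
With kernel_size=3 and padding=1 this helper preserves the spatial size at stride 1 and halves it at stride 2, which the downsampling stages below rely on. A quick shape check (a sketch):

x = torch.randn(1, 16, 32, 32)
print(conv3x3(16, 32)(x).shape)            # torch.Size([1, 32, 32, 32]) - size preserved
print(conv3x3(16, 32, stride=2)(x).shape)  # torch.Size([1, 32, 16, 16]) - size halved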

class ResidualBlock(nn.Module):
  def __init__(self, in_channels, out_channels, stride=1, downsample=None):
    super(ResidualBlock, self).__init__()
    self.conv1 = conv3x3(in_channels, out_channels, stride)
    self.bn1 = nn.BatchNorm2d(out_channels)
    self.relu = nn.ReLU(inplace=True)
    self.conv2 = conv3x3(out_channels, out_channels)
    self.bn2 = nn.BatchNorm2d(out_channels)
    self.downsample = downsample

  def forward(self, x):
    residual = x
    out = self.conv1(x)
    out = self.bn1(out)
    out = self.relu(out)
    out = self.conv2(out)
    out = self.bn2(out)
    if self.downsample is not None:
      residual = self.downsample(x)
    out += residual
    out = self.relu(out)
    return out
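
When a block changes the spatial size or channel count, the identity path must be transformed to match before the addition; that is what the downsample argument is for. For example (a sketch):

block = ResidualBlock(16, 16)
print(block(torch.randn(1, 16, 32, 32)).shape)  # torch.Size([1, 16, 32, 32])

down = nn.Sequential(conv3x3(16, 32, stride=2), nn.BatchNorm2d(32))
block = ResidualBlock(16, 32, stride=2, downsample=down)
print(block(torch.randn(1, 16, 32, 32)).shape)  # torch.Size([1, 32, 16, 16])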

class ResNet(nn.Module):
  def __init__(self, block, layers, num_classes=10):
    super(ResNet, self).__init__()
    self.in_channels = 16
    self.conv = conv3x3(3, 16)  # initial convolution: 3 -> 16 channels
    self.bn = nn.BatchNorm2d(16)
    self.relu = nn.ReLU(inplace=True)
    self.layer1 = self.make_layer(block, 16, layers[0])     # 32x32, 16 channels
    self.layer2 = self.make_layer(block, 32, layers[1], 2)  # stride 2 -> 16x16, 32 channels
    self.layer3 = self.make_layer(block, 64, layers[2], 2)  # stride 2 -> 8x8, 64 channels
    self.avg_pool = nn.AvgPool2d(8)  # 8x8 -> 1x1
    self.fc = nn.Linear(64, num_classes)

  def make_layer(self, block, out_channels, blocks, stride=1):
    # Project the identity path whenever the spatial size or channel count changes
    downsample = None
    if (stride != 1) or (self.in_channels != out_channels):
      downsample = nn.Sequential(
          conv3x3(self.in_channels, out_channels, stride=stride),
          nn.BatchNorm2d(out_channels)
      )
    layers = []
    layers.append(block(self.in_channels, out_channels, stride, downsample))
    self.in_channels = out_channels
    for i in range(1, blocks):
      layers.append(block(self.in_channels, out_channels))
    return nn.Sequential(*layers)

  def forward(self, x):
    out = self.conv(x)
    out = self.bn(out)
    out = self.relu(out)
    out = self.layer1(out)
    out = self.layer2(out)
    out = self.layer3(out)
    out = self.avg_pool(out)
    out = out.view(out.size(0), -1)
    out = self.fc(out)
    return out

model = ResNet(ResidualBlock, [2, 2, 2]).to(device)
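
With layers=[2, 2, 2], each of the three stages stacks two residual blocks, giving the shallow network described above. A dummy forward pass (a sketch) confirms that the two stride-2 stages reduce 32x32 inputs to 8x8, the average pooling collapses that to a 64-dimensional vector, and the fully connected layer maps it to 10 class scores:

with torch.no_grad():
  print(model(torch.randn(2, 3, 32, 32).to(device)).shape)  # torch.Size([2, 10])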

# Loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

# Manually set a new learning rate on every parameter group
def update_lr(optimizer, lr):
  for param_group in optimizer.param_groups:
    param_group['lr'] = lr
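
The loop below divides the learning rate by 3 every 20 epochs through this helper. The same schedule could be expressed with PyTorch's built-in scheduler instead, for example (a sketch, not used in the code below):

scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=20, gamma=1/3)
# then call scheduler.step() once per epoch instead of update_lr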

# Train the model
total_step = len(train_loader)
curr_lr = learning_rate
for epoch in range(num_epochs):
  for i, (images, labels) in enumerate(train_loader):
    images = images.to(device)
    labels = labels.to(device)

    outputs = model(images)
    loss = criterion(outputs, labels)

    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    if (i+1) % 100 == 0:
      print("Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}"
            .format(epoch+1, num_epochs, i+1, total_step, loss.item()))

  # Decay the learning rate by a factor of 3 every 20 epochs
  if (epoch+1) % 20 == 0:
    curr_lr /= 3
    update_lr(optimizer, curr_lr)

# Test the model; eval mode makes BatchNorm use its running statistics
model.eval()
with torch.no_grad():
  correct = 0
  total = 0
  for images, labels in test_loader:
    images = images.to(device)
    labels = labels.to(device)
    outputs = model(images)
    _, predicted = torch.max(outputs, 1)  # index of the highest score per image
    total += labels.size(0)
    correct += (predicted == labels).sum().item()

  print("Accuracy of the model on the test images: {} %".format(100 * correct / total))

# Save the model checkpoint
torch.save(model.state_dict(), 'resnet.ckpt')
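
To reuse the checkpoint later, rebuild the model and load the saved weights back in, for example:

model = ResNet(ResidualBlock, [2, 2, 2]).to(device)
model.load_state_dict(torch.load('resnet.ckpt', map_location=device))
model.eval()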
