Code source: the PyTorch tutorial repository: https://github.com/yunjey/pytorch-tutorial
ResNet is the deep residual network proposed by Kaiming He et al. Instead of learning a target mapping H(x) directly, each block learns the residual F(x) = H(x) - x, which mitigates the degradation problem that appears as networks get very deep.
A few points to keep in mind when reading the code:
1. I only use (and only implement) the BasicBlock here; ResNet-50 and deeper use the Bottleneck block instead. So this code can be regarded as the most basic version of ResNet.
2. downsample is hard to understand from the code alone and should be read together with the network structure: it is the downsampling operation applied when the input and output dimensions of a block do not match (see the shape sketch after this list).
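To make point 2 concrete, here is a shape trace for a block going from 16 to 32 channels with stride 2 (a sketch of my own, matching what the code below builds):
# x:              (N, 16, 32, 32)   input to the block
# F(x):           (N, 32, 16, 16)   conv1 has stride 2, so channels and size both change
# downsample(x):  (N, 32, 16, 16)   3x3 conv with stride 2 + BatchNorm projects x to match
# out = F(x) + downsample(x), then ReLU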
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
import numpy as np
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(device)
num_epochs = 80
learning_rate = 0.001
transform = transforms.Compose([
    transforms.Pad(4),                  # pad every side with 4 pixels: 32x32 -> 40x40
    transforms.RandomHorizontalFlip(),  # flip the image horizontally with probability 0.5
    transforms.RandomCrop(32),          # crop a random 32x32 window from the padded image
    transforms.ToTensor()])
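# A quick sanity check of the pipeline (my addition, not in the tutorial): padding
# then cropping yields a randomly shifted 32x32 view of the original image.
# from PIL import Image
# img = Image.new('RGB', (32, 32))
# print(transform(img).shape)  # torch.Size([3, 32, 32])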
# CIFAR-10 dataset
train_dataset = torchvision.datasets.CIFAR10(root='./data/',          # root directory for the dataset
                                             train=True,              # use the training split
                                             transform=transform,     # preprocessing applied to each image
                                             download=True)           # download only if not already present
test_dataset = torchvision.datasets.CIFAR10(root='./data/',
train=False,
transform=transforms.ToTensor())
# Data loader
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                           batch_size=100,  # number of samples per batch
                                           shuffle=True)    # reshuffle the data at every epoch
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
batch_size=100,
shuffle=False)
# def imshow(img):
#     npimg = img.numpy()
#     plt.imshow(np.transpose(npimg, (1, 2, 0)))  # CHW -> HWC for matplotlib
#     plt.show()
# dataiter = iter(train_loader)
# images, labels = next(dataiter)  # next(dataiter); dataiter.next() was removed in current PyTorch
# print(labels)
# imshow(torchvision.utils.make_grid(images))
def conv3x3(in_channels, out_channels, stride=1):
    # 3x3 convolution; bias=False because every conv here is followed by BatchNorm
    return nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=stride, padding=1, bias=False)
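# Quick shape check for conv3x3 (my addition): padding=1 with kernel_size=3 keeps
# the spatial size at stride=1 and halves it at stride=2.
# x = torch.randn(1, 16, 32, 32)
# print(conv3x3(16, 16)(x).shape)            # torch.Size([1, 16, 32, 32])
# print(conv3x3(16, 32, stride=2)(x).shape)  # torch.Size([1, 32, 16, 16])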
class ResidualBlock(nn.Module):
    def __init__(self, in_channels, out_channels, stride=1, downsample=None):  # no downsampling by default
        super(ResidualBlock, self).__init__()
        self.conv1 = conv3x3(in_channels, out_channels, stride)
        self.bn1 = nn.BatchNorm2d(out_channels)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(out_channels, out_channels)
        self.bn2 = nn.BatchNorm2d(out_channels)
        self.downsample = downsample  # the shortcut projection, if any
def forward(self,x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
        if self.downsample:  # when a projection shortcut is provided
            residual = self.downsample(x)
            # pass the block input x through the downsample ops (conv + BN) and use the
            # result as the residual; this handles the case where the input and output
            # of the block differ in channel count or spatial size
        out += residual  # add the shortcut to the output of bn2
out = self.relu(out)
return out
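# Illustration of when downsample is needed (a sketch of my own, mirroring what
# make_layer constructs below): going from 16 to 32 channels with stride 2, the
# identity x no longer matches the output of bn2, so a projection shortcut is required.
# ds = nn.Sequential(conv3x3(16, 32, stride=2), nn.BatchNorm2d(32))
# block = ResidualBlock(16, 32, stride=2, downsample=ds)
# print(block(torch.randn(1, 16, 32, 32)).shape)  # torch.Size([1, 32, 16, 16])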
class ResNet(nn.Module):
def __init__(self,block,layers,num_classes=10):
super(ResNet,self).__init__()
self.in_channels=16
self.conv = conv3x3(3,16)
self.bn = nn.BatchNorm2d(16)
self.relu = nn.ReLU(inplace=True)
        self.layer1 = self.make_layer(block, 16, layers[0])  # stack layers[0] residual blocks
self.layer2 = self.make_layer(block,32,layers[1],2)
self.layer3 = self.make_layer(block,64,layers[2],2)
self.avg_pool = nn.AvgPool2d(8)
self.fc = nn.Linear(64,num_classes)
def make_layer(self,block,out_channels,blocks,stride=1):
downsample = None
        if (stride != 1) or (self.in_channels != out_channels):  # stride or channel mismatch between input and output
            downsample = nn.Sequential(
                conv3x3(self.in_channels, out_channels, stride=stride),
                nn.BatchNorm2d(out_channels))  # projection shortcut so the residual addition has matching shapes
layers = []
layers.append(block(self.in_channels,out_channels,stride,downsample))
self.in_channels = out_channels
for i in range(1,blocks):
layers.append(block(out_channels,out_channels))
return nn.Sequential(*layers)
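# What make_layer builds, written out for layer2 as an example (my annotation):
# the first block downsamples and projects, the remaining blocks keep the shape.
# layer2 = nn.Sequential(
#     ResidualBlock(16, 32, stride=2, downsample=<conv3x3 stride 2 + BN>),
#     ResidualBlock(32, 32))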
def forward(self,x):
out = self.conv(x)
out = self.bn(out)
out = self.relu(out)
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.avg_pool(out)
out = out.view(out.size(0),-1)
out = self.fc(out)
return out
model = ResNet(ResidualBlock,[2,2,2]).to(device)
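# Sanity check of the full network (my addition): a 32x32 input shrinks
# 32 -> 32 (layer1) -> 16 (layer2) -> 8 (layer3); AvgPool2d(8) collapses the 8x8
# maps to 1x1, and fc maps the 64 features to 10 class scores.
# print(model(torch.randn(1, 3, 32, 32).to(device)).shape)  # torch.Size([1, 10])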
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(),lr=learning_rate)
# For updating the learning rate of the optimizer
def update_lr(optimizer,lr):
for param_group in optimizer.param_groups:
param_group['lr'] = lr
total_step = len(train_loader)
curr_lr = learning_rate
for epoch in range(num_epochs):
for i,(images,labels) in enumerate(train_loader):
images = images.to(device)
labels = labels.to(device)
outputs = model(images)
loss = criterion(outputs, labels)
optimizer.zero_grad()
loss.backward()
optimizer.step()
if (i + 1) % 100 == 0:
print("Epoch [{}/{}], Step [{}/{}] Loss: {:.4f}"
.format(epoch + 1, num_epochs, i + 1, total_step, loss.item()))
# Decay learning rate
    if (epoch + 1) % 20 == 0:  # every 20 epochs, decay the learning rate to one third
curr_lr /= 3
update_lr(optimizer, curr_lr)
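# Alternative (not used in this script): the manual decay above can be replaced by
# PyTorch's built-in scheduler, stepped once per epoch after the inner loop:
# scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=20, gamma=1/3)
# scheduler.step()  # call once per epoch instead of update_lr(optimizer, curr_lr)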
# Test the model
model.eval()  # switch BatchNorm layers to evaluation mode (use running statistics)
with torch.no_grad():
correct = 0
total = 0
for images, labels in test_loader:
images = images.to(device)
labels = labels.to(device)
outputs = model(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
print('Accuracy of the model on the test images: {} %'.format(100 * correct / total))
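# Finally, if you want to keep the trained model (my addition; the tutorial repo
# linked above does the same at the end of its script), the standard way is:
# torch.save(model.state_dict(), 'resnet.ckpt')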