Below is the structure of a ResNet residual block (image from the web). The skip connection in this structure is very effective at alleviating the vanishing gradient problem in deep networks. This article uses the CIFAR10 dataset and the PyTorch framework to build a hands-on ResNet18 classifier.
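In short, a residual block computes out = residual(x) + x: the stacked convolutions only have to learn the residual, and the identity shortcut gives gradients a direct path back through the block. The snippet below is only an illustrative sketch of that idea; the names residual_branch and shortcut are placeholders and are not part of the code that follows.

import torch

def residual_forward(x, residual_branch, shortcut=lambda t: t):
    # out = residual_branch(x) + shortcut(x): the shortcut term keeps an
    # identity path, so gradients can flow back even if the branch saturates
    return torch.relu(residual_branch(x) + shortcut(x))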
Code:
# 1. Prepare the dataset
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import datasets
from torchvision import transforms
from torch.utils.data import DataLoader

batch_size = 128
transform = transforms.Compose([
    transforms.Resize((32, 32)),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
cifar_train = datasets.CIFAR10('cifar', train=True, transform=transform, download=True)
trainloader = DataLoader(cifar_train, batch_size=batch_size, shuffle=True)
cifar_test = datasets.CIFAR10('cifar', train=False, transform=transform, download=True)
testloader = DataLoader(cifar_test, batch_size=batch_size, shuffle=False)  # no need to shuffle the test set

# Sanity check: fetch one batch and print its shape
x, label = next(iter(trainloader))  # iter(...).next() no longer works; use the built-in next()
print(x.shape, label.shape)
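# Expected output: torch.Size([128, 3, 32, 32]) torch.Size([128])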
# 2. Define the network
# (1) Residual block
class ResBlk(nn.Module):
    def __init__(self, ch_in, ch_out):
        super(ResBlk, self).__init__()
        self.conv1 = nn.Conv2d(ch_in, ch_out, kernel_size=3, stride=1, padding=1)
        self.bn1 = nn.BatchNorm2d(ch_out)
        self.conv2 = nn.Conv2d(ch_out, ch_out, kernel_size=3, stride=1, padding=1)
        self.bn2 = nn.BatchNorm2d(ch_out)
        # Shortcut branch: identity if the channel counts match,
        # otherwise a 1x1 convolution to map ch_in to ch_out
        self.extra = nn.Sequential()
        if ch_out != ch_in:
            self.extra = nn.Sequential(
                nn.Conv2d(ch_in, ch_out, kernel_size=1, stride=1),
                nn.BatchNorm2d(ch_out)
            )

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out = self.extra(x) + out  # add the shortcut branch
        out = F.relu(out)
        return out
# (2) Residual network
class ResNet18(nn.Module):
    def __init__(self):
        super(ResNet18, self).__init__()
        self.conv1 = nn.Sequential(  # stem / preprocessing layer
            nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(64)
        )
        self.blk1 = ResBlk(64, 128)
        self.blk2 = ResBlk(128, 256)
        self.blk3 = ResBlk(256, 512)
        self.blk4 = ResBlk(512, 512)
        self.fc = nn.Linear(512 * 1 * 1, 10)

    def forward(self, x):
        x = F.relu(self.conv1(x))
        x = self.blk1(x)
        x = self.blk2(x)
        x = self.blk3(x)
        x = self.blk4(x)
        x = F.adaptive_avg_pool2d(x, [1, 1])  # pool the feature map down to 1x1
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x
# 3. Define the loss function and the optimizer
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')  # fall back to CPU if no GPU is available
model = ResNet18().to(device)
criteon = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
# 4. Train the network
for epoch in range(10):
    model.train()
    for batchidx, (x, label) in enumerate(trainloader):
        x, label = x.to(device), label.to(device)
        outputs = model(x)
        loss = criteon(outputs, label)
        # backward pass
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    print(epoch, loss.item())  # loss of the last batch in this epoch
    # 5. Evaluate on the test set after each epoch
    model.eval()
    with torch.no_grad():
        total_correct = 0
        total_num = 0
        for x, label in testloader:
            x, label = x.to(device), label.to(device)
            out = model(x)
            pred = out.argmax(dim=1)
            total_correct += torch.eq(pred, label).float().sum().item()
            total_num += x.size(0)
        acc = total_correct / total_num
        print(epoch, acc)
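Once training finishes, you will usually want to keep the trained weights around. A minimal sketch, assuming a local checkpoint file is acceptable (the filename resnet18_cifar10.pth is just a placeholder):

# Save the trained weights
torch.save(model.state_dict(), 'resnet18_cifar10.pth')

# Later: rebuild the model and load the weights for inference
model = ResNet18().to(device)
model.load_state_dict(torch.load('resnet18_cifar10.pth', map_location=device))
model.eval()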