The previous step already implemented loading the images (pokemon had been misspelled as pokeman; this is now fixed). All that remains is to finish the network model and train it.
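For reference, everything below assumes the Pokemon class from the previous step exposes the usual Dataset interface. This is only a rough sketch of that interface; the real pokemon.py also builds the CSV index and applies the image transforms, and the names here are illustrative:

# Rough sketch of the interface assumed from the previous step's pokemon.py;
# attribute names and internals are illustrative, not the actual code.
from torch.utils.data import Dataset

class Pokemon(Dataset):
    def __init__(self, root, resize, mode):
        # root: dataset folder, resize: target image size,
        # mode: one of 'train' / 'val' / 'test'
        ...

    def __len__(self):
        # number of samples in the selected split
        ...

    def __getitem__(self, idx):
        # returns (img, label): img is a [3, resize, resize] float tensor,
        # label is an integer class index (5 classes here)
        ...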
Step 1: modify the ResNet model completed earlier
import torch
from torch import nn
from torch.nn import functional as F

class ResBlk(nn.Module):
    def __init__(self, ch_in, ch_out, stride=1):
        super(ResBlk, self).__init__()
        self.conv1 = nn.Conv2d(ch_in, ch_out, kernel_size=3, stride=stride, padding=1)
        self.bn1 = nn.BatchNorm2d(ch_out)
        self.conv2 = nn.Conv2d(ch_out, ch_out, kernel_size=3, stride=1, padding=1)
        self.bn2 = nn.BatchNorm2d(ch_out)
        # shortcut: 1x1 conv to match channels/stride when needed
        self.extra = nn.Sequential()
        if ch_out != ch_in:
            self.extra = nn.Sequential(
                nn.Conv2d(ch_in, ch_out, kernel_size=1, stride=stride),
                nn.BatchNorm2d(ch_out)
            )

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        # [b, ch_in, h, w] => [b, ch_out, h, w]
        out = self.extra(x) + out
        out = F.relu(out)
        return out

class ResNet18(nn.Module):
    def __init__(self, num_class):
        super(ResNet18, self).__init__()
        self.conv1 = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=3, stride=3, padding=0),
            nn.BatchNorm2d(64)
        )
        # followed 4 blocks
        # [b, 64, h, w] => [b, 128, h, w]
        self.blk1 = ResBlk(64, 128)
        # [b, 128, h, w] => [b, 256, h, w]
        self.blk2 = ResBlk(128, 256)
        # [b, 256, h, w] => [b, 512, h, w]
        self.blk3 = ResBlk(256, 512)
        # [b, 512, h, w] => [b, 512, h, w]
        self.blk4 = ResBlk(512, 512)
        self.outlayer = nn.Linear(512 * 1 * 1, num_class)

    def forward(self, x):
        x = F.relu(self.conv1(x))
        x = self.blk1(x)
        x = self.blk2(x)
        x = self.blk3(x)
        x = self.blk4(x)
        print('after conv:', x.shape)
        # [b, 512, h, w] => [b, 512, 1, 1]
        x = F.adaptive_avg_pool2d(x, [1, 1])
        print('after pool:', x.shape)
        x = x.view(x.size(0), -1)
        x = self.outlayer(x)
        return x

def main():
    blk = ResBlk(64, 128, stride=2)
    tmp = torch.randn(2, 64, 224, 224)
    out = blk(tmp)
    print('block:', out.shape)

    model = ResNet18(5)
    tmp = torch.randn(2, 3, 224, 224)
    out = model(tmp)
    print('resnet:', out.shape)

    p = sum(map(lambda p: p.numel(), model.parameters()))
    print('parameters size:', p)

if __name__ == '__main__':
    main()
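One detail worth calling out: the F.adaptive_avg_pool2d(x, [1, 1]) call is what lets the final nn.Linear(512*1*1, num_class) work no matter what spatial size comes out of blk4 (74x74 for a 224 input through the stride-3 stem). A quick standalone check with dummy data:

import torch
from torch.nn import functional as F

# Dummy feature map with the shape produced by blk4 for a 224x224 input.
x = torch.randn(2, 512, 74, 74)
x = F.adaptive_avg_pool2d(x, [1, 1])   # -> [2, 512, 1, 1]
x = x.view(x.size(0), -1)              # -> [2, 512], matches the Linear layer
print(x.shape)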
Step 2: plot the training process on the training, validation, and test sets
import torch
from torch import optim, nn
import visdom
import torchvision
from torch.utils.data import DataLoader
from pokemon import Pokemon
from resnet import ResNet18

batchsz = 32
lr = 1e-3
epochs = 10

device = torch.device('cuda')
torch.manual_seed(1234)

train_db = Pokemon('pokemon', 224, mode='train')
val_db = Pokemon('pokemon', 224, mode='val')
test_db = Pokemon('pokemon', 224, mode='test')
train_loader = DataLoader(train_db, batch_size=batchsz, shuffle=True,
                          num_workers=4)
val_loader = DataLoader(val_db, batch_size=batchsz, num_workers=2)
test_loader = DataLoader(test_db, batch_size=batchsz, num_workers=2)

def evalute(model, loader):
    # accuracy over an entire dataloader
    model.eval()
    correct = 0
    total = len(loader.dataset)
    for x, y in loader:
        x, y = x.to(device), y.to(device)
        with torch.no_grad():
            logits = model(x)
            pred = logits.argmax(dim=1)
        correct += torch.eq(pred, y).sum().float().item()
    return correct / total

def main():
    if hasattr(torch.cuda, 'empty_cache'):
        torch.cuda.empty_cache()

    model = ResNet18(5).to(device)
    optimizer = optim.Adam(model.parameters(), lr=lr)
    criteon = nn.CrossEntropyLoss()

    best_acc, best_epoch = 0, 0
    for epoch in range(epochs):
        for step, (x, y) in enumerate(train_loader):
            # x: [b, 3, 224, 224], y: [b]
            x, y = x.to(device), y.to(device)

            model.train()
            logits = model(x)
            loss = criteon(logits, y)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        if epoch % 2 == 0:
            val_acc = evalute(model, val_loader)
            if val_acc > best_acc:
                best_epoch = epoch
                best_acc = val_acc
                torch.save(model.state_dict(), 'best.mdl')

    print('best acc:', best_acc, 'best epoch:', best_epoch)

    model.load_state_dict(torch.load('best.mdl'))
    print('loaded from ckpt')

    test_acc = evalute(model, test_loader)
    print('test acc:', test_acc)

if __name__ == '__main__':
    main()
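Once best.mdl has been saved by the loop above, the checkpoint can also be reused outside the training script, for example to classify a single image. A minimal sketch; the file name and transforms here are placeholders, and the real preprocessing should mirror pokemon.py:

import torch
from PIL import Image
from torchvision import transforms
from resnet import ResNet18

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

model = ResNet18(5).to(device)
model.load_state_dict(torch.load('best.mdl', map_location=device))
model.eval()

# Illustrative preprocessing; normalization should match pokemon.py.
tf = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
])

img = tf(Image.open('some_pokemon.jpg').convert('RGB')).unsqueeze(0).to(device)
with torch.no_grad():
    pred = model(img).argmax(dim=1)
print('predicted class index:', pred.item())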
Troubleshooting error 1
Running this code raised an error. After some digging, the cause turned out to be an incorrect split of the raw data in pokemon.py; a careless mistake!
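The fix itself lives in pokemon.py, but for context this is the kind of 60/20/20 partition the three loaders above expect; a sketch of the idea, not the exact code:

def split(images, labels, mode):
    # 60% train, next 20% val, last 20% test, mirroring the corrected
    # split in pokemon.py (names here are illustrative).
    n = len(images)
    if mode == 'train':
        return images[:int(0.6 * n)], labels[:int(0.6 * n)]
    if mode == 'val':
        return images[int(0.6 * n):int(0.8 * n)], labels[int(0.6 * n):int(0.8 * n)]
    return images[int(0.8 * n):], labels[int(0.8 * n):]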
Troubleshooting error 2
Next, training ran out of GPU memory, so the ResNet was slimmed down to reduce the number of parameters used during training.
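Before and after slimming the model, it is easy to check how much GPU memory is actually in use with the counters torch.cuda already provides; a quick diagnostic to drop in right after a forward/backward pass (assumes a CUDA device):

import torch

# Rough GPU memory diagnostic; "reserved" is usually larger than "allocated"
# because the caching allocator keeps freed blocks around for reuse.
print('allocated: %.1f MB' % (torch.cuda.memory_allocated() / 1024 ** 2))
print('reserved:  %.1f MB' % (torch.cuda.memory_reserved() / 1024 ** 2))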
The ResNet model code after these simple modifications:
import torch
from torch import nn
from torch.nn import functional as F

class ResBlk(nn.Module):
    def __init__(self, ch_in, ch_out, stride=1):
        super(ResBlk, self).__init__()
        self.conv1 = nn.Conv2d(ch_in, ch_out, kernel_size=3, stride=stride, padding=1)
        self.bn1 = nn.BatchNorm2d(ch_out)
        self.conv2 = nn.Conv2d(ch_out, ch_out, kernel_size=3, stride=1, padding=1)
        self.bn2 = nn.BatchNorm2d(ch_out)
        # shortcut: 1x1 conv to match channels/stride when needed
        self.extra = nn.Sequential()
        if ch_out != ch_in:
            self.extra = nn.Sequential(
                nn.Conv2d(ch_in, ch_out, kernel_size=1, stride=stride),
                nn.BatchNorm2d(ch_out)
            )

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        # [b, ch_in, h, w] => [b, ch_out, h, w]
        out = self.extra(x) + out
        out = F.relu(out)
        return out

class ResNet18(nn.Module):
    def __init__(self, num_class):
        super(ResNet18, self).__init__()
        self.conv1 = nn.Sequential(
            nn.Conv2d(3, 16, kernel_size=3, stride=3, padding=0),
            nn.BatchNorm2d(16)
        )
        # followed 4 blocks
        # [b, 16, h, w] => [b, 32, h, w]
        self.blk1 = ResBlk(16, 32, stride=3)
        # [b, 32, h, w] => [b, 64, h, w]
        self.blk2 = ResBlk(32, 64, stride=3)
        # [b, 64, h, w] => [b, 128, h, w]
        self.blk3 = ResBlk(64, 128, stride=2)
        # [b, 128, h, w] => [b, 256, h, w]
        self.blk4 = ResBlk(128, 256, stride=2)
        # for a 224x224 input the feature map here is [b, 256, 3, 3]
        self.outlayer = nn.Linear(256 * 3 * 3, num_class)

    def forward(self, x):
        x = F.relu(self.conv1(x))
        x = self.blk1(x)
        x = self.blk2(x)
        x = self.blk3(x)
        x = self.blk4(x)
        print(x.shape)
        x = x.view(x.size(0), -1)
        x = self.outlayer(x)
        return x

def main():
    blk = ResBlk(64, 128, stride=2)
    tmp = torch.randn(2, 64, 224, 224)
    out = blk(tmp)
    print('block:', out.shape)

    model = ResNet18(5)
    tmp = torch.randn(2, 3, 224, 224)
    out = model(tmp)
    print('resnet:', out.shape)

    p = sum(map(lambda p: p.numel(), model.parameters()))
    print('parameters size:', p)

if __name__ == '__main__':
    main()
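The nn.Linear(256*3*3, num_class) size comes from tracking a 224x224 input through the strides: 224 -> 74 (stem, stride 3, no padding) -> 25 (blk1, stride 3) -> 9 (blk2, stride 3) -> 5 (blk3, stride 2) -> 3 (blk4, stride 2); the second conv in each block keeps the size. A few lines to verify that arithmetic:

# Standard conv output size: floor((size + 2*padding - kernel) / stride) + 1
def conv_out(size, kernel, stride, padding):
    return (size + 2 * padding - kernel) // stride + 1

size = 224
size = conv_out(size, 3, 3, 0)   # stem -> 74
size = conv_out(size, 3, 3, 1)   # blk1 -> 25
size = conv_out(size, 3, 3, 1)   # blk2 -> 9
size = conv_out(size, 3, 2, 1)   # blk3 -> 5
size = conv_out(size, 3, 2, 1)   # blk4 -> 3
print(size, 256 * size * size)   # 3 2304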
Visualize the loss and the accuracy:
import torch
from torch import optim, nn
import visdom
import torchvision
from torch.utils.data import DataLoader
from pokemon import Pokemon
from resnet import ResNet18

batchsz = 32
lr = 1e-3
epochs = 10

device = torch.device('cuda')
torch.manual_seed(1234)

train_db = Pokemon('pokemon', 224, mode='train')
val_db = Pokemon('pokemon', 224, mode='val')
test_db = Pokemon('pokemon', 224, mode='test')
train_loader = DataLoader(train_db, batch_size=batchsz, shuffle=True,
                          num_workers=4)
val_loader = DataLoader(val_db, batch_size=batchsz, num_workers=2)
test_loader = DataLoader(test_db, batch_size=batchsz, num_workers=2)

viz = visdom.Visdom()

def evalute(model, loader):
    # accuracy over an entire dataloader
    model.eval()
    correct = 0
    total = len(loader.dataset)
    for x, y in loader:
        x, y = x.to(device), y.to(device)
        with torch.no_grad():
            logits = model(x)
            pred = logits.argmax(dim=1)
        correct += torch.eq(pred, y).sum().float().item()
    return correct / total

def main():
    model = ResNet18(5).to(device)
    optimizer = optim.Adam(model.parameters(), lr=lr)
    criteon = nn.CrossEntropyLoss()

    best_acc, best_epoch = 0, 0
    global_step = 0
    viz.line([0], [-1], win='loss', opts=dict(title='loss'))
    viz.line([0], [-1], win='val_acc', opts=dict(title='val_acc'))
    for epoch in range(epochs):
        for step, (x, y) in enumerate(train_loader):
            # x: [b, 3, 224, 224], y: [b]
            x, y = x.to(device), y.to(device)

            model.train()
            logits = model(x)
            loss = criteon(logits, y)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            viz.line([loss.item()], [global_step], win='loss', update='append')
            global_step += 1

        if epoch % 1 == 0:
            val_acc = evalute(model, val_loader)
            if val_acc > best_acc:
                best_epoch = epoch
                best_acc = val_acc
                torch.save(model.state_dict(), 'best.mdl')
                viz.line([val_acc], [global_step], win='val_acc', update='append')

    print('best acc:', best_acc, 'best epoch:', best_epoch)

    model.load_state_dict(torch.load('best.mdl'))
    print('loaded from ckpt!')

    test_acc = evalute(model, test_loader)
    print('test acc:', test_acc)

if __name__ == '__main__':
    main()
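Note that the curves only appear if a visdom server is already running; start one in a separate terminal with python -m visdom.server and open http://localhost:8097. A small guard makes a missing server fail loudly instead of silently dropping every plot (check_connection is part of the visdom client):

import visdom

viz = visdom.Visdom()
# Fail fast if no visdom server is reachable.
if not viz.check_connection():
    raise RuntimeError('visdom server not running; start it with: python -m visdom.server')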