1、数据集
按照教程1制作的数据集
2、训练所使用的模型
(1)使用的resnet.py
(2)代码如下:
import torch
from torch import nn
from torch.nn import functional as F
class ResBlk(nn.Module):
    """
    Basic residual block: two 3x3 conv+BN layers plus a shortcut
    connection (identity, or a 1x1 projection when the output shape
    differs from the input shape).
    """

    def __init__(self, ch_in, ch_out, stride=1):
        """
        :param ch_in: number of input channels
        :param ch_out: number of output channels
        :param stride: stride of the first conv (spatial downsampling factor)
        """
        super(ResBlk, self).__init__()
        self.conv1 = nn.Conv2d(ch_in, ch_out, kernel_size=3, stride=stride, padding=1)
        self.bn1 = nn.BatchNorm2d(ch_out)
        self.conv2 = nn.Conv2d(ch_out, ch_out, kernel_size=3, stride=1, padding=1)
        self.bn2 = nn.BatchNorm2d(ch_out)

        self.extra = nn.Sequential()
        # BUGFIX: the shortcut must also be projected when stride != 1,
        # otherwise the element-wise add in forward() fails on mismatched
        # spatial sizes. The original only checked ch_out != ch_in.
        if ch_out != ch_in or stride != 1:
            # [b, ch_in, h, w] => [b, ch_out, h/stride, w/stride]
            self.extra = nn.Sequential(
                nn.Conv2d(ch_in, ch_out, kernel_size=1, stride=stride),
                nn.BatchNorm2d(ch_out)
            )

    def forward(self, x):
        """
        :param x: [b, ch_in, h, w]
        :return: [b, ch_out, h/stride, w/stride]
        """
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        # short cut: element-wise add of the (possibly projected) input
        out = self.extra(x) + out
        out = F.relu(out)
        return out
class ResNet18(nn.Module):
    """
    Small ResNet-style classifier: one stem conv followed by four
    residual stages and a linear head.

    With 224x224 input the spatial size shrinks
    224 -> 74 -> 25 -> 9 -> 5 -> 3, hence the 256*3*3 flattened size.
    """

    def __init__(self, num_class):
        super(ResNet18, self).__init__()
        # stem: [b, 3, 224, 224] => [b, 16, 74, 74]
        self.conv1 = nn.Sequential(
            nn.Conv2d(3, 16, kernel_size=3, stride=3, padding=0),
            nn.BatchNorm2d(16)
        )
        # four residual stages, each doubling the channel count
        self.blk1 = ResBlk(16, 32, stride=3)    # => [b, 32, 25, 25]
        self.blk2 = ResBlk(32, 64, stride=3)    # => [b, 64, 9, 9]
        self.blk3 = ResBlk(64, 128, stride=2)   # => [b, 128, 5, 5]
        self.blk4 = ResBlk(128, 256, stride=2)  # => [b, 256, 3, 3]
        # classification head over the flattened feature map
        self.outlayer = nn.Linear(256 * 3 * 3, num_class)

    def forward(self, x):
        """
        :param x: [b, 3, 224, 224] image batch
        :return: [b, num_class] raw logits
        """
        h = F.relu(self.conv1(x))
        for stage in (self.blk1, self.blk2, self.blk3, self.blk4):
            h = stage(h)
        h = h.flatten(1)  # [b, 256*3*3]
        return self.outlayer(h)
def main():
    """Smoke-test ResBlk and ResNet18 with random input and report the
    total parameter count."""
    blk = ResBlk(64, 128)
    dummy = torch.randn(2, 64, 224, 224)
    print('block:', blk(dummy).shape)

    model = ResNet18(5)
    images = torch.randn(2, 3, 224, 224)
    print('resnet:', model(images).shape)

    # map() returns a lazy iterator in Python 3; sum() consumes it to give
    # the total number of parameter elements across the model.
    n_params = sum(map(lambda w: w.numel(), model.parameters()))
    print('parameters size:', n_params)


if __name__ == '__main__':
    main()
3、训练所使用的代码visdom版本
3.1如果没安装visdom,安装一下
#(1)如用anaconda激活你自己的环境
# conda env list
# conda activate chentorch_cp310
#(2)安装
# pip install visdom
#(3)使用
# python -m visdom.server
# http://localhost:8097/
3.2代码如下
import torch
from torch import optim, nn
import visdom
# import torchvision
from torch.utils.data import DataLoader
from torchvision import transforms,datasets
# from pokemon import Pokemon
from resnet import ResNet18
from PIL import Image
# ---- hyper-parameters -------------------------------------------------
batchsz = 32
lr = 1e-3
epochs = 10
img_resize = 224

# device = torch.device('cuda')
device = torch.device('cpu')
torch.manual_seed(1234)

# tf = transforms.Compose([
# transforms.Resize((224,224)),
# transforms.ToTensor(),
# ])
# The transform pipeline expects PIL.Image inputs.
tf = transforms.Compose([
    # anonymous-function variant (kept for reference):
    # lambda x:Image.open(x).convert('RGB'), # string path= > image data
    transforms.Resize((int(img_resize*1.25), int(img_resize*1.25))),
    transforms.RandomRotation(15),
    transforms.CenterCrop(img_resize),
    transforms.ToTensor(),
    # ImageNet channel statistics
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225])
])

# db = torchvision.datasets.ImageFolder(root='pokemon', transform=tf)
# ImageFolder maps each sub-directory to one class label.
train_db = datasets.ImageFolder(root='D:/pytorch_learning2022/data/pokeman/train', transform=tf)
print(train_db.class_to_idx)
print("个数")  # "count": number of training samples follows
print(len(train_db))
val_db = datasets.ImageFolder(root='D:/pytorch_learning2022/data/pokeman/val', transform=tf)
test_db = datasets.ImageFolder(root='D:/pytorch_learning2022/data/pokeman/test', transform=tf)

# train_db = Pokemon('pokemon', 224, mode='train')
# val_db = Pokemon('pokemon', 224, mode='val')
# test_db = Pokemon('pokemon', 224, mode='test')
# train_loader = DataLoader(train_db, batch_size=batchsz, shuffle=True,
# num_workers=4)
# val_loader = DataLoader(val_db, batch_size=batchsz, num_workers=2)
# test_loader = DataLoader(test_db, batch_size=batchsz, num_workers=2)
train_loader = DataLoader(train_db, batch_size=batchsz, shuffle=True)
val_loader = DataLoader(val_db, batch_size=batchsz)
test_loader = DataLoader(test_db, batch_size=batchsz)

# visdom client; requires a running `python -m visdom.server`
viz = visdom.Visdom()
def evalute(model, loader):
    """
    Compute classification accuracy of ``model`` over all batches in ``loader``.

    :param model: network returning [b, num_class] logits
    :param loader: DataLoader yielding (images, labels) batches
    :return: accuracy in [0, 1]

    NOTE: uses the module-level ``device`` global defined with the loaders.
    """
    model.eval()
    correct = 0
    total = len(loader.dataset)
    # Robustness: an empty dataset would otherwise raise ZeroDivisionError.
    if total == 0:
        return 0.0
    for x, y in loader:
        x, y = x.to(device), y.to(device)
        with torch.no_grad():
            logits = model(x)
            pred = logits.argmax(dim=1)
        correct += torch.eq(pred, y).sum().float().item()
    return correct / total
def main():
    """Train ResNet18 on the pokemon dataset, checkpoint the best model by
    validation accuracy, and report test accuracy; curves go to visdom."""
    # (1) activate your anaconda environment if needed
    # conda env list
    # conda activate chentorch_cp310
    # (2) install
    # pip install visdom
    # (3) start the server before running:
    # python -m visdom.server
    # http://localhost:8097/
    model = ResNet18(5).to(device)
    optimizer = optim.Adam(model.parameters(), lr=lr)
    criteon = nn.CrossEntropyLoss()

    best_acc, best_epoch = 0, 0
    global_step = 0
    # initialise the two visdom curves with a dummy point
    viz.line([0], [-1], win='loss', opts=dict(title='loss'))
    viz.line([0], [-1], win='val_acc', opts=dict(title='val_acc'))
    for epoch in range(epochs):
        for step, (x,y) in enumerate(train_loader):
            # x: [b, 3, 224, 224], y: [b]
            x, y = x.to(device), y.to(device)
            model.train()
            logits = model(x)
            loss = criteon(logits, y)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            viz.line([loss.item()], [global_step], win='loss', update='append')
            global_step += 1
        # validate once per epoch (epoch % 1 == 0 is always true)
        if epoch % 1 == 0:
            val_acc = evalute(model, val_loader)
            if val_acc> best_acc:
                best_epoch = epoch
                best_acc = val_acc
                # checkpoint the best-so-far weights
                torch.save(model.state_dict(), 'best.mdl')
            viz.line([val_acc], [global_step], win='val_acc', update='append')
    print('best acc:', best_acc, 'best epoch:', best_epoch)

    # restore the best checkpoint before the final test evaluation
    model.load_state_dict(torch.load('best.mdl'))
    print('loaded from ckpt!')
    test_acc = evalute(model, test_loader)
    print('test acc:', test_acc)


if __name__ == '__main__':
    main()
4、训练所使用的代码tensorboardX版本
4.1如果没安装tensorboardX,先简单安装一下
(1)
#(1)如用anaconda激活你自己的环境
conda env list
conda activate chentorch_cp310
#(2)安装
pip install tensorboardX
(2)实现流程
from tensorboardX import SummaryWriter #(1)引入tensorboardX
.................
.................
#(2)初始化,注意可以给定路径
writer = SummaryWriter('runs/chen_pokeman_test1')
.................
.................
#(3)将batch中TrainLoss添加到tensorboardX中
writer.add_scalar('TrainLoss', loss.item(), global_step=global_step)
.................
.................
#(4)将epoch中TestAcc添加到tensorboardX中
writer.add_scalar('TestAcc', val_acc, global_step=epoch)
.................
.................
#(5)关闭writer
writer.close()
(6)查看
#(6)查看tensorboardx的方法
#tensorboard --logdir=D:/pytorch_learning2022/3chen_classify_test2022/1pokeman_sample/runs/chen_pokeman_test1
#tensorboard --logdir=runs/chen_pokeman_test1
#http://localhost:6006/
4.2代码实现
import torch
from torch import optim, nn
# import visdom
from tensorboardX import SummaryWriter #(1)引入tensorboardX
# import torchvision
from torch.utils.data import DataLoader
from torchvision import transforms,datasets
# from pokemon import Pokemon
from resnet import ResNet18
from PIL import Image
# ---- hyper-parameters -------------------------------------------------
batchsz = 32
lr = 1e-3
epochs = 10
img_resize = 224

# device = torch.device('cuda')
device = torch.device('cpu')
torch.manual_seed(1234)

# tf = transforms.Compose([
# transforms.Resize((224,224)),
# transforms.ToTensor(),
# ])
# The transform pipeline expects PIL.Image inputs.
tf = transforms.Compose([
    # anonymous-function variant (kept for reference):
    # lambda x:Image.open(x).convert('RGB'), # string path= > image data
    transforms.Resize((int(img_resize*1.25), int(img_resize*1.25))),
    transforms.RandomRotation(15),
    transforms.CenterCrop(img_resize),
    transforms.ToTensor(),
    # ImageNet channel statistics
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225])
])

# db = torchvision.datasets.ImageFolder(root='pokemon', transform=tf)
# ImageFolder maps each sub-directory to one class label.
train_db = datasets.ImageFolder(root='D:/pytorch_learning2022/data/pokeman/train', transform=tf)
print(train_db.class_to_idx)
print("个数")  # "count": number of training samples follows
print(len(train_db))
val_db = datasets.ImageFolder(root='D:/pytorch_learning2022/data/pokeman/val', transform=tf)
test_db = datasets.ImageFolder(root='D:/pytorch_learning2022/data/pokeman/test', transform=tf)

# train_db = Pokemon('pokemon', 224, mode='train')
# val_db = Pokemon('pokemon', 224, mode='val')
# test_db = Pokemon('pokemon', 224, mode='test')
# train_loader = DataLoader(train_db, batch_size=batchsz, shuffle=True,
# num_workers=4)
# val_loader = DataLoader(val_db, batch_size=batchsz, num_workers=2)
# test_loader = DataLoader(test_db, batch_size=batchsz, num_workers=2)
train_loader = DataLoader(train_db, batch_size=batchsz, shuffle=True)
val_loader = DataLoader(val_db, batch_size=batchsz)
test_loader = DataLoader(test_db, batch_size=batchsz)

# viz = visdom.Visdom()
# (2) create the tensorboardX writer; event files go under this run directory
writer = SummaryWriter('runs/chen_pokeman_test1')
def evalute(model, loader):
    """Return the fraction of samples in ``loader`` that ``model``
    classifies correctly (accuracy in [0, 1])."""
    model.eval()
    n_samples = len(loader.dataset)
    n_correct = 0
    for images, labels in loader:
        images, labels = images.to(device), labels.to(device)
        with torch.no_grad():
            logits = model(images)
            predictions = logits.argmax(dim=1)
        n_correct += torch.eq(predictions, labels).sum().float().item()
    return n_correct / n_samples
def main():
    """Train ResNet18 on the pokemon dataset and log curves to tensorboardX.

    The checkpoint with the highest validation accuracy is saved to
    'best.mdl' and reloaded for the final test evaluation.
    """
    model = ResNet18(5).to(device)
    optimizer = optim.Adam(model.parameters(), lr=lr)
    criteon = nn.CrossEntropyLoss()

    best_acc, best_epoch = 0, 0
    global_step = 0

    for epoch in range(epochs):
        # ---- one pass over the training set ----
        for step, (x, y) in enumerate(train_loader):
            x = x.to(device)  # x: [b, 3, 224, 224]
            y = y.to(device)  # y: [b]
            model.train()
            loss = criteon(model(x), y)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            # (3) log the per-batch training loss
            writer.add_scalar('TrainLoss', loss.item(), global_step=global_step)
            global_step += 1

        # ---- validate every epoch (epoch % 1 == 0 is always true) ----
        if epoch % 1 == 0:
            val_acc = evalute(model, val_loader)
            if val_acc > best_acc:
                best_epoch, best_acc = epoch, val_acc
                torch.save(model.state_dict(), 'best.mdl')
            # (4) log the per-epoch validation accuracy
            writer.add_scalar('TestAcc', val_acc, global_step=epoch)

    print('best acc:', best_acc, 'best epoch:', best_epoch)

    # reload the best checkpoint and measure test accuracy
    model.load_state_dict(torch.load('best.mdl'))
    print('loaded from ckpt!')
    test_acc = evalute(model, test_loader)
    print('test acc:', test_acc)

    # (5) flush and close the writer
    writer.close()
    # (6) inspect the run with:
    # tensorboard --logdir=runs/chen_pokeman_test1
    # http://localhost:6006/


if __name__ == '__main__':
    main()