#完整模型训练套路
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, Conv2d, MaxPool2d, Flatten, Linear
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
import torchvision
# ---- Dataset preparation ----
# CIFAR-10, converted to tensors; downloaded to ./dataset on first run.
train_data = torchvision.datasets.CIFAR10(
    "./dataset", train=True, download=True,
    transform=torchvision.transforms.ToTensor())
test_data = torchvision.datasets.CIFAR10(
    "./dataset", train=False, download=True,
    transform=torchvision.transforms.ToTensor())
# Dataset sizes (test size is also used later to compute accuracy)
train_data_size = len(train_data)
test_data_size = len(test_data)
print("训练数据集的长度为{}".format(train_data_size))
print("测试数据集的长度为{}".format(test_data_size))
# ---- DataLoaders ----
# NOTE: the misspelled name `train_dtatloader` is kept on purpose — the
# training loop below refers to it.
# Fix: shuffle the *training* data each epoch (standard practice); the
# original shuffled only the test loader.
train_dtatloader = DataLoader(train_data, batch_size=64, shuffle=True)
test_dataloader = DataLoader(test_data, batch_size=64, shuffle=True, num_workers=0)
# ---- Network definition ----
class tudui(nn.Module):
    """CNN classifier for CIFAR-10: input (N, 3, 32, 32) -> logits (N, 10).

    Three conv(5x5, pad 2) + 2x2 max-pool stages, then two linear layers.
    (Lowercase class name kept for compatibility with the rest of the script.)
    """

    def __init__(self):
        super(tudui, self).__init__()
        # Spatial size: 32 -> 16 -> 8 -> 4 after the three max-pools,
        # so the flattened feature vector is 64 * 4 * 4 = 1024 long.
        self.model = nn.Sequential(
            Conv2d(in_channels=3, out_channels=32, kernel_size=5, stride=1, padding=2),
            MaxPool2d(2),
            Conv2d(32, 32, 5, 1, 2),
            MaxPool2d(2),
            Conv2d(32, 64, 5, 1, 2),
            MaxPool2d(2),
            Flatten(),
            Linear(64 * 4 * 4, 64),  # feature projection
            Linear(64, 10),          # one logit per CIFAR-10 class
        )

    def forward(self, x):
        """Return class logits of shape (N, 10) for x of shape (N, 3, 32, 32)."""
        return self.model(x)
# ---- Model, loss, optimizer, bookkeeping ----
tudui = tudui()  # instantiate the network (rebinds the class name to the instance)
loss_fn = CrossEntropyLoss()  # cross-entropy for 10-way classification
# Plain SGD optimizer
learning_rate = 1e-2
optimizer = torch.optim.SGD(tudui.parameters(), lr=learning_rate)
# Counters and schedule
total_train_step = 0  # optimizer steps taken so far
total_test_step = 0   # completed evaluation passes so far
epoch = 1             # number of epochs to run
# TensorBoard writer (events go under ./logs_train2)
writer = SummaryWriter("./logs_train2")
# ---- Train / evaluate loop ----
for i in range(epoch):
    print("-----第{}轮训练开始------".format(i + 1))

    # Training phase
    tudui.train()  # only affects layers like Dropout / BatchNorm
    for data in train_dtatloader:
        imgs, targets = data
        outputs = tudui(imgs)
        loss = loss_fn(outputs, targets)
        # Standard optimization step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        total_train_step += 1
        if total_train_step % 100 == 0:  # log every 100 steps
            # Fix: print/log the scalar value, not the tensor repr
            print("训练次数{},loss:{}".format(total_train_step, loss.item()))
            writer.add_scalar("train_loss", loss.item(), total_train_step)

    # Evaluation phase
    tudui.eval()  # only affects layers like Dropout / BatchNorm
    total_test_loss = 0.0
    total_accuracy = 0
    with torch.no_grad():  # no gradients needed during evaluation
        for data in test_dataloader:
            imgs, targets = data
            outputs = tudui(imgs)
            loss = loss_fn(outputs, targets)
            # Fix: accumulate plain Python numbers (not tensors)
            total_test_loss += loss.item()
            # argmax over the class dimension, count correct predictions
            total_accuracy += (outputs.argmax(1) == targets).sum().item()
    print("整体测试集上的正确率:{}".format(total_accuracy / test_data_size))
    print("整体测试集上的loss:{}".format(total_test_loss))
    # Fix: the original logged only the last batch's loss/accuracy here;
    # log the epoch-level aggregates instead.
    writer.add_scalar("test_loss", total_test_loss, total_test_step)
    writer.add_scalar("test_acc", total_accuracy / test_data_size, total_test_step)
    total_test_step += 1

    # Save the model weights after every epoch
    torch.save(tudui.state_dict(), "tudui_{}.pth".format(i))
    # torch.save(tudui, "tudui_{}.pth".format(i))  # alternative: save whole model
    print("模型已保存")

writer.close()
# pytorch学习—模型训练全流程
# 最新推荐文章于 2023-08-05 17:43:03 发布