Convolutional Neural Network: Training Code (CIFAR-10)

This post walks through building and training a convolutional neural network (CNN) on the CIFAR-10 dataset with PyTorch. It first imports the required libraries and the dataset and defines the CNN model, then sets up the training and test data loaders and trains the network with an SGD optimizer and cross-entropy loss. It also logs the training loss with TensorBoard as training proceeds, and evaluates and saves the model after every epoch.

```python
import torch
import torchvision
from torch import nn
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter

# Training device
device = torch.device('cpu')

# TensorBoard writer
writer = SummaryWriter('./log_train')

# Prepare the dataset
train_data = torchvision.datasets.CIFAR10(root='./data', train=True,
                                          transform=torchvision.transforms.ToTensor(),
                                          download=True)
test_data = torchvision.datasets.CIFAR10(root='./data', train=False,
                                         transform=torchvision.transforms.ToTensor(),
                                         download=True)

train_data_size = len(train_data)
test_data_size = len(test_data)
print('Training set size: {}'.format(train_data_size))
print('Test set size: {}'.format(test_data_size))

# Load the datasets with DataLoader
train_dataloader = DataLoader(train_data, batch_size=64)
test_dataloader = DataLoader(test_data, batch_size=64)

# Build the network
class li(nn.Module):
    def __init__(self):
        super(li, self).__init__()
        self.model = nn.Sequential(
            nn.Conv2d(3, 32, 5, 1, 2),
            nn.MaxPool2d(2),
            nn.Conv2d(32, 32, 5, 1, 2),
            nn.MaxPool2d(2),
            nn.Conv2d(32, 64, 5, 1, 2),
            nn.MaxPool2d(2),
            nn.Flatten(),
            nn.Linear(64 * 4 * 4, 64),
            nn.Linear(64, 10)
        )

    def forward(self, x):
        x = self.model(x)
        return x

LI = li()
LI = LI.to(device)

# Loss function
loss_fn = nn.CrossEntropyLoss()
loss_fn = loss_fn.to(device)

# Optimizer
learning_rate = 0.01
optimizer = torch.optim.SGD(LI.parameters(), lr=learning_rate)

# Training bookkeeping
total_train_step = 0   # number of training steps so far
total_test_step = 0    # number of evaluation rounds so far
epoch = 20             # number of training epochs

for i in range(epoch):
    print('--------- Epoch {} starts ---------'.format(i + 1))

    # Training phase
    LI.train()
    for data in train_dataloader:
        imgs, targets = data
        imgs = imgs.to(device)
        targets = targets.to(device)
        outputs = LI(imgs)
        loss = loss_fn(outputs, targets)

        optimizer.zero_grad()
        loss.backward()    # backpropagate to compute the gradients
        optimizer.step()   # update the parameters

        total_train_step = total_train_step + 1
        if total_train_step % 100 == 0:
            print('Step {}, loss: {}'.format(total_train_step, loss.item()))
            writer.add_scalar('train_loss', loss.item(), total_train_step)

    # Evaluation phase
    LI.eval()   # switch to evaluation mode
    total_test_loss = 0
    total_accuracy = 0
    with torch.no_grad():
        for data in test_dataloader:
            imgs, targets = data
            imgs = imgs.to(device)
            targets = targets.to(device)
            outputs = LI(imgs)
            loss = loss_fn(outputs, targets)
            total_test_loss = total_test_loss + loss.item()
            accuracy = (outputs.argmax(1) == targets).sum()
            total_accuracy += accuracy

    print('Total loss on the test set: {}'.format(total_test_loss))
    print('Overall accuracy on the test set: {}'.format(total_accuracy / test_data_size))
    writer.add_scalar('test_loss', total_test_loss, total_test_step)
    writer.add_scalar('test_accuracy', total_accuracy / test_data_size, total_test_step)
    total_test_step += 1

    # Save the model after every epoch
    torch.save(LI, 'li{}.pth'.format(i))
    print('saved')

writer.close()
```
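In the `nn.Sequential` above, `nn.Linear(64 * 4 * 4, 64)` gets its input size from the three 2×2 max-pools, which shrink the 32×32 CIFAR-10 image to 16×16, then 8×8, then 4×4, while the last convolution leaves 64 channels, so the flattened vector has 64 × 4 × 4 = 1024 elements. A quick way to sanity-check this is to push a dummy batch through the model; the snippet below is only a sketch and assumes the `li` class defined above is importable or pasted alongside it.

```python
import torch

# Sanity check of the architecture: 32x32 -> 16x16 -> 8x8 -> 4x4 after three
# 2x2 max-pools, so Flatten produces 64 * 4 * 4 = 1024 features per image.
net = li()                            # the model class defined in the script above
dummy = torch.ones((64, 3, 32, 32))   # a fake batch of 64 CIFAR-10-sized images
out = net(dummy)
print(out.shape)                      # expected: torch.Size([64, 10])
```

If the linear layer's input size did not match, this forward pass would raise a shape-mismatch error, which makes the check useful whenever the convolution or pooling settings change.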
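After each epoch the whole module is saved with `torch.save(LI, 'li{}.pth'.format(i))`, and the loss/accuracy curves are written to `./log_train`, which can be viewed with `tensorboard --logdir=log_train`. Below is a minimal sketch of how one of those checkpoints could be loaded for inference; the file name `li19.pth` (the last of the 20 epochs) and the use of the first test image are illustrative assumptions, not part of the original post.

```python
import torch
import torchvision

# Minimal inference sketch (assumed usage). Because torch.save stored the whole
# module, the `li` class definition must be available when loading; on newer
# PyTorch versions you may also need torch.load(..., weights_only=False).
model = torch.load('li19.pth', map_location='cpu')
model.eval()

test_data = torchvision.datasets.CIFAR10(root='./data', train=False,
                                         transform=torchvision.transforms.ToTensor(),
                                         download=True)
img, target = test_data[0]   # one test image, shape (3, 32, 32)
img = img.unsqueeze(0)       # add a batch dimension -> (1, 3, 32, 32)

with torch.no_grad():
    output = model(img)
print('predicted class:', output.argmax(1).item(), 'true class:', target)
```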