CIFAR10-分类模型完整套路

一、网络模型

十分类

二、具体步骤

搭建神经网络并验证网络正确性

准备数据集-dataset(训练数据集、验证数据集)

准备DataLoader

获取数据集尺寸

变量定义

损失函数

优化器

训练

验证

保存每一次训练后的模型

数据可视化

三、代码示例

NeuralNetwork_CIFAR10.py(神经网络):

import torch
from torch import nn

# CIFAR10 classification network: (N, 3, 32, 32) images -> (N, 10) class logits.
class DQLD(nn.Module):
    """Small convolutional classifier for CIFAR10 (10 classes)."""

    def __init__(self):
        super(DQLD, self).__init__()
        # Three conv+pool stages halve the spatial size each time,
        # then a two-layer linear head produces the 10 logits.
        # NOTE: the attribute is kept as `module1` so state_dict keys
        # stay compatible with checkpoints saved by the original code.
        self.module1 = nn.Sequential(
            nn.Conv2d(3, 32, 5, stride=1, padding=2),    # 32x32 -> 32x32
            nn.MaxPool2d(2),                             # -> 16x16
            nn.Conv2d(32, 32, 5, stride=1, padding=2),
            nn.MaxPool2d(2),                             # -> 8x8
            nn.Conv2d(32, 64, 5, stride=1, padding=2),
            nn.MaxPool2d(2),                             # -> 4x4
            nn.Flatten(),                                # 64 * 4 * 4 = 1024
            nn.Linear(1024, 64),
            nn.Linear(64, 10),
        )

    def forward(self, x):
        """Run the sequential stack and return raw (unsoftmaxed) logits."""
        return self.module1(x)

#  Sanity check: a (64, 3, 32, 32) batch should map to (64, 10) logits.
if __name__ == '__main__':
    net = DQLD()
    dummy_batch = torch.ones((64, 3, 32, 32))
    logits = net(dummy_batch)
    print(logits.shape)

Train_CIFAR10.py(训练类):

import torch.optim
import torchvision as torchvision
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
import torch.nn as nn
import NeuralNetwork_CIFAR10

# 1. Prepare the datasets
# Training set
train_dataset = torchvision.datasets.CIFAR10(root="../dataset", train=True, transform=torchvision.transforms.ToTensor(), download=True)
train_dataloader = DataLoader(train_dataset, 64)
# Validation set
validation_dataset = torchvision.datasets.CIFAR10(root="../dataset", train=False, transform=torchvision.transforms.ToTensor(), download=True)
validation_dataloader = DataLoader(validation_dataset, 64)

# 2. Dataset sizes
train_size = len(train_dataset)
validation_size = len(validation_dataset)
print("训练集大小:{}".format(train_size))
print("验证集大小:{}".format(validation_size))

# 3. Bookkeeping variables

# step counter within the current training epoch
train_step = 0
# validation step counter (kept from the article; unused below)
val_step = 0
# number of train/validate rounds
epoch = 10
# per-epoch training loss total (reset at the start of every epoch)
loss_sum = 0
# tensorboard writer for the loss curves
# FIX: directory renamed from the misspelled "Trian_Vai_logs" so every
# script in this article logs to the same place.
writer = SummaryWriter("../Train_Vai_logs")

# 4. Loss function (CrossEntropyLoss expects raw logits + integer class targets)
loss_fun = nn.CrossEntropyLoss()

# 5. Model
dqld = NeuralNetwork_CIFAR10.DQLD()

# 6. Optimizer
learning_rate = 0.001
optimizer = torch.optim.SGD(dqld.parameters(), lr=learning_rate)

# 7. Train + validate + save a checkpoint per epoch
for i in range(epoch):
    train_step = 0
    # BUGFIX: reset the per-epoch loss accumulator; it previously carried
    # over between epochs, so the printed "per-epoch" total kept growing.
    loss_sum = 0
    print("-----------第{}轮训练----------".format(i+1))
    print("---第{}轮训练开始---".format(i + 1))
    for data in train_dataloader:
        imgs, targets = data
        output = dqld(imgs)
        loss = loss_fun(output, targets)
        # Standard optimization step: clear grads, backprop, update weights.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        loss_sum = loss_sum + loss.item()
        train_step = train_step + 1
        if train_step % 100 == 0:
            print("第{}轮->第{}次训练损失值loss:{}".format(i+1, train_step, loss.item()))
            writer.add_scalar("train_loss_{}".format(i+1), loss.item(), train_step)
    print("---第{}轮训练总损失和:{}---".format(i+1, loss_sum))
    print("---第{}轮训练结束---".format(i + 1))

    # After each training epoch, measure loss on the validation set
    # and save a checkpoint of the whole model.
    total_validation_loss = 0
    print("---第{}轮验证开始---".format(i + 1))
    # no_grad: inference only, no gradients needed
    with torch.no_grad():
        for data in validation_dataloader:
            imgs, targets = data
            output = dqld(imgs)
            loss = loss_fun(output, targets)
            total_validation_loss = total_validation_loss + loss.item()

    writer.add_scalar("validation_loss_sum", total_validation_loss, i+1)
    torch.save(dqld, "CIFAR10_module{}.pth".format(i+1))
    print("---第{}轮验证总损失和:{}---".format(i+1, total_validation_loss))
    print("---第{}轮验证结束---".format(i + 1))
writer.close()

四、数据可视化

十次训练的具体的损失值及每次训练后验证集的总验证值

每一轮训练每100步显示当前的损失值,每张图对应一轮训练

可以看出每次训练优化参数后,loss损失值在减小

整体看一下10次训练验证集上的损失值总和

五、分类验证优化

分类问题可通过获取argmax(1)进行横向获取最大值下标,与targets标签进行比较,如果一样则预测正确。

代码:

import torch.optim
import torchvision as torchvision
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
import torch.nn as nn
import NeuralNetwork_CIFAR10

# 1. Prepare the datasets
# Training set
train_dataset = torchvision.datasets.CIFAR10(root="../dataset", train=True, transform=torchvision.transforms.ToTensor(), download=True)
train_dataloader = DataLoader(train_dataset, 64)
# Validation set
validation_dataset = torchvision.datasets.CIFAR10(root="../dataset", train=False, transform=torchvision.transforms.ToTensor(), download=True)
validation_dataloader = DataLoader(validation_dataset, 64)

# 2. Dataset sizes
train_size = len(train_dataset)
validation_size = len(validation_dataset)
print("训练集大小:{}".format(train_size))
print("验证集大小:{}".format(validation_size))

# 3. Bookkeeping variables

# step counter within the current training epoch
train_step = 0
# validation step counter (kept from the article; unused below)
val_step = 0
# number of train/validate rounds
epoch = 10
# per-epoch training loss total (reset at the start of every epoch)
loss_sum = 0
# tensorboard writer for the loss/accuracy curves
writer = SummaryWriter("../Train_Vai_logs")

# 4. Loss function (CrossEntropyLoss expects raw logits + integer class targets)
loss_fun = nn.CrossEntropyLoss()

# 5. Model
dqld = NeuralNetwork_CIFAR10.DQLD()

# 6. Optimizer
learning_rate = 0.001
optimizer = torch.optim.SGD(dqld.parameters(), lr=learning_rate)

# 7. Train + validate + save a checkpoint per epoch

for i in range(epoch):
    # Put the network in training mode. This only affects certain modules
    # (e.g. Dropout, BatchNorm).
    # BUGFIX: train() must be re-applied at the start of every epoch because
    # eval() is called at the end of each epoch below; the original set it
    # once before the loop, so epochs 2..N trained in eval mode.
    dqld.train()

    train_step = 0
    # BUGFIX: reset the per-epoch loss accumulator; it previously carried
    # over between epochs, so the printed "per-epoch" total kept growing.
    loss_sum = 0
    print("-----------第{}轮训练----------".format(i+1))
    print("---第{}轮训练开始---".format(i + 1))
    for data in train_dataloader:
        imgs, targets = data
        output = dqld(imgs)
        loss = loss_fun(output, targets)

        # Standard optimization step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        loss_sum = loss_sum + loss.item()
        train_step = train_step + 1
        if train_step % 100 == 0:
            print("第{}轮->第{}次训练损失值loss:{}".format(i+1, train_step, loss.item()))
            writer.add_scalar("train_loss_{}".format(i+1), loss.item(), train_step)
    print("---第{}轮训练总损失和:{}---".format(i+1, loss_sum))
    print("---第{}轮训练结束---".format(i + 1))

    # After each training epoch, evaluate on the validation set and save a
    # checkpoint. eval() only affects certain modules (Dropout, BatchNorm, ...).
    dqld.eval()

    total_validation_loss = 0
    total_accuracy_sum = 0
    print("---第{}轮验证开始---".format(i + 1))

    # no_grad: inference only, no gradients needed
    with torch.no_grad():
        for data in validation_dataloader:
            imgs, targets = data
            output = dqld(imgs)
            # Index of the highest logit per sample = predicted class
            prediction = output.argmax(1)
            # .item() keeps the running total a plain int instead of a tensor
            accuracy = (prediction == targets).sum().item()
            total_accuracy_sum = total_accuracy_sum + accuracy
            loss = loss_fun(output, targets)
            total_validation_loss = total_validation_loss + loss.item()

    writer.add_scalar("validation_loss_sum", total_validation_loss, i + 1)
    writer.add_scalar("validation_accuracy_rate", (total_accuracy_sum / validation_size) * 100, i + 1)
    torch.save(dqld, "./Modules/CIFAR10_module{}.pth".format(i + 1))
    print("---第{}轮验证准确率:{}---".format(i + 1, total_accuracy_sum / validation_size))
    print("---第{}轮验证总损失和:{}---".format(i + 1, total_validation_loss))
    print("---第{}轮验证结束---".format(i + 1))
writer.close()

tensorboard结果显示:

 

六、采用cuda计算

6.1方式一(不常用)

import torch.optim
import torchvision as torchvision
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
import torch.nn as nn
import NeuralNetwork_CIFAR10

# 1. Prepare the datasets
# Training set
train_dataset = torchvision.datasets.CIFAR10(root="../dataset", train=True, transform=torchvision.transforms.ToTensor(), download=True)
train_dataloader = DataLoader(train_dataset, 64)
# Validation set
validation_dataset = torchvision.datasets.CIFAR10(root="../dataset", train=False, transform=torchvision.transforms.ToTensor(), download=True)
validation_dataloader = DataLoader(validation_dataset, 64)

# 2. Dataset sizes
train_size = len(train_dataset)
validation_size = len(validation_dataset)
print("训练集大小:{}".format(train_size))
print("验证集大小:{}".format(validation_size))

# 3. Bookkeeping variables

# step counter within the current training epoch
train_step = 0
# validation step counter (kept from the article; unused below)
val_step = 0
# number of train/validate rounds
epoch = 10
# per-epoch training loss total (reset at the start of every epoch)
loss_sum = 0
# tensorboard writer for the loss/accuracy curves
writer = SummaryWriter("../Train_Vai_logs")

# 4. Loss function (CrossEntropyLoss expects raw logits + integer class targets)
loss_fun = nn.CrossEntropyLoss()
# Move to the GPU
loss_fun = loss_fun.cuda()

# 5. Model
dqld = NeuralNetwork_CIFAR10.DQLD()
# Move to the GPU
dqld = dqld.cuda()
# 6. Optimizer
learning_rate = 0.001
optimizer = torch.optim.SGD(dqld.parameters(), lr=learning_rate)

# 7. Train + validate + save a checkpoint per epoch

for i in range(epoch):
    # Put the network in training mode. This only affects certain modules
    # (e.g. Dropout, BatchNorm).
    # BUGFIX: train() must be re-applied at the start of every epoch because
    # eval() is called at the end of each epoch below; the original set it
    # once before the loop, so epochs 2..N trained in eval mode.
    dqld.train()

    train_step = 0
    # BUGFIX: reset the per-epoch loss accumulator; it previously carried
    # over between epochs, so the printed "per-epoch" total kept growing.
    loss_sum = 0
    print("-----------第{}轮训练----------".format(i+1))
    print("---第{}轮训练开始---".format(i + 1))
    for data in train_dataloader:
        imgs, targets = data
        # Tensors are not moved in place: .cuda() returns a new tensor,
        # so the result must be reassigned.
        imgs = imgs.cuda()
        targets = targets.cuda()
        output = dqld(imgs)
        loss = loss_fun(output, targets)

        # Standard optimization step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        loss_sum = loss_sum + loss.item()
        train_step = train_step + 1
        if train_step % 100 == 0:
            print("第{}轮->第{}次训练损失值loss:{}".format(i+1, train_step, loss.item()))
            writer.add_scalar("train_loss_{}".format(i+1), loss.item(), train_step)
    print("---第{}轮训练总损失和:{}---".format(i+1, loss_sum))
    print("---第{}轮训练结束---".format(i + 1))

    # After each training epoch, evaluate on the validation set and save a
    # checkpoint. eval() only affects certain modules (Dropout, BatchNorm, ...).
    dqld.eval()

    total_validation_loss = 0
    total_accuracy_sum = 0
    print("---第{}轮验证开始---".format(i + 1))

    # no_grad: inference only, no gradients needed
    with torch.no_grad():
        for data in validation_dataloader:
            imgs, targets = data
            imgs = imgs.cuda()
            targets = targets.cuda()
            output = dqld(imgs)
            # Index of the highest logit per sample = predicted class
            prediction = output.argmax(1)
            # .item() keeps the running total a plain int instead of a tensor
            accuracy = (prediction == targets).sum().item()
            total_accuracy_sum = total_accuracy_sum + accuracy
            loss = loss_fun(output, targets)
            total_validation_loss = total_validation_loss + loss.item()

    writer.add_scalar("validation_loss_sum", total_validation_loss, i + 1)
    writer.add_scalar("validation_accuracy_rate", (total_accuracy_sum / validation_size) * 100, i + 1)
    torch.save(dqld, "./Modules/CIFAR10_module{}.pth".format(i + 1))
    print("---第{}轮验证准确率:{}---".format(i + 1, total_accuracy_sum / validation_size))
    print("---第{}轮验证总损失和:{}---".format(i + 1, total_validation_loss))
    print("---第{}轮验证结束---".format(i + 1))
writer.close()

6.2方式二(常用)

import torch.optim
import torchvision as torchvision
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
import torch.nn as nn
import NeuralNetwork_CIFAR10

# Select the device. With a single GPU the lines below are equivalent;
# with multiple GPUs the index picks a specific card.
device = torch.device("cuda")
# device = torch.device("cuda:0")
# device = torch.device("cuda:1")

# 1. Prepare the datasets
# Training set
train_dataset = torchvision.datasets.CIFAR10(root="../dataset", train=True, transform=torchvision.transforms.ToTensor(), download=True)
train_dataloader = DataLoader(train_dataset, 64)
# Validation set
validation_dataset = torchvision.datasets.CIFAR10(root="../dataset", train=False, transform=torchvision.transforms.ToTensor(), download=True)
validation_dataloader = DataLoader(validation_dataset, 64)

# 2. Dataset sizes
train_size = len(train_dataset)
validation_size = len(validation_dataset)
print("训练集大小:{}".format(train_size))
print("验证集大小:{}".format(validation_size))

# 3. Bookkeeping variables

# step counter within the current training epoch
train_step = 0
# validation step counter (kept from the article; unused below)
val_step = 0
# number of train/validate rounds
epoch = 10
# per-epoch training loss total (reset at the start of every epoch)
loss_sum = 0
# tensorboard writer for the loss/accuracy curves
writer = SummaryWriter("../Train_Vai_logs")

# 4. Loss function (CrossEntropyLoss expects raw logits + integer class targets)
loss_fun = nn.CrossEntropyLoss()
# Move to the selected device. For nn.Module objects .to(device) works in
# place, so reassigning the return value is optional.
# FIX: the original called .cuda() and then .to(device); a single .to(device)
# is sufficient and keeps the device choice in one place (the `device` object).
loss_fun.to(device)

# 5. Model
dqld = NeuralNetwork_CIFAR10.DQLD()
# FIX: same as above — one .to(device) instead of .cuda() followed by .to(device).
dqld.to(device)

# 6. Optimizer
learning_rate = 0.001
optimizer = torch.optim.SGD(dqld.parameters(), lr=learning_rate)

# 7. Train + validate + save a checkpoint per epoch

for i in range(epoch):
    # Put the network in training mode. This only affects certain modules
    # (e.g. Dropout, BatchNorm).
    # BUGFIX: train() must be re-applied at the start of every epoch because
    # eval() is called at the end of each epoch below; the original set it
    # once before the loop, so epochs 2..N trained in eval mode.
    dqld.train()

    train_step = 0
    # BUGFIX: reset the per-epoch loss accumulator; it previously carried
    # over between epochs, so the printed "per-epoch" total kept growing.
    loss_sum = 0
    print("-----------第{}轮训练----------".format(i+1))
    print("---第{}轮训练开始---".format(i + 1))
    for data in train_dataloader:
        imgs, targets = data
        # Unlike modules, tensors are NOT moved in place: .to(device) returns
        # a new tensor, so the result must be reassigned to imgs/targets.
        imgs = imgs.to(device)
        targets = targets.to(device)
        output = dqld(imgs)
        loss = loss_fun(output, targets)

        # Standard optimization step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        loss_sum = loss_sum + loss.item()
        train_step = train_step + 1
        if train_step % 100 == 0:
            print("第{}轮->第{}次训练损失值loss:{}".format(i+1, train_step, loss.item()))
            writer.add_scalar("train_loss_{}".format(i+1), loss.item(), train_step)
    print("---第{}轮训练总损失和:{}---".format(i+1, loss_sum))
    print("---第{}轮训练结束---".format(i + 1))

    # After each training epoch, evaluate on the validation set and save a
    # checkpoint. eval() only affects certain modules (Dropout, BatchNorm, ...).
    dqld.eval()

    total_validation_loss = 0
    total_accuracy_sum = 0
    print("---第{}轮验证开始---".format(i + 1))

    # no_grad: inference only, no gradients needed
    with torch.no_grad():
        for data in validation_dataloader:
            imgs, targets = data
            # Reassignment required here as well (tensor .to is not in place).
            imgs = imgs.to(device)
            targets = targets.to(device)
            output = dqld(imgs)
            # Index of the highest logit per sample = predicted class
            prediction = output.argmax(1)
            # .item() keeps the running total a plain int instead of a tensor
            accuracy = (prediction == targets).sum().item()
            total_accuracy_sum = total_accuracy_sum + accuracy
            loss = loss_fun(output, targets)
            total_validation_loss = total_validation_loss + loss.item()

    writer.add_scalar("validation_loss_sum", total_validation_loss, i + 1)
    writer.add_scalar("validation_accuracy_rate", (total_accuracy_sum / validation_size) * 100, i + 1)
    torch.save(dqld, "./Modules/CIFAR10_module{}.pth".format(i + 1))
    print("---第{}轮验证准确率:{}---".format(i + 1, total_accuracy_sum / validation_size))
    print("---第{}轮验证总损失和:{}---".format(i + 1, total_validation_loss))
    print("---第{}轮验证结束---".format(i + 1))
writer.close()

6.3对比采用cuda与cpu的速度

采用cuda

采用cpu

  • 4
    点赞
  • 2
    收藏
    觉得还不错? 一键收藏
  • 打赏
    打赏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包

打赏作者

DQ小恐龙

你的鼓励将是我创作的最大动力

¥1 ¥2 ¥4 ¥6 ¥10 ¥20
扫码支付:¥1
获取中
扫码支付

您的余额不足,请更换扫码支付或充值

打赏作者

实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值