PyTorch Study Notes -- A Complete Training Loop

        The training script defines a loss function, an activation function, and an optimizer, evaluates the model after each epoch, computes the test accuracy, and visualizes the run with TensorBoard. The dataset is torchvision's CIFAR10, and training runs on the GPU.

        🚀 The code:

train.py

import torch
import torchvision
from torch import nn
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter

from module import *

# Prepare the datasets
train_data = torchvision.datasets.CIFAR10(root='./dataset', train=True, transform=torchvision.transforms.ToTensor(),
                                          download=True)
val_data = torchvision.datasets.CIFAR10(root='./dataset', train=False, transform=torchvision.transforms.ToTensor(),
                                        download=True)

train_data_size = len(train_data)
val_data_size = len(val_data)

# Wrap the datasets in DataLoaders
train_loader = DataLoader(train_data, batch_size=64)
val_loader = DataLoader(val_data, batch_size=64)

# Build the network ----- 10-way classification
Mynn = deep_nn()
Mynn = Mynn.cuda()  # for GPU training, only the model, the loss function, and the data need to be moved
# Loss function: cross-entropy
loss_fn = nn.CrossEntropyLoss()
loss_fn = loss_fn.cuda()
# Optimizer: SGD (stochastic gradient descent)
optimizer = torch.optim.SGD(Mynn.parameters(), lr=0.01)

# Bookkeeping for logging steps
step_train = 0
step_val = 0
# Number of training epochs
epochs = 10

# TensorBoard writer for visualizing the training process
writer = SummaryWriter('logs')

for i in range(epochs):
    print("Epoch {} training starts".format(i + 1))
    # Training
    Mynn.train()  # switch to train mode; matters when the network contains Dropout / BatchNorm layers
    for data in train_loader:
        imgs, targets = data
        imgs = imgs.cuda()
        targets = targets.cuda()
        outputs = Mynn(imgs)
        res_loss = loss_fn(outputs, targets)
        optimizer.zero_grad()
        # Backpropagation
        res_loss.backward()
        optimizer.step()

        step_train += 1
        if step_train % 100 == 0:
            print("Training step {}  loss {}".format(step_train, res_loss.item()))
            writer.add_scalar(tag='train_loss', scalar_value=res_loss.item(), global_step=step_train)

    # Evaluation
    # Switch to eval mode: Dropout is disabled and BatchNorm uses its running statistics
    Mynn.eval()
    val_total_accu = 0  # correct predictions accumulated over this epoch
    val_total_loss = 0  # test loss accumulated over this epoch
    # No gradients are needed for evaluation
    with torch.no_grad():
        for data in val_loader:
            imgs, targets = data
            imgs = imgs.cuda()
            targets = targets.cuda()
            outputs = Mynn(imgs)
            res_loss = loss_fn(outputs, targets)
            val_total_loss += res_loss.item()  # accumulate the batch loss
            val_accu = (outputs.argmax(1) == targets).sum().item()  # number of correct predictions in this batch
            val_total_accu += val_accu
    print("Epoch {} test accuracy {}".format(i + 1, val_total_accu / val_data_size))
    print("Test set loss {}".format(val_total_loss))
    writer.add_scalar(tag='val_loss', scalar_value=val_total_loss, global_step=step_val)
    writer.add_scalar(tag='val_accu', scalar_value=val_total_accu / val_data_size, global_step=step_val)
    step_val += 1

    # Save a checkpoint after every epoch
    # torch.save(Mynn, 'epoch_{}.pt'.format(i + 1))
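    # A commonly recommended alternative (a sketch, not part of the original script):
    # saving only the weights via state_dict avoids pickling the whole model class
    # torch.save(Mynn.state_dict(), 'epoch_{}_weights.pt'.format(i + 1))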

writer.close()
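
One caveat about the script above: the bare .cuda() calls fail on a machine without an NVIDIA GPU. A device-agnostic variant (a minimal sketch with placeholder modules, not part of the original script) selects the device at runtime and moves the model, loss function, and data with .to(device):

import torch
from torch import nn

# Pick the device once; fall back to CPU when no GPU is available
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

model = nn.Linear(4, 2).to(device)            # stand-in for deep_nn()
loss_fn = nn.CrossEntropyLoss().to(device)
x = torch.randn(8, 4, device=device)          # stand-in for a batch of imgs
y = torch.randint(0, 2, (8,), device=device)  # stand-in for targets
print(loss_fn(model(x), y).item())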

module.py

import torch
from torch import nn
from torch.nn import Conv2d, ReLU, MaxPool2d, Flatten, Linear


class deep_nn(nn.Module):
    def __init__(self):
        super().__init__()
        # First conv layer: 3 input channels, 32 output channels, 5×5 kernel; padding=2 keeps the 32×32 spatial size
        self.conv1 = Conv2d(3, 32, 5, padding=2)
        # Activation function
        self.relu = ReLU()
        # Pooling does not change the channel count; no padding by default
        self.maxpool1 = MaxPool2d(2)
        self.conv2 = Conv2d(32, 32, 5, padding=2)
        self.maxpool2 = MaxPool2d(2)
        self.conv3 = Conv2d(32, 64, 5, padding=2)
        self.maxpool3 = MaxPool2d(2)
        self.flatten = Flatten()
        self.linear1 = Linear(1024, 64)  # 64 channels × 4 × 4 = 1024 after the last pooling layer
        self.linear2 = Linear(64, 10)

    def forward(self, x):
        x = self.conv1(x)
        x = self.relu(x)
        x = self.maxpool1(x)
        x = self.conv2(x)
        x = self.relu(x)
        x = self.maxpool2(x)
        x = self.conv3(x)
        x = self.relu(x)
        x = self.maxpool3(x)
        x = self.flatten(x)
        x = self.linear1(x)
        x = self.linear2(x)
        return x
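
A quick way to sanity-check the architecture (a small sketch added here, assuming the usual CIFAR10 input size of 3×32×32) is to push a dummy batch through the network and confirm the output shape, which also verifies that 1024 is the correct input size for the first Linear layer:

if __name__ == '__main__':
    net = deep_nn()
    dummy = torch.ones((64, 3, 32, 32))  # a fake batch of 64 CIFAR10 images
    out = net(dummy)
    print(out.shape)  # expected: torch.Size([64, 10])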

        🚀 The training process

        🔥 As you can see, the loss keeps decreasing while the accuracy rises.
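
To view these curves yourself, launch TensorBoard against the log directory the SummaryWriter writes to ('logs' in the script above):

tensorboard --logdir=logs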
