tensor-CIFAR10数据集加载及模型搭建

本次案例是CIFAR10分类({'airplane': 0, 'automobile': 1, 'bird': 2, 'cat': 3, 'deer': 4, 'dog': 5, 'frog': 6, 'horse': 7, 'ship': 8, 'truck': 9})

首先对数据集准备。

from torchvision import datasets
from torchvision import transforms
from torch.utils.tensorboard import SummaryWriter
import ssl  # 取消认证

ssl._create_default_https_context = ssl._create_unverified_context  # 没有数据集进行下载时候需要取消认证,不然无法下载

# BUG FIX: the original called the bare name `ToTensor()`, but this script only
# imports the `transforms` module, so that line raised NameError. Qualify it.
tensor = transforms.ToTensor()  # converts PIL images to float tensors in [0, 1]
# Training split of CIFAR-10; downloaded into ../dataset on first run.
train_set = datasets.CIFAR10("../dataset", train=True, transform=tensor, download=True)
"""
  Args:
        root (string): Root directory of dataset where directory  -- where the data is stored
            ``cifar-10-batches-py`` exists or will be saved to if download is set to True.
        train (bool, optional): If True, creates dataset from training set, otherwise  -- training vs test split
            creates from test set.
        transform (callable, optional): A function/transform that takes in an PIL image  -- per-image transform
            and returns a transformed version. E.g, ``transforms.RandomCrop``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
        download (bool, optional): If true, downloads the dataset from the internet and  -- fetch if missing
            puts it in root directory. If dataset is already downloaded, it is not
            downloaded again.

"""

train_set变量具体信息
在这里插入图片描述

数据的加载


from torch.utils.data import DataLoader  # 数据打包处理
from torchvision.datasets import CIFAR10  # 提供数据集
from torchvision.transforms import ToTensor  # PIL转换成tensor
from torch.utils.tensorboard import SummaryWriter  # 把数据写进tensorboard

# PIL -> CHW float tensor in [0, 1].
to_tensor = ToTensor()
# Test split of CIFAR-10, fetched into ../dataset when absent.
text_set = CIFAR10("../dataset", train=False, transform=to_tensor, download=True)
# Batch the test set: 64 images per batch, reshuffled each epoch,
# final short batch discarded.
data_loader = DataLoader(dataset=text_set, batch_size=64, shuffle=True, drop_last=True)
"""
  Data loader. Combines a dataset and a sampler, and provides an iterable over
    the given dataset.
    dataset (Dataset): dataset from which to load the data.  要加载的数据
    batch_size (int, optional): how many samples per batch to load 一次性加载多少数据 最好2的倍数 16 32 64 等,设置越大对电脑性能要求高
    (default: ``1``).
    shuffle (bool, optional): set to ``True`` to have the data reshuffled  每次循环是否打乱数据
    at every epoch (default: ``False``).
    drop_last (bool, optional): set to ``True`` to drop the last incomplete batch,  最后一批不足是舍弃
            if the dataset size is not divisible by the batch size. If ``False`` and
            the size of dataset is not divisible by the batch size, then the last batch
            will be smaller. (default: ``False``)
"""
# Dump every batch into tensorboard so the batching can be inspected visually.
writer = SummaryWriter("../data_loader")
for step, (imgs, targets) in enumerate(data_loader):
    writer.add_images("loader", imgs, step)

writer.close()


在这里插入图片描述
在这里插入图片描述

训练代码,整体来说训练效果不好,测试的效果也不好(等后面进行优化吧)整体熟悉一下流程

import torch
import torchvision
from torch import nn
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter

# Prepare the dataset below; the network definition (Tudui) comes from model.py
from model import *
"""
模型训练:
1 准备数据  torchvision.dataset
2 加载数据,并设置batch   torch.utils.data
3 初始化 网络模型,损失函数,优化函数,SummaryWriter
4 开始训练,使用网络训练,计算损失,绘制损失函数图,优化参数,保存每步模型

"""
# ---- 1. Datasets: CIFAR-10 train/test splits, images converted to tensors ----
train_data = torchvision.datasets.CIFAR10(root="../../dataset", train=True, transform=torchvision.transforms.ToTensor(),
                                          download=True)
test_data = torchvision.datasets.CIFAR10(root="../../dataset", train=False, transform=torchvision.transforms.ToTensor(),
                                         download=True)

# Dataset sizes (CIFAR-10: 50000 train / 10000 test).
train_data_size = len(train_data)
test_data_size = len(test_data)
print("训练数据集的长度为:{}".format(train_data_size))
print("测试数据集的长度为:{}".format(test_data_size))


# ---- 2. DataLoaders: batch the datasets ----
train_dataloader = DataLoader(train_data, batch_size=64)
test_dataloader = DataLoader(test_data, batch_size=64)

# ---- 3. Model, loss, optimizer ----
tudui = Tudui()  # network defined in model.py

# Cross-entropy for 10-class classification (expects raw logits).
loss_fn = nn.CrossEntropyLoss()

# Plain SGD; 1e-2 = 0.01.
learning_rate = 1e-2
optimizer = torch.optim.SGD(tudui.parameters(), lr=learning_rate)

# ---- 4. Training bookkeeping ----
total_train_step = 0   # global count of optimizer steps
total_test_step = 0    # count of completed evaluation passes
epoch = 50             # number of training epochs

# Best test accuracy seen so far, as a fraction in [0, 1]; used to
# checkpoint only when the model improves.
max_acc = 0.0

writer = SummaryWriter("../../logs_train")

for i in range(epoch):
    print("-------第 {} 轮训练开始-------".format(i+1))

    # ---- training pass ----
    tudui.train()
    for imgs, targets in train_dataloader:
        outputs = tudui(imgs)
        loss = loss_fn(outputs, targets)

        # Standard zero-grad / backward / step cycle.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        total_train_step += 1
        if total_train_step % 100 == 0:
            print("训练次数:{}, Loss: {}".format(total_train_step, loss.item()))
            writer.add_scalar("train_loss", loss.item(), total_train_step)

    # ---- evaluation pass (no gradients) ----
    tudui.eval()
    total_test_loss = 0.0
    total_accuracy = 0  # number of correctly classified test samples
    with torch.no_grad():
        for imgs, targets in test_dataloader:
            outputs = tudui(imgs)
            loss = loss_fn(outputs, targets)
            total_test_loss += loss.item()
            # .item() so we accumulate a plain int, not a 0-d tensor.
            total_accuracy += (outputs.argmax(1) == targets).sum().item()

    # Epoch-level test accuracy as a fraction of the whole test set.
    test_acc = total_accuracy / test_data_size
    print("整体测试集上的Loss: {}".format(total_test_loss))
    print("整体测试集上的正确率: {}".format(test_acc))
    writer.add_scalar("test_loss", total_test_loss, total_test_step)
    writer.add_scalar("test_accuracy", test_acc, total_test_step)
    total_test_step += 1

    # BUG FIX: the original compared max_acc against `accuracy`, the correct
    # COUNT of the last test batch only, so the "best model" checkpoint was
    # effectively arbitrary. Compare the epoch-level test accuracy instead.
    if test_acc > max_acc:
        max_acc = test_acc
        torch.save(tudui, "tudui.pth")
        print("模型已保存")

writer.close()

在这里插入图片描述

在这里插入图片描述
在这里插入图片描述
在这里插入图片描述
参考视频哔哩哔哩小土堆

  • 0
    点赞
  • 2
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值