使用cifar10数据集进行训练和验证(pytorch)

import torch
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
# from lenet5 import Lenet5
# from ResNet import ResNet18
# from AlexNet import AlexNet
# from VGG import vgg16_bn
# from VGG_2 import vgg16_bn
from torch import nn, optim
import sys



batch_size = 32   # mini-batch size
input_size = 32   # spatial size images are resized to
# A CIFAR-10 batch therefore has shape [batch_size, 3, 32, 32].

# Shared preprocessing pipeline for both splits: resize, convert to a
# float tensor in [0, 1], then normalize with the ImageNet channel
# statistics used by the original script.
transform = transforms.Compose([
    transforms.Resize((input_size, input_size)),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225]),
])

# Training set: shuffle each epoch so batches arrive in a different order.
cifar_train = datasets.CIFAR10('cifar', train=True, transform=transform,
                               download=True)
cifar_train = DataLoader(cifar_train, batch_size=batch_size, shuffle=True)

# Test set: no shuffling — evaluation accumulates over the whole split,
# so order is irrelevant, and a fixed order keeps runs reproducible.
cifar_test = datasets.CIFAR10('cifar', train=False, transform=transform,
                              download=True)
cifar_test = DataLoader(cifar_test, batch_size=batch_size, shuffle=False)



# Peek at one training batch to sanity-check tensor shapes:
# x is [batch_size, 3, 32, 32], label is [batch_size].
# NOTE: iterator.next() is Python 2 only; Python 3 uses the next() builtin.
x, label = next(iter(cifar_train))
print('x: ', x.shape, ' label: ', label.shape)

# Select the GPU when one is available, otherwise fall back to the CPU so
# the script also runs on machines without CUDA (a hard-coded 'cuda'
# device would fail at the first .to(device) call on CPU-only hosts).
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(device)

# Any classifier that maps [b, 3, 32, 32] -> [b, 10] logits can be swapped
# in here (uncomment one and its matching import at the top of the file):
# model = Lenet5().to(device)
# model = ResNet18().to(device)
# model = AlexNet().to(device)
# model = vgg16_bn().to(device)
# Default baseline so the script runs out of the box (the original left
# `model` undefined, which raised NameError below): a small conv net.
model = nn.Sequential(
    nn.Conv2d(3, 32, kernel_size=3, padding=1), nn.ReLU(),
    nn.MaxPool2d(2),                       # -> [b, 32, 16, 16]
    nn.Conv2d(32, 64, kernel_size=3, padding=1), nn.ReLU(),
    nn.MaxPool2d(2),                       # -> [b, 64, 8, 8]
    nn.Flatten(),
    nn.Linear(64 * 8 * 8, 10),             # 10 CIFAR-10 classes
).to(device)

# CrossEntropyLoss expects raw logits [b, 10] and integer labels [b].
criteon = nn.CrossEntropyLoss().to(device)
optimizer = optim.Adam(model.parameters(), lr=1e-3)  # optimizer
print(model)

for epoch in range(15):
    # ---- training pass over the whole training set ----
    model.train()  # enable training-mode behaviour (dropout, BN updates)
    for step, (x, label) in enumerate(cifar_train):
        # x: [b, 3, 32, 32], label: [b]
        x, label = x.to(device), label.to(device)

        logits = model(x)              # [b, 10] raw class scores
        loss = criteon(logits, label)  # scalar tensor

        # standard backprop step: clear grads, differentiate, update
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

    # loss here belongs to the final batch of the epoch
    print(epoch, loss.item())

    # ---- evaluation pass over the test set ----
    model.eval()  # inference-mode behaviour
    with torch.no_grad():  # no autograd bookkeeping needed for eval
        correct = 0
        seen = 0
        for x, label in cifar_test:
            x, label = x.to(device), label.to(device)

            logits = model(x)            # [b, 10]
            pred = logits.argmax(dim=1)  # [b] predicted class per sample

            correct += (pred == label).sum().item()
            seen += x.size(0)            # b, the batch size
        acc = correct / seen
        print("epoch: ", epoch, "acc: ", acc)

  • 0
    点赞
  • 11
    收藏
    觉得还不错? 一键收藏
  • 2
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论 2
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值