# Train and validate on the CIFAR-10 dataset (PyTorch).
import torch
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
from torch import nn, optim
import sys
# ---- Data loading -----------------------------------------------------------
batch_size = 32
input_size = 32  # CIFAR-10 images are already 32x32; Resize is a no-op here

# ImageNet normalization statistics; commonly reused for CIFAR-10 even though
# the dataset's own channel statistics differ slightly.
_transform = transforms.Compose([
    transforms.Resize((input_size, input_size)),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225]),
])

cifar_train = datasets.CIFAR10('cifar', train=True, transform=_transform,
                               download=True)
cifar_train = DataLoader(cifar_train, batch_size=batch_size, shuffle=True)

cifar_test = datasets.CIFAR10('cifar', train=False, transform=_transform,
                              download=True)
# Evaluation order does not affect accuracy, so the test loader is not shuffled.
cifar_test = DataLoader(cifar_test, batch_size=batch_size, shuffle=False)

# Sanity-check one batch.  `iter(...).next()` was Python 2 style and raises
# AttributeError on Python 3; use the builtin next().
x, label = next(iter(cifar_train))
print('x: ', x.shape, ' label: ', label.shape)
# ---- Loss / optimizer setup -------------------------------------------------
# Fall back to CPU when CUDA is unavailable instead of crashing on the first
# .to(device) call.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(device)

criteon = nn.CrossEntropyLoss().to(device)
# NOTE(review): `model` is not defined anywhere in this file — it must be
# created and moved to `device` before this line, e.g.
#     model = MyNet().to(device)
optimizer = optim.Adam(model.parameters(), lr=1e-3)
print(model)
# ---- Training / evaluation loop ---------------------------------------------
for epoch in range(15):
    # Training phase: one full pass over the training set.
    model.train()
    for x, label in cifar_train:  # unused enumerate index removed
        x, label = x.to(device), label.to(device)

        logits = model(x)              # raw class scores, shape (batch, 10)
        loss = criteon(logits, label)  # cross-entropy expects raw logits

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

    # Loss of the *last* batch only — a rough progress indicator, not an
    # epoch average.
    print(epoch, loss.item())

    # Evaluation phase: accuracy over the whole test set, gradients disabled.
    model.eval()
    with torch.no_grad():
        total_correct = 0
        total_num = 0
        for x, label in cifar_test:
            x, label = x.to(device), label.to(device)
            logits = model(x)
            pred = logits.argmax(dim=1)  # predicted class index per sample
            total_correct += torch.eq(pred, label).float().sum().item()
            total_num += x.size(0)
        acc = total_correct / total_num
        print("epoch: ", epoch, "acc: ", acc)