import torch
import torchvision.datasets
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from model import *
# ---------------------------------------------------------------------------
# CIFAR10 training script: loads the dataset, trains the `seq` model from
# model.py for 10 epochs with SGD, evaluates loss/accuracy on the test set
# after every epoch, logs to TensorBoard, and checkpoints the model per epoch.
# ---------------------------------------------------------------------------

# Dataset preparation: CIFAR10 train/test splits, images converted to tensors.
train_data = torchvision.datasets.CIFAR10(
    root="./data", train=True,
    transform=torchvision.transforms.ToTensor(), download=True)
test_data = torchvision.datasets.CIFAR10(
    root="./data", train=False,
    transform=torchvision.transforms.ToTensor(), download=True)

train_data_size = len(train_data)
test_data_size = len(test_data)
print("训练集长度:{}".format(train_data_size))
print("测试集长度:{}".format(test_data_size))

train_dataLoader = DataLoader(train_data, batch_size=64)
test_dataLoader = DataLoader(test_data, batch_size=64)

# Model (defined in model.py as `seq`), loss function, and optimizer.
test = seq()
loss_fun = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(test.parameters(), lr=0.01)

total_train_step = 0   # number of optimizer steps taken (x-axis for train_loss)
total_test_step = 0    # number of completed evaluations (x-axis for test_loss)
epoch = 10
writer = SummaryWriter("logs_train")

for i in range(epoch):
    print("----------第{}轮训练开始----------".format(i + 1))

    # --- training phase ---
    # Put the model in training mode (affects Dropout/BatchNorm if present).
    test.train()
    for data in train_dataLoader:
        imgs, target = data
        outputs = test(imgs)
        loss = loss_fun(outputs, target)

        # Standard backprop step.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        total_train_step += 1
        if total_train_step % 100 == 0:
            # .item() prints the scalar value rather than a tensor repr.
            print("训练次数:{},loss为:{}".format(total_train_step, loss.item()))
            writer.add_scalar("train_loss", loss.item(), total_train_step)

    # --- evaluation phase over the full test set ---
    test.eval()
    total_test_loss = 0
    total_accuracy = 0
    with torch.no_grad():
        for data in test_dataLoader:
            imgs, targets = data
            outputs = test(imgs)
            loss = loss_fun(outputs, targets)
            total_test_loss += loss.item()
            # BUG FIX: accumulate correct predictions across ALL batches;
            # the original kept only the last batch's count.
            total_accuracy += (outputs.argmax(1) == targets).sum().item()

    print("整体测试集上的Loss:{}".format(total_test_loss))
    print("整体测试集上的正确率:{}".format(total_accuracy / test_data_size))
    writer.add_scalar("test_loss", total_test_loss, total_test_step)
    writer.add_scalar("test_accuracy",
                      total_accuracy / test_data_size, total_test_step)
    # BUG FIX: the original incremented total_test_loss here instead of the
    # step counter, so the TensorBoard x-axis never advanced.
    total_test_step += 1

    # BUG FIX: filename had a comma ("test_{},pth") instead of a dot.
    torch.save(test, "test_{}.pth".format(i))
    print("模型已经保存好")

writer.close()