完整的训练套路(四) 完整的代码
1. 模型的定义 models.py
from torch import nn
class Model(nn.Module):
    """CNN classifier for CIFAR-10-sized images.

    Three conv+maxpool stages followed by two linear layers:
    (N, 3, 32, 32) -> 16x16x32 -> 8x8x32 -> 4x4x64 -> flatten -> 64 -> 10 logits.
    """

    def __init__(self):
        super().__init__()
        # Attribute name `model` is kept so state_dict keys stay stable.
        self.model = nn.Sequential(
            nn.Conv2d(3, 32, kernel_size=5, stride=1, padding=2),
            nn.MaxPool2d(kernel_size=2),
            nn.Conv2d(32, 32, kernel_size=5, stride=1, padding=2),
            nn.MaxPool2d(kernel_size=2),
            nn.Conv2d(32, 64, kernel_size=5, stride=1, padding=2),
            nn.MaxPool2d(kernel_size=2),
            nn.Flatten(),
            nn.Linear(64 * 4 * 4, 64),  # 1024 features after the last pool
            nn.Linear(64, 10),
        )

    def forward(self, x):
        """Return (N, 10) class logits for a batch of (N, 3, 32, 32) images."""
        return self.model(x)
if __name__ == "__main__":
    # Smoke test: verify the network produces (64, 10) logits for a dummy batch.
    # Bug fix: this module only does `from torch import nn`, so `torch.ones`
    # raised NameError — import torch locally for the self-test.
    import torch

    model = Model()
    dummy = torch.ones(64, 3, 32, 32)  # renamed from `input` (shadowed builtin)
    output = model(dummy)
    print(output.size())  # expected: torch.Size([64, 10])
2. 训练主文件
import torch
import torchvision
from torch import nn
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
# CIFAR-10 train/test splits, downloaded on first run, converted to tensors in [0, 1].
train_data = torchvision.datasets.CIFAR10(
    root="./data_torchvision", train=True, download=True,
    transform=torchvision.transforms.ToTensor(),
)
test_data = torchvision.datasets.CIFAR10(
    root="./data_torchvision", train=False, download=True,
    transform=torchvision.transforms.ToTensor(),
)
train_dataloader = DataLoader(train_data, batch_size=64)
test_dataloader = DataLoader(test_data, batch_size=64)

# Bug fix: `model` was used by the optimizer but never created.
# `Model` is the CNN defined in models.py above.
model = Model()

loss_fn = nn.CrossEntropyLoss()
learning_rate = 0.01
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)

total_train_step = 0  # optimizer steps taken so far (across all epochs)
total_test_step = 0   # completed evaluation passes
epoch = 10            # total number of training epochs
write = SummaryWriter("log")  # TensorBoard logger ("log" directory)
for i in range(epoch):
    # ---- training phase: one full pass over the training set ----
    model.train()
    for data in train_dataloader:
        imgs, targets = data
        outputs = model(imgs)
        loss = loss_fn(outputs, targets)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        total_train_step += 1
        # Log every 100 steps to keep TensorBoard output manageable.
        if total_train_step % 100 == 0:
            write.add_scalar("train loss", loss.item(), total_train_step)

    # ---- evaluation phase: accumulate loss over the test set, no gradients ----
    model.eval()
    total_test_loss = 0
    with torch.no_grad():
        for data in test_dataloader:
            imgs, targets = data
            outputs = model(imgs)
            loss = loss_fn(outputs, targets)
            total_test_loss += loss.item()
    total_test_step += 1
    write.add_scalar("test loss", total_test_loss, total_test_step)

    # Save one checkpoint per epoch. Bug fix: the original formatted the
    # constant `epoch` (always 10) into the filename, so every epoch
    # overwrote the same "model_epoch = 10.pth" file; use the epoch index.
    torch.save(model, f"model_epoch_{i + 1}.pth")

write.close()