"""
手写数字分类 卷积池化模型
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
from torch.utils.data import DataLoader
from torch.utils.data import TensorDataset
import matplotlib.pyplot as plt
from tqdm import tqdm
from torchvision import datasets, transforms
transformation = transforms.Compose([
    transforms.ToTensor()  # converts PIL images to float tensors in [0, 1] with shape [1, 28, 28]
])
train_ds = datasets.MNIST("./data", train=True, transform=transformation, download=True)
test_ds = datasets.MNIST("./data", train=False, transform=transformation, download=True)
train_dl = DataLoader(train_ds, batch_size=128, shuffle=True)
test_dl = DataLoader(test_ds, batch_size=256)
imgs, labels = next(iter(train_dl))
print("labels[0]:\t", labels[0].item())
"""
创建模型
"""
class Model(nn.Module):
    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(1, 6, 5)            # 1x28x28 -> 6x24x24
        self.pool = nn.MaxPool2d((2, 2))           # halves the spatial size
        self.conv2 = nn.Conv2d(6, 16, 5)           # 6x12x12 -> 16x8x8
        self.liner_1 = nn.Linear(16 * 4 * 4, 256)  # 16x4x4 = 256 features after the second pooling
        self.liner_2 = nn.Linear(256, 10)          # 10 digit classes

    def forward(self, x):
        x = F.relu(self.conv1(x))
        x = self.pool(x)
        x = F.relu(self.conv2(x))
        x = self.pool(x)
        x = x.view(x.size(0), -1)                  # flatten to [batch, 256]
        x = F.relu(self.liner_1(x))
        x = self.liner_2(x)                        # raw logits; CrossEntropyLoss handles the softmax
        return x
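# Shape check (an added sketch, not in the original script): 28x28 -> conv1 -> 24x24
# -> max-pool -> 12x12 -> conv2 -> 8x8 -> max-pool -> 4x4, which is why the first
# linear layer expects 16*4*4 = 256 input features.
out = Model()(torch.randn(1, 1, 28, 28))
assert out.shape == (1, 10)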
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print("device", device)
"""
在GPU上训练只需要两步:
1. 将模型转移到GPU
2. 将每一个批次的训练数据转移到GPU
"""
model = Model()
model = model.to(device)
print("model:\t", model)
loss_fn = torch.nn.CrossEntropyLoss()
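# nn.CrossEntropyLoss expects raw logits of shape [batch, num_classes] plus integer class
# labels of shape [batch], and applies log-softmax internally -- which is why the model's
# forward returns unnormalized logits. A tiny added illustration (not in the original script):
_example_loss = loss_fn(torch.randn(4, 10), torch.tensor([3, 0, 7, 9]))  # scalar tensor, roughly ln(10)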
def fit(epoch, model, trainloader, testloader):
    # One training pass plus one evaluation pass; uses the module-level `optimizer`
    # defined below (it exists before fit() is first called).
    model.train()
    correct = 0
    total = 0
    running_loss = 0
    for x, y in trainloader:
        x, y = x.to(device), y.to(device)  # step 2: move the batch to the GPU
        y_pred = model(x)
        loss = loss_fn(y_pred, y)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        with torch.no_grad():
            y_pred = torch.argmax(y_pred, dim=1)
            correct += (y_pred == y).sum().item()
            total += y.size(0)
            running_loss += loss.item()
    # running_loss sums per-batch mean losses, so dividing by the dataset size yields very
    # small numbers (hence the near-zero losses in the log below); dividing by
    # len(trainloader) would give the average per-batch loss instead.
    epoch_loss = running_loss / len(trainloader.dataset)
    epoch_acc = correct / total

    model.eval()
    test_correct = 0
    test_total = 0
    test_running_loss = 0
    with torch.no_grad():
        for x, y in testloader:
            x, y = x.to(device), y.to(device)
            y_pred = model(x)
            loss = loss_fn(y_pred, y)
            y_pred = torch.argmax(y_pred, dim=1)
            test_correct += (y_pred == y).sum().item()
            test_total += y.size(0)
            test_running_loss += loss.item()
    epoch_test_loss = test_running_loss / len(testloader.dataset)
    epoch_test_acc = test_correct / test_total

    print('epoch: ', epoch,
          'loss: ', round(epoch_loss, 3),
          'accuracy:', round(epoch_acc, 3),
          'test_loss: ', round(epoch_test_loss, 3),
          'test_accuracy:', round(epoch_test_acc, 3)
          )
    return epoch_loss, epoch_acc, epoch_test_loss, epoch_test_acc
optimizer = optim.Adam(model.parameters(), lr=0.001)  # Adam with the default 1e-3 learning rate
epochs = 20
train_loss = []
train_acc = []
test_loss = []
test_acc = []
for epoch in range(epochs):
    epoch_loss, epoch_acc, epoch_test_loss, epoch_test_acc = fit(epoch,
                                                                 model,
                                                                 train_dl,
                                                                 test_dl)
    train_loss.append(epoch_loss)
    train_acc.append(epoch_acc)
    test_loss.append(epoch_test_loss)
    test_acc.append(epoch_test_acc)
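# The per-epoch histories collected above can be visualized with the matplotlib import;
# a minimal added sketch (not part of the original run):
plt.plot(range(epochs), train_loss, label="train_loss")
plt.plot(range(epochs), test_loss, label="test_loss")
plt.legend()
plt.show()
plt.plot(range(epochs), train_acc, label="train_acc")
plt.plot(range(epochs), test_acc, label="test_acc")
plt.legend()
plt.show()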
"""
Program output
"""
labels[0]: 5
device cuda:0
model: Model(
  (conv1): Conv2d(1, 6, kernel_size=(5, 5), stride=(1, 1))
  (pool): MaxPool2d(kernel_size=(2, 2), stride=(2, 2), padding=0, dilation=1, ceil_mode=False)
  (conv2): Conv2d(6, 16, kernel_size=(5, 5), stride=(1, 1))
  (liner_1): Linear(in_features=256, out_features=256, bias=True)
  (liner_2): Linear(in_features=256, out_features=10, bias=True)
)
epoch: 0 loss: 0.003 accuracy: 0.898 test_loss: 0.0 test_accuracy: 0.972
epoch: 1 loss: 0.001 accuracy: 0.974 test_loss: 0.0 test_accuracy: 0.983
epoch: 2 loss: 0.0 accuracy: 0.981 test_loss: 0.0 test_accuracy: 0.986
epoch: 3 loss: 0.0 accuracy: 0.985 test_loss: 0.0 test_accuracy: 0.986
epoch: 4 loss: 0.0 accuracy: 0.988 test_loss: 0.0 test_accuracy: 0.988
epoch: 5 loss: 0.0 accuracy: 0.989 test_loss: 0.0 test_accuracy: 0.989
epoch: 6 loss: 0.0 accuracy: 0.991 test_loss: 0.0 test_accuracy: 0.988
epoch: 7 loss: 0.0 accuracy: 0.992 test_loss: 0.0 test_accuracy: 0.988
epoch: 8 loss: 0.0 accuracy: 0.993 test_loss: 0.0 test_accuracy: 0.99
epoch: 9 loss: 0.0 accuracy: 0.994 test_loss: 0.0 test_accuracy: 0.99
epoch: 10 loss: 0.0 accuracy: 0.995 test_loss: 0.0 test_accuracy: 0.99
epoch: 11 loss: 0.0 accuracy: 0.996 test_loss: 0.0 test_accuracy: 0.989
epoch: 12 loss: 0.0 accuracy: 0.997 test_loss: 0.0 test_accuracy: 0.988
epoch: 13 loss: 0.0 accuracy: 0.996 test_loss: 0.0 test_accuracy: 0.988
epoch: 14 loss: 0.0 accuracy: 0.997 test_loss: 0.0 test_accuracy: 0.989
epoch: 15 loss: 0.0 accuracy: 0.997 test_loss: 0.0 test_accuracy: 0.989
epoch: 16 loss: 0.0 accuracy: 0.998 test_loss: 0.0 test_accuracy: 0.99
epoch: 17 loss: 0.0 accuracy: 0.997 test_loss: 0.0 test_accuracy: 0.989
epoch: 18 loss: 0.0 accuracy: 0.998 test_loss: 0.0 test_accuracy: 0.991
epoch: 19 loss: 0.0 accuracy: 0.998 test_loss: 0.0 test_accuracy: 0.991