# 代码实现 (code implementation)
"""MNIST digit classification with a small CNN in PyTorch."""
import torch
import torchvision
from torchvision import datasets
from torchvision import transforms
import matplotlib.pyplot as plt
from torch.autograd import Variable
import torch.nn
import pylab
# Pipeline applied to every MNIST image: to tensor, then shift pixel values
# from [0, 1] to roughly [-1, 1] via Normalize(mean=0.5, std=0.5).
transform = transforms.Compose(
    [transforms.ToTensor(), transforms.Normalize((0.5,), (0.5,))]
)

# Training and evaluation splits (downloaded into ./data/ on first run).
data_train = datasets.MNIST(
    root="./data/", train=True, transform=transform, download=True
)
data_test = datasets.MNIST(
    root="./data/", train=False, transform=transform, download=True
)

# Mini-batch iterators over each split; both are shuffled, matching the
# original script's behavior.
data_loader_train = torch.utils.data.DataLoader(
    dataset=data_train, batch_size=64, shuffle=True
)
data_loader_test = torch.utils.data.DataLoader(
    dataset=data_test, batch_size=64, shuffle=True
)
def preview():
    """Display one training batch as an image grid and print its labels."""
    images, labels = next(iter(data_loader_train))
    grid = torchvision.utils.make_grid(images)
    # Rearrange CHW tensor into the HWC numpy layout matplotlib expects.
    grid = grid.numpy().transpose(1, 2, 0)
    # Undo Normalize((0.5,), (0.5,)) so pixel values land back in [0, 1].
    # (Equivalent to the original per-channel [0.5, 0.5, 0.5] lists.)
    grid = grid * 0.5 + 0.5
    # Use the actual batch size instead of a hard-coded 64: the original
    # raised IndexError whenever the batch was smaller than 64.
    print([labels[i] for i in range(len(labels))])
    plt.imshow(grid)
    pylab.show()
preview()
class Model(torch.nn.Module):
    """CNN for 28x28 single-channel MNIST digits: two conv layers + MLP head."""

    def __init__(self):
        super().__init__()
        # Feature extractor: 1 -> 64 -> 128 channels with 3x3 convs;
        # the final 2x2 max-pool halves 28x28 down to 14x14.
        conv_layers = [
            torch.nn.Conv2d(1, 64, kernel_size=3, stride=1, padding=1),
            torch.nn.ReLU(),
            torch.nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1),
            torch.nn.ReLU(),
            torch.nn.MaxPool2d(stride=2, kernel_size=2),
        ]
        self.conv1 = torch.nn.Sequential(*conv_layers)
        # Classifier head: flattened 128 * 14 * 14 features -> 1024 hidden
        # units (with dropout) -> 10 class logits.
        dense_layers = [
            torch.nn.Linear(14 * 14 * 128, 1024),
            torch.nn.ReLU(),
            torch.nn.Dropout(p=0.5),
            torch.nn.Linear(1024, 10),
        ]
        self.dense = torch.nn.Sequential(*dense_layers)

    def forward(self, x):
        """Return class logits of shape (batch, 10) for input (batch, 1, 28, 28)."""
        features = self.conv1(x)
        flat = features.view(-1, 14 * 14 * 128)
        return self.dense(flat)
# Build the network and echo its layer structure as a quick sanity check.
model = Model()
print(model)

# Move parameters onto the GPU when one is present.
if torch.cuda.is_available():
    model.cuda()

# Cross-entropy over the 10 digit classes; expects raw logits from the model.
LOSS_FUNCTION = torch.nn.CrossEntropyLoss()
# NOTE(review): "optimzer" is a typo for "optimizer"; the name is kept
# because the training loop below references it verbatim.
optimzer = torch.optim.Adam(model.parameters())
# Train for EPOCH_N epochs; after each epoch report average loss plus train
# and test accuracy. Data is moved to the same device as the model: the
# original called X.cuda() unconditionally and crashed on CPU-only machines
# even though the model move above was guarded. Deprecated Variable wrappers
# are dropped (no-ops since torch 0.4).
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
EPOCH_N = 5
for EPOCH in range(EPOCH_N):
    running_loss = 0.0
    running_correct = 0
    print(f"-------------{EPOCH}/{EPOCH_N}-------------")
    model.train()  # enable dropout during training
    for data in data_loader_train:
        X_train, y_train = data
        X_train, y_train = X_train.to(DEVICE), y_train.to(DEVICE)
        outputs = model(X_train)
        _, pred = torch.max(outputs.data, 1)
        LOSS = LOSS_FUNCTION(outputs, y_train)
        optimzer.zero_grad()  # NOTE(review): misspelled name defined above
        LOSS.backward()
        optimzer.step()
        running_loss += LOSS.item()
        running_correct += torch.sum(pred == y_train.data)
    testing_correct = 0
    model.eval()  # disable dropout for evaluation
    with torch.no_grad():  # no gradients needed while measuring accuracy
        for data in data_loader_test:
            X_test, y_test = data
            X_test, y_test = X_test.to(DEVICE), y_test.to(DEVICE)
            outputs = model(X_test)
            _, pred = torch.max(outputs, 1)
            testing_correct += torch.sum(pred == y_test.data)
    # NOTE(review): running_loss sums per-batch mean losses but is divided by
    # the sample count, matching the original script's reported metric.
    print("Loss is :{:.4f},Train Accuracy is:{:.4f}%,Test Accuracy is:{:.4f}%".format(
        running_loss / len(data_train),
        100 * running_correct / len(data_train),
        100 * testing_correct / len(data_test)))