Classifying images from Fashion MNIST using feedforward neural networks
import torch
import torchvision
import numpy as np
import matplotlib.pyplot as plt
import torch.nn as nn
import torch.nn.functional as F
from torchvision.datasets import FashionMNIST
from torchvision.transforms import ToTensor
from torchvision.utils import make_grid
from torch.utils.data.dataloader import DataLoader
from torch.utils.data import random_split
%matplotlib inline
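The cells that build the Fashion MNIST dataset and data loaders are not shown above, so here is a minimal sketch of that step, assuming a 10,000-image validation split carved out with random_split; val_size, train_ds, val_ds and the loader names are assumptions, while batch_size = 128 matches the batch shape printed below.

dataset = FashionMNIST(root='data/', download=True, transform=ToTensor())

val_size = 10000                          # assumed size of the validation split
train_size = len(dataset) - val_size
train_ds, val_ds = random_split(dataset, [train_size, val_size])

batch_size = 128                          # matches the printed batch shape below
train_loader = DataLoader(train_ds, batch_size, shuffle=True, num_workers=4, pin_memory=True)
val_loader = DataLoader(val_ds, batch_size*2, num_workers=4, pin_memory=True)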
for images, labels in train_loader:
    print('images.shape:', images.shape)
    plt.figure(figsize=(16,8))
    plt.axis('off')
    plt.imshow(make_grid(images, nrow=16).permute((1,2,0)))
    break
images.shape: torch.Size([128, 1, 28, 28])
Model
def accuracy(outputs, labels):
    _, preds = torch.max(outputs, dim=1)
    return torch.tensor(torch.sum(preds == labels).item() / len(preds))

class MnistModel(nn.Module):
    """Feedforward neural network with 2 hidden layers"""
    def __init__(self, in_size, out_size):
        super().__init__()
        self.linear1 = nn.Linear(in_size, 16)   # hidden layer 1
        self.linear2 = nn.Linear(16, 32)        # hidden layer 2
        self.linear3 = nn.Linear(32, out_size)  # output layer

    def forward(self, xb):
        out = xb.view(xb.size(0), -1)  # Flatten the image tensors
        out = self.linear1(out)        # Get intermediate outputs using hidden layer 1
        out = F.relu(out)              # Apply activation function
        out = self.linear2(out)        # Get intermediate outputs using hidden layer 2
        out = F.relu(out)              # Apply activation function
        out = self.linear3(out)        # Get predictions using output layer
        return out

    def training_step(self, batch):
        images, labels = batch
        out = self(images)                   # Generate predictions
        loss = F.cross_entropy(out, labels)  # Calculate loss
        return loss

    def validation_step(self, batch):
        images, labels = batch
        out = self(images)                   # Generate predictions
        loss = F.cross_entropy(out, labels)  # Calculate loss
        acc = accuracy(out, labels)          # Calculate accuracy
        return {'val_loss': loss, 'val_acc': acc}

    def validation_epoch_end(self, outputs):
        batch_losses = [x['val_loss'] for x in outputs]
        epoch_loss = torch.stack(batch_losses).mean()   # Combine losses
        batch_accs = [x['val_acc'] for x in outputs]
        epoch_acc = torch.stack(batch_accs).mean()      # Combine accuracies
        return {'val_loss': epoch_loss.item(), 'val_acc': epoch_acc.item()}

    def epoch_end(self, epoch, result):
        print("Epoch [{}], val_loss: {:.4f}, val_acc: {:.4f}".format(
            epoch, result['val_loss'], result['val_acc']))
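With the model class defined, it can be instantiated with the flattened image size as input and one output per class. A minimal sketch; the names input_size and num_classes are assumptions, but the values follow from the 1 x 28 x 28 images and the 10 Fashion MNIST categories.

input_size = 28 * 28      # each 1 x 28 x 28 image is flattened into 784 values
num_classes = 10          # Fashion MNIST has 10 clothing categories
model = MnistModel(input_size, out_size=num_classes)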
Using a GPU
torch.cuda.is_available()
True
def get_default_device():
    """Pick GPU if available, else CPU"""
    if torch.cuda.is_available():
        return torch.device('cuda')
    else:
        return torch.device('cpu')
device = get_default_device()
device
device(type='cuda')
def to_device(data, device):
    """Move tensor(s) to chosen device"""
    if isinstance(data, (list, tuple)):
        return [to_device(x, device) for x in data]
    return data.to(device, non_blocking=True)
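As a quick sanity check, to_device can move a single batch to the chosen device; a small illustrative example (images is assumed to be the batch from the visualization loop above, images_gpu is a hypothetical name).

print(images.device)                     # expected: cpu
images_gpu = to_device(images, device)
print(images_gpu.device)                 # expected: cuda:0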
class DeviceDataLoader():
    """Wrap a dataloader to move data to a device"""
    def __init__(self, dl, device):
        self.dl = dl
        self.device = device

    def __iter__(self):
        """Yield a batch of data after moving it to device"""
        for b in self.dl:
            yield to_device(b, self.device)

    def __len__(self):
        """Number of batches"""
        return len(self.dl)
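The training cells that produce the history plotted below are not included above. The sketch that follows is one way to wrap the loaders with DeviceDataLoader, move the model to the GPU, and train; evaluate, fit, the learning rate and epoch count are assumptions, not the notebook's exact settings.

train_loader = DeviceDataLoader(train_loader, device)
val_loader = DeviceDataLoader(val_loader, device)

def evaluate(model, val_loader):
    """Evaluate the model on the whole validation set"""
    outputs = [model.validation_step(batch) for batch in val_loader]
    return model.validation_epoch_end(outputs)

def fit(epochs, lr, model, train_loader, val_loader, opt_func=torch.optim.SGD):
    """Train the model and record validation metrics after each epoch"""
    history = []
    optimizer = opt_func(model.parameters(), lr)
    for epoch in range(epochs):
        # Training phase
        for batch in train_loader:
            loss = model.training_step(batch)
            loss.backward()
            optimizer.step()
            optimizer.zero_grad()
        # Validation phase
        result = evaluate(model, val_loader)
        model.epoch_end(epoch, result)
        history.append(result)
    return history

to_device(model, device)                                   # move model parameters to the GPU
history = fit(10, 0.5, model, train_loader, val_loader)    # learning rate and epoch count are assumptions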
losses = [x['val_loss'] for x in history]
plt.plot(losses, '-x')
plt.xlabel('epoch')
plt.ylabel('loss')
plt.title('Loss vs. No. of epochs');
accuracies = [x['val_acc'] for x in history]
plt.plot(accuracies, '-x')
plt.xlabel('epoch')
plt.ylabel('accuracy')
plt.title('Accuracy vs. No. of epochs');