import time

import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn.functional as F
from PIL import Image
from torch import nn, optim
from torchvision import datasets, transforms
# Define a transform to normalize the data: ToTensor scales pixels to [0, 1],
# Normalize((0.5,), (0.5,)) then shifts them to [-1, 1] (single channel).
transform = transforms.Compose([transforms.ToTensor(),
transforms.Normalize((0.5,),(0.5,))])
# Download and load the training data
trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True)
# Download and load the test data
# NOTE(review): shuffle=True on the test loader is unusual (evaluation does
# not need shuffling) — presumably harmless here, but confirm it is intended.
testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True)
# TODO: Define your network architecture here
# Fix: in the original paste, the line below was fused into the comment
# above ("...herefrom torch import nn, optim"), so nn/optim were never
# actually imported even though later code uses them.
from torch import nn, optim
import torch.nn.functional as F
from sklearn.metrics import accuracy_score
class Net(nn.Module):
    """LeNet-style CNN classifier for 28x28 single-channel Fashion-MNIST images.

    Architecture: conv1 -> relu -> pool -> conv2 -> relu -> pool
                  -> fc1 -> relu -> fc2 -> log_softmax over 10 classes.

    Fix: the original paste had all statements fused onto single lines
    ("classNet", "def__init__", ")return ..."), which is not valid Python;
    this reconstructs the intended definition.
    """

    def __init__(self):
        super(Net, self).__init__()
        # 1x28x28 -> conv 5x5 -> 20x24x24 -> pool 2x2 -> 20x12x12
        self.conv1 = nn.Conv2d(1, 20, 5, 1)
        # 20x12x12 -> conv 5x5 -> 50x8x8 -> pool 2x2 -> 50x4x4
        self.conv2 = nn.Conv2d(20, 50, 5, 1)
        self.fc1 = nn.Linear(4 * 4 * 50, 500)
        self.fc2 = nn.Linear(500, 10)

    def forward(self, x):
        """Return per-class log-probabilities, shape (batch, 10).

        Log-probabilities (not raw logits) pair with nn.NLLLoss.
        """
        x = F.relu(self.conv1(x))
        x = F.max_pool2d(x, 2, 2)
        x = F.relu(self.conv2(x))
        x = F.max_pool2d(x, 2, 2)
        x = x.view(-1, 4 * 4 * 50)  # flatten to (batch, 800)
        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        return F.log_softmax(x, dim=1)

    def predict(self, x):
        """Return predicted class indices for `x` as a 1-D numpy int array."""
        pred = self.forward(x).argmax(dim=1, keepdim=True).cpu().detach().numpy().reshape(1, -1)[0]
        return pred
# Create the network, define the criterion and optimizer.
# Run on GPU when available, otherwise CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# NLLLoss pairs with the log_softmax output of Net.forward.
criterion = nn.NLLLoss()
model = Net().to(device)
optimizer = optim.SGD(model.parameters(), lr=0.003)
import numpy as np
import torch
class EarlyStopping:
    """Stop training when validation loss stops improving for `patience` epochs.

    Every time validation loss reaches a new minimum, the model's state_dict
    is saved to 'checkpoint.pt'. Fixes: (1) the original paste had all
    statements fused onto single lines (not valid Python); (2) `np.Inf` was
    removed in NumPy 2.0 — `np.inf` is the supported spelling.
    """

    def __init__(self, patience=7, verbose=False, delta=0):
        """
        Args:
            patience: epochs to wait after the last improvement before stopping.
            verbose: if True, print a message on every checkpoint save.
            delta: minimum decrease in loss that counts as an improvement.
        """
        self.patience = patience
        self.verbose = verbose
        self.counter = 0            # epochs elapsed since last improvement
        self.best_score = None      # best (negated) validation loss so far
        self.early_stop = False     # set True once patience is exhausted
        self.val_loss_min = np.inf  # lowest validation loss observed
        self.delta = delta

    def __call__(self, val_loss, model):
        """Record this epoch's validation loss; checkpoint or count toward stop."""
        score = -val_loss  # negate so that "higher score" means "lower loss"
        if self.best_score is None:
            # First call: establish the baseline and save.
            self.best_score = score
            self.save_checkpoint(val_loss, model)
        elif score < self.best_score + self.delta:
            # No sufficient improvement this epoch.
            self.counter += 1
            print(f'EarlyStopping counter: {self.counter} out of {self.patience}')
            if self.counter >= self.patience:
                self.early_stop = True
        else:
            # Improved: save and reset the patience counter.
            self.best_score = score
            self.save_checkpoint(val_loss, model)
            self.counter = 0

    def save_checkpoint(self, val_loss, model):
        '''Saves model when validation loss decrease.'''
        if self.verbose:
            print(f'Validation loss decreased ({self.val_loss_min:.6f} --> {val_loss:.6f}). Saving model ...')
        torch.save(model.state_dict(), 'checkpoint.pt')
        self.val_loss_min = val_loss
epoch: 1
Average training loss:1.8171
Average Accuracy:0.4848
Test set: Average loss: 1.0191
Accuracy: 6506/10000 (65%)
total acc:0.5677
Training complete in 0m 17s
epoch: 2
Average training loss:0.8103
Average Accuracy:0.7237
Test set: Average loss: 0.7470
Accuracy: 7318/10000 (73%)
total acc:0.7277
Training complete in 0m 17s
...
epoch: 198
Average training loss:0.0796
Average Accuracy:0.9936
Test set: Average loss: 0.3552
Accuracy: 9007/10000 (90%)
total acc:0.9471
Training complete in 0m 18s
epoch: 199
Average training loss:0.0789
Average Accuracy:0.9940
Test set: Average loss: 0.3161
Accuracy: 9079/10000 (91%)
total acc:0.9509
Training complete in 0m 18s
epoch: 200
Average training loss:0.0761
Average Accuracy:0.9946
Test set: Average loss: 0.3057
Accuracy: 9109/10000 (91%)
total acc:0.9527
Training complete in 0m 18s
epoch: 1
Average training loss:1.5747
Average Accuracy:0.5249
Test set: Average loss: 0.8811
Accuracy: 6810/10000 (68%)
Validation loss decreased (inf --> 0.881058). Saving model ...
total acc:0.6029
Training complete in 0m 18s
epoch: 2
Average training loss:0.7524
Average Accuracy:0.7442
Test set: Average loss: 0.7077
Accuracy: 7396/10000 (74%)
Validation loss decreased (0.881058 --> 0.707704). Saving model ...
total acc:0.7419
Training complete in 0m 18s
...
epoch: 57
Average training loss:0.2447
Average Accuracy:0.9316
Test set: Average loss: 0.3118
Accuracy: 8885/10000 (89%)
EarlyStopping counter: 9 out of 10
total acc:0.9100
Training complete in 0m 19s
epoch: 58
Average training loss:0.2426
Average Accuracy:0.9325
Test set: Average loss: 0.3106
Accuracy: 8875/10000 (89%)
EarlyStopping counter: 10 out of 10
Early stopping
total acc:0.9100
Training complete in 0m 19s
# Count how many training samples belong to each Fashion-MNIST class and
# plot the class distribution as a bar chart.
# Fix: the original paste had the dict setup, the loop, and dead
# commented-out plotting code fused onto single lines.
text_labels = ['t-shirt', 'trouser', 'pullover', 'dress', 'coat',
               'sandal', 'shirt', 'sneaker', 'bag', 'ankle boot']
dic = {label: 0 for label in text_labels}
# batch_size=1 so each iteration yields exactly one label.
# NOTE(review): iterating trainset directly (or trainset.targets) would be
# much faster than a DataLoader here — kept as-is to preserve behavior.
trainloader_total = torch.utils.data.DataLoader(trainset, batch_size=1, shuffle=True)
for images, labels in trainloader_total:
    dic[text_labels[labels[0]]] += 1
num = [dic[label] for label in dic]
fig = plt.figure(figsize=(14, 8))
plt.bar(range(len(num)), num)
plt.xticks(range(len(num)), text_labels)
plt.ylabel("Number of samples")
import torchfrom torchvision import datasets, transformsimport timeimport numpy as npimport matplotlib.pyplot as plt from PIL import Image# Define a transform to normalize the datatransform = transforms.Compose([transforms.ToTensor(),