# The previous two sections operated on a single batch; this section combines those steps and trains and tests the model on the full MNIST dataset.
import torch
from torch import nn
from torch import optim
import torch.nn.functional as F
from torchvision import datasets,transforms
# Convert PIL images to [0,1] tensors, then normalize to roughly [-1,1]: (x - 0.5) / 0.5.
transform = transforms.Compose([transforms.ToTensor(),
transforms.Normalize((0.5,),(0.5,))])
# Download (if not cached) and load the MNIST training split with the transform applied.
trainset = datasets.MNIST('MNIST_data/',download=True,train=True,transform=transform)
# Iterate the training set in shuffled mini-batches of 64.
trainloader = torch.utils.data.DataLoader(trainset,batch_size=64,shuffle=True)
# Train on the entire dataset
class MyFC(nn.Module):
    """Fully connected MNIST classifier: 784 -> 128 -> 64 -> 10, log-softmax output."""

    def __init__(self):
        super().__init__()
        # Three linear layers shrink the flattened 28x28 image to 10 class scores.
        self.fc1 = nn.Linear(784, 128)
        self.fc2 = nn.Linear(128, 64)
        self.fc3 = nn.Linear(64, 10)

    def forward(self, x):
        # Hidden layers use ReLU activations.
        hidden = F.relu(self.fc1(x))
        hidden = F.relu(self.fc2(hidden))
        # log_softmax pairs with nn.NLLLoss for classification training.
        return F.log_softmax(self.fc3(hidden), dim=1)
# Instantiate the classifier defined above.
model = MyFC()
cost