# Implementing dropout and batch normalization in an MLP
# Basic network code
import torch as pt
import torchvision as ptv
import numpy as np
# Download (if needed) and load MNIST; ToTensor() converts PIL images to
# float tensors in [0, 1] with shape (1, 28, 28).
train_set = ptv.datasets.MNIST("../../pytorch_database/mnist/train",train=True,transform=ptv.transforms.ToTensor(),download=True)
test_set = ptv.datasets.MNIST("../../pytorch_database/mnist/test",train=False,transform=ptv.transforms.ToTensor(),download=True)
# NOTE: despite the names, these are DataLoaders (mini-batches of 100), not datasets.
train_dataset = pt.utils.data.DataLoader(train_set,batch_size=100)
test_dataset = pt.utils.data.DataLoader(test_set,batch_size=100)
class MLP(pt.nn.Module):
    """Three-layer fully connected classifier for 28x28 MNIST digits.

    Architecture: 784 -> 512 -> 128 -> 10 with ReLU activations between
    the hidden layers. The forward pass returns raw (unnormalized) logits;
    see the note in forward() for why.
    """

    def __init__(self):
        super(MLP, self).__init__()
        self.fc1 = pt.nn.Linear(784, 512)
        self.fc2 = pt.nn.Linear(512, 128)
        self.fc3 = pt.nn.Linear(128, 10)

    def forward(self, din):
        # Flatten (N, 1, 28, 28) image batches to (N, 784); already-flat
        # input passes through unchanged.
        din = din.view(-1, 28 * 28)
        dout = pt.nn.functional.relu(self.fc1(din))
        dout = pt.nn.functional.relu(self.fc2(dout))
        # BUG FIX: the original returned softmax(fc3(dout)), but this model is
        # trained with pt.nn.CrossEntropyLoss, which applies log_softmax
        # internally. Feeding softmax probabilities into CrossEntropyLoss
        # squashes the gradients and hurts training. Return raw logits instead;
        # argmax-based accuracy is unchanged because softmax is monotonic
        # within each row. (The original call also omitted softmax's `dim`
        # argument, which is deprecated.)
        return self.fc3(dout)
# Instantiate the network and move its parameters to the GPU (requires CUDA).
model = MLP().cuda()
print(model)
# Loss function and optimizer: SGD with momentum over all model parameters.
optimizer = pt.optim.SGD(model.parameters(),lr=0.01,momentum=0.9)
# NOTE(review): CrossEntropyLoss expects raw logits (it applies log_softmax
# internally), but MLP.forward applies softmax to its output — verify the
# forward pass returns logits, otherwise gradients are weakened.
lossfunc = pt.nn.CrossEntropyLoss().cuda()
# accuarcy
def AccuarcyCompute(pred,label):
pred = pred.cpu().data.numpy()
label = label.cpu().data.numpy()
test_np = (np.argmax(pred,1) == label)
test_np = np