一.Dropout原理
简单来说,就是在前向传播的时候,让某个神经元的激活值以一定的概率p停止工作,这样可以使模型泛化性更强,防止过拟合。
# Network dimensions for an MNIST-style classifier:
# 28x28 flattened input, one hidden layer, 10 output classes.
dim_in = 28 * 28
dim_hidden = 158
dim_out = 10
class TwoLayerNet(torch.nn.Module):
    """Two-layer fully connected classifier with dropout regularization.

    Flattens the input to (N, dim_in), applies fc1 -> ReLU -> dropout(p=0.5)
    -> fc2, and returns per-class log-probabilities via log_softmax.
    """

    def __init__(self, dim_in, dim_hidden, dim_out):
        # BUGFIX: was `super(...).__init()` (missing trailing underscores),
        # which raises AttributeError before the module is initialized.
        super(TwoLayerNet, self).__init__()
        # BUGFIX: original used undefined names `din_hid` / `dim_hid`;
        # both layer sizes come from the `dim_hidden` constructor argument.
        self.fc1 = torch.nn.Linear(dim_in, dim_hidden, bias=True)
        self.fc2 = torch.nn.Linear(dim_hidden, dim_out, bias=True)

    def forward(self, x):
        # Flatten any (N, ...) input to (N, dim_in).
        x = x.view(x.size(0), -1)
        x = self.fc1(x)
        x = F.relu(x)
        # BUGFIX: functional dropout defaults to training=True, so it would
        # keep dropping activations even in eval mode; tie it to the module's
        # training flag so model.eval() disables it.
        x = F.dropout(x, p=0.5, training=self.training)
        x = self.fc2(x)
        # Log-probabilities over the class dimension.
        return F.log_softmax(x, dim=1)
#之后定义模型
二.pytorch实现L1、L2正则化
import torch
from torch.nn import functional as F
from torch.autograd import Variable
class MLP(torch.nn.Module):
def __init__(self):
super(MLP,self).__init__()
se