A network whose weight matrices are small corresponds to a simpler model that is less prone to overfitting. To push the weights toward small values, we add a penalty term to the loss function: the larger the weights, the larger the penalty, so minimizing the combined loss drives the weights to shrink.
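In other words, total_loss = data_loss + lam * penalty, where for L1 the penalty is the mean absolute value of the parameters and for L2 it is the mean squared value. The RegL1Loss module below computes the L1 version, averaged over the model's parameter tensors.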
import torch
import torch.nn as nn

class RegL1Loss(nn.Module):
    def __init__(self, lam, params):
        super(RegL1Loss, self).__init__()
        self.lam = lam                   # regularization strength (lambda)
        self.params = list(params)       # parameter tensors to penalize
        self.n_param = len(self.params)

    def forward(self):
        ls = 0.0
        for param in self.params:
            # L1 regularization: mean absolute value of each parameter tensor
            ls += torch.mean(torch.abs(param))
            # L2 regularization would use: torch.mean(torch.pow(param, 2))
        return self.lam * ls / self.n_param
class Network(nn.Module):
    def __init__(self):
        super(Network, self).__init__()
        self.model = nn.Sequential(
            nn.Linear(19, 128),
            nn.ReLU(),
            nn.Linear(128, 256),
            nn.ReLU(),
            nn.Linear(256, 3)
        )

    def forward(self, x):
        return self.model(x)
if __name__ == '__main__':
    net = Network()
    loss_fn = nn.MSELoss()
    reg_l1_loss_fn = RegL1Loss(lam=0.1, params=net.parameters())
    # _loss = loss_fn(None, None)  # data loss, computed from predictions and targets
    _ll_loss = reg_l1_loss_fn()    # regularization penalty term
    print(_ll_loss)
    # _loss = _loss + _ll_loss     # total loss = data loss + penalty
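Continuing the __main__ block above, a minimal training-step sketch shows how the penalty is actually used: the data loss and the regularization loss are summed before calling backward(). The random input/target tensors and the SGD learning rate below are illustrative assumptions, not part of the original example.

    # Minimal training-step sketch (random data and lr=0.01 are assumptions)
    optimizer = torch.optim.SGD(net.parameters(), lr=0.01)
    x = torch.randn(8, 19)                      # batch of 8 samples, 19 features
    y = torch.randn(8, 3)                       # matching targets, 3 outputs

    pred = net(x)
    data_loss = loss_fn(pred, y)                # MSE between predictions and targets
    total_loss = data_loss + reg_l1_loss_fn()   # add the L1 penalty
    optimizer.zero_grad()
    total_loss.backward()
    optimizer.step()
    print(total_loss)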