Using the squared norm as a hard constraint
- Control model capacity by restricting the range of values the parameters may take: $\min\; l(w,b) \quad \text{subject to} \quad \left\|w\right\|^2 \leq \theta$ (a projection sketch for enforcing this follows the list)
- The bias b is usually left unconstrained (constraining it makes little difference)
- A smaller $\theta$ means stronger regularization
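The constrained form is rarely optimized directly; as a minimal sketch (the function project_onto_ball and the threshold theta are illustrative, not from the original notes), one way to enforce it is projected gradient descent, rescaling w back onto the ball after each update:

import torch

def project_onto_ball(w, theta):
    # If ||w||^2 exceeds theta, rescale w in place so that ||w||^2 == theta;
    # otherwise leave it unchanged.
    with torch.no_grad():
        norm_sq = torch.sum(w ** 2)
        if norm_sq > theta:
            w *= (theta / norm_sq).sqrt()
    return w

w = torch.randn(10, 1, requires_grad=True)
project_onto_ball(w, theta=1.0)
print(torch.sum(w ** 2).item())  # <= 1.0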
Using the squared norm as a soft constraint
- For every $\theta$, one can find a $\lambda$ such that the constrained objective above is equivalent to
  $$\min\; l(w, b) + \frac{\lambda}{2} \left\|w\right\|^2$$
- This can be shown via Lagrange multipliers (see the sketch after this list)
- The hyperparameter $\lambda$ controls how strongly the regularization term weighs in:
  - $\lambda = 0$: no effect
  - $\lambda \rightarrow \infty$: $w^* \rightarrow 0$
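As a sketch of the Lagrange-multiplier argument mentioned above (a standard derivation, not spelled out in the original notes), introduce a multiplier $\lambda \geq 0$ for the constraint $\left\|w\right\|^2 \leq \theta$:

$$\max_{\lambda \geq 0}\; \min_{w, b}\; l(w, b) + \frac{\lambda}{2}\left(\left\|w\right\|^2 - \theta\right)$$

At the optimal multiplier $\lambda^*$, the inner minimization has the same solution as the constrained problem, and the constant $-\frac{\lambda^* \theta}{2}$ does not affect the minimizer, which leaves exactly the penalized objective above. (The factor $\frac{1}{2}$ is only a rescaling of the multiplier.)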
Parameter update rule
- Compute the gradient: $\frac{\partial}{\partial w}\left(l(w,b) + \frac{\lambda}{2}\left\|w\right\|^2\right) = \frac{\partial l(w, b)}{\partial w} + \lambda w$
- Update the parameters at time $t$:
  $$w_{t+1} = (1-\eta \lambda) w_t - \eta \frac{\partial l(w_t, b_t)}{\partial w_t}$$
- Usually $\eta \lambda \leq 1$; because each step first shrinks $w_t$ by the factor $(1-\eta\lambda)$, this is commonly called weight decay in deep learning (see the sketch below)
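A minimal sketch of this update written out by hand (the names lr, lambd, and grad are illustrative stand-ins, not from the original):

import torch

lr, lambd = 0.1, 0.01     # learning rate eta and regularization weight lambda
w = torch.randn(3, 1)
grad = torch.randn(3, 1)  # stands in for the loss gradient dl(w_t, b_t)/dw_t

# Weight decay: shrink w by (1 - eta*lambda), then take the usual gradient step
w_next = (1 - lr * lambd) * w - lr * grad

# Equivalent view: fold lambda*w into the gradient of the penalized loss
assert torch.allclose(w_next, w - lr * (grad + lambd * w))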
Summary
- Weight decay adds an L2 penalty that keeps the model parameters from growing too large, thereby controlling model complexity.
- The regularization weight $\lambda$ is the hyperparameter that controls model complexity.
Weight decay
Weight decay is one of the most widely used regularization techniques.
import torch
from torch import nn
from d2l import torch as d2l
Generate some data as before; with only 20 training examples in a 200-dimensional input space, the setup is deliberately prone to overfitting:
$$y = 0.05 + \sum_{i=1}^{d} 0.01 x_i + \epsilon \quad \text{where } \epsilon \sim \mathcal{N}(0, 0.01^2)$$
n_train, n_test, num_inputs, batch_size = 20, 100, 200, 5
true_w, true_b = torch.ones((num_inputs, 1)) * 0.01, 0.05
train_data = d2l.synthetic_data(true_w, true_b, n_train)
train_iter = d2l.load_array(train_data, batch_size)
test_data = d2l.synthetic_data(true_w, true_b, n_test)
test_iter = d2l.load_array(test_data, batch_size, is_train=False)
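For reference, d2l.synthetic_data draws Gaussian features and produces labels as a noisy linear function of them; it behaves roughly like the following sketch (a paraphrase, with the 0.01 noise standard deviation matching the formula above):

def synthetic_data_sketch(w, b, num_examples):
    # X ~ N(0, 1); y = Xw + b + eps, with eps ~ N(0, 0.01^2)
    X = torch.normal(0, 1, (num_examples, len(w)))
    y = torch.matmul(X, w) + b
    y += torch.normal(0, 0.01, y.shape)
    return X, y.reshape((-1, 1))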
Initialize the model parameters:
def init_params():
    w = torch.normal(0, 1, size=(num_inputs, 1), requires_grad=True)
    b = torch.zeros(1, requires_grad=True)
    return [w, b]
Define the $L_2$ norm penalty:
def l2_penalty(w):
    return torch.sum(w.pow(2)) / 2
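A quick sanity check of the penalty on a small tensor (values chosen purely for illustration):

w_demo = torch.ones(4, 1) * 2.0
print(l2_penalty(w_demo))  # sum(w^2) / 2 = 4 * 2.0**2 / 2 = tensor(8.)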
Define the training function:
def train(lambd):
    w, b = init_params()
    net, loss = lambda X: d2l.linreg(X, w, b), d2l.squared_loss
    num_epochs, lr = 100, 0.003
    animator = d2l.Animator(xlabel='epochs', ylabel='loss', yscale='log',
                            xlim=[5, num_epochs], legend=['train', 'test'])
    for epoch in range(num_epochs):
        for X, y in train_iter:
            # Add the L2 penalty term; broadcasting makes l2_penalty(w)
            # a vector of length `batch_size`.
            l = loss(net(X), y) + lambd * l2_penalty(w)
            l.sum().backward()
            d2l.sgd([w, b], lr, batch_size)
        if (epoch + 1) % 5 == 0:
            animator.add(epoch + 1, (d2l.evaluate_loss(net, train_iter, loss),
                                     d2l.evaluate_loss(net, test_iter, loss)))
    print('L2 norm of w:', torch.norm(w).item())
train(lambd=0)
L2 norm of w: 13.561446189880371
train(lambd=3)
L2 norm of w: 0.3731406033039093
With lambd=3 the weight norm is far smaller, and the gap between training and test loss narrows accordingly.
In the concise implementation, weight decay is specified directly through the optimizer's weight_decay argument:
def train_concise(wd):
    net = nn.Sequential(nn.Linear(num_inputs, 1))
    for param in net.parameters():
        param.data.normal_()
    loss = nn.MSELoss()
    num_epochs, lr = 100, 0.003
    # The bias parameter does not decay.
    trainer = torch.optim.SGD([
        {"params": net[0].weight, 'weight_decay': wd},
        {"params": net[0].bias}], lr=lr)
    animator = d2l.Animator(xlabel='epochs', ylabel='loss', yscale='log',
                            xlim=[5, num_epochs], legend=['train', 'test'])
    for epoch in range(num_epochs):
        for X, y in train_iter:
            trainer.zero_grad()
            l = loss(net(X), y)
            l.backward()
            trainer.step()
        if (epoch + 1) % 5 == 0:
            animator.add(epoch + 1, (d2l.evaluate_loss(net, train_iter, loss),
                                     d2l.evaluate_loss(net, test_iter, loss)))
    print('L2 norm of w:', net[0].weight.norm().item())
train_concise(0)
L2 norm of w: 13.962190628051758
train_concise(3)
L2 norm of w: 0.41519707441329956
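Both implementations should agree: PyTorch's SGD applies weight_decay by adding wd * w to the gradient, which is exactly the gradient of the $\frac{\lambda}{2}\left\|w\right\|^2$ penalty used in the from-scratch version. A minimal check (tensor values are illustrative):

import torch

lr, wd = 0.1, 3.0
w_ref = torch.randn(5, 1)

# Path 1: built-in weight decay in the optimizer
w1 = w_ref.clone().requires_grad_(True)
opt = torch.optim.SGD([w1], lr=lr, weight_decay=wd)
(w1 ** 2).sum().backward()  # stand-in loss; any differentiable loss works
opt.step()

# Path 2: add the (wd/2) * ||w||^2 penalty to the loss by hand
w2 = w_ref.clone().requires_grad_(True)
((w2 ** 2).sum() + wd / 2 * (w2 ** 2).sum()).backward()
with torch.no_grad():
    w2 -= lr * w2.grad

print(torch.allclose(w1, w2))  # True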