Generating the Dataset
We generate a synthetic dataset according to

$\mathbf{y} = \mathbf{X}\mathbf{w} + b + \boldsymbol{\epsilon}$

with $\mathbf{X} \in \mathbb{R}^{1000 \times 2}$, $\mathbf{w} = [2, -3.4]^\top$, $b = 4.2$, and $\boldsymbol{\epsilon} \sim \mathcal{N}(0, 0.01)$.
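The snippet below builds this dataset. It is a sketch of the book's synthetic_data helper, assuming the d2l package is installed: X is drawn from a standard normal distribution and Gaussian noise with standard deviation 0.01 is added to the labels. The imports cover everything used later in this section.

import random
import torch
from d2l import torch as d2l

def synthetic_data(w, b, num_examples):  #@save
    """Generate y = Xw + b + noise."""
    X = torch.normal(0, 1, (num_examples, len(w)))
    y = torch.matmul(X, w) + b
    y += torch.normal(0, 0.01, y.shape)  # noise with std 0.01
    return X, y.reshape((-1, 1))

true_w = torch.tensor([2, -3.4])
true_b = 4.2
features, labels = synthetic_data(true_w, true_b, 1000)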
# Visualize the linear relationship between the second feature and the labels
d2l.set_figsize()
d2l.plt.scatter(features[:, 1].detach().numpy(), labels.detach().numpy(), 1);
Reading the Dataset
def data_iter(batch_size, features, labels):
    num_examples = len(features)
    indices = list(range(num_examples))
    # The examples are read in random order, with no particular structure
    random.shuffle(indices)  # random.shuffle permutes a list in place
    for i in range(0, num_examples, batch_size):
        batch_indices = torch.tensor(
            indices[i: min(i + batch_size, num_examples)])
        yield features[batch_indices], labels[batch_indices]
batch_size = 10

for X, y in data_iter(batch_size, features, labels):
    print(X, '\n', y)
    break
Initializing Model Parameters
w = torch.normal(0, 0.01, size=(2, 1), requires_grad=True)  # weights ~ N(0, 0.01), with gradient tracking
b = torch.zeros(1, requires_grad=True)  # bias initialized to zero
Defining the Model
def linreg(X, w, b):  #@save
    """The linear regression model."""
    return torch.matmul(X, w) + b
Loss Function
$l^{(i)}(\mathbf{w}, b) = \frac{1}{2}\left(\hat{y}^{(i)} - y^{(i)}\right)^2$
def squared_loss(y_hat, y):  #@save
    """Squared loss."""
    # Reshape y so that it broadcasts correctly against y_hat
    return (y_hat - y.reshape(y_hat.shape)) ** 2 / 2
Optimization Algorithm
$(\mathbf{w}, b) \leftarrow (\mathbf{w}, b) - \frac{\eta}{|\mathcal{B}|} \sum_{i \in \mathcal{B}} \partial_{(\mathbf{w}, b)} l^{(i)}(\mathbf{w}, b)$

Here $\eta$ is the learning rate and $|\mathcal{B}|$ is the number of examples in each minibatch.
def sgd(params, lr, batch_size):  #@save
    """Minibatch stochastic gradient descent."""
    with torch.no_grad():  # disable gradient tracking during the update
        for param in params:
            param -= lr * param.grad / batch_size
            param.grad.zero_()  # reset the gradient for the next batch
Training
lr = 0.03  # learning rate
num_epochs = 3  # number of epochs
net = linreg
loss = squared_loss

for epoch in range(num_epochs):
    for X, y in data_iter(batch_size, features, labels):
        l = loss(net(X, w, b), y)  # minibatch loss on X and y
        # Since l has shape (batch_size, 1) rather than being a scalar,
        # sum its elements to compute the gradients with respect to [w, b]
        l.sum().backward()
        sgd([w, b], lr, batch_size)  # update the parameters using their gradients
    with torch.no_grad():
        train_l = loss(net(features, w, b), labels)
        print(f'epoch {epoch + 1}, loss {float(train_l.mean()):f}')
print(f'estimated w: {w.reshape(true_w.shape)}')
print(f'estimated b: {b}')

estimated w: tensor([ 1.9997, -3.3994], grad_fn=)
estimated b: tensor([4.1998], requires_grad=True)
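Since we generated the data ourselves, we can also measure the estimation error directly, as the book does (using true_w and true_b from the data-generation step above); both errors should be close to zero:

print(f'error in estimating w: {true_w - w.reshape(true_w.shape)}')
print(f'error in estimating b: {true_b - b}')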
References
Dive into Deep Learning (《动手学深度学习》), Mu Li