%matplotlib inline
import random
import torch
from d2l import torch as d2l
def synthetic_data(w, b, num_examples):  #@save
    """Generate y = Xw + b + noise."""
    X = torch.normal(0, 1, (num_examples, len(w)))
    y = torch.matmul(X, w) + b
    y += torch.normal(0, 0.01, y.shape)
    return X, y.reshape((-1, 1))
true_w = torch.tensor([2, -3.4])
true_b = 4.2
features, labels = synthetic_data(true_w, true_b, 1000)
print('features:', features[0], '\nlabel:', labels[0])
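# A quick sanity check (sketch) of the generated shapes: 1000 examples,
# 2 features each, and one label per example.
assert features.shape == (1000, 2)
assert labels.shape == (1000, 1)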
d2l.set_figsize()
d2l.plt.scatter(features[:, 1].detach().numpy(), labels.detach().numpy(), 1)
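# Since true_w[1] = -3.4, the scatter plot of the second feature against the
# label shows a clear negative linear relationship.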
def data_iter(batch_size, features, labels):
    num_examples = len(features)
    indices = list(range(num_examples))
    # The examples are read in random order, with no particular structure
    random.shuffle(indices)
    for i in range(0, num_examples, batch_size):
        batch_indices = torch.tensor(
            indices[i: min(i + batch_size, num_examples)])
        yield features[batch_indices], labels[batch_indices]
batch_size = 10
for X, y in data_iter(batch_size, features, labels):
    print(X, '\n', y)
    break
w = torch.normal(0, 0.01, size=(2,1), requires_grad=True)
b = torch.zeros(1, requires_grad=True)
def linreg(X, w, b):  #@save
    """The linear regression model."""
    return torch.matmul(X, w) + b

def squared_loss(y_hat, y):  #@save
    """Squared loss."""
    return (y_hat - y.reshape(y_hat.shape)) ** 2 / 2

def sgd(params, lr, batch_size):  #@save
    """Minibatch stochastic gradient descent."""
    with torch.no_grad():
        for param in params:
            # The loss is summed over the minibatch, so dividing the gradient
            # by batch_size takes an average-gradient step
            param -= lr * param.grad / batch_size
            param.grad.zero_()
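# A quick sketch of a single sgd step on a toy parameter p (hypothetical,
# for illustration): a summed "loss" over a batch of 4 gives p.grad = 4, and
# dividing by batch_size recovers the average gradient, so the update is lr * 1.
p = torch.tensor([1.0], requires_grad=True)
(p * torch.ones(4)).sum().backward()
sgd([p], lr=0.1, batch_size=4)
print(p)  # tensor([0.9000], requires_grad=True)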
lr = 0.03
num_epochs = 3
net = linreg
loss = squared_loss
for epoch in range(num_epochs):
    for X, y in data_iter(batch_size, features, labels):
        l = loss(net(X, w, b), y)  # Minibatch loss on X and y
        # l has shape (batch_size, 1) rather than being a scalar, so sum its
        # elements and compute the gradients with respect to [w, b]
        l.sum().backward()
        sgd([w, b], lr, batch_size)  # Update parameters using their gradients
    with torch.no_grad():
        train_l = loss(net(features, w, b), labels)
        print(f'epoch {epoch + 1}, loss {float(train_l.mean()):f}')
print(f'error in estimating w: {true_w - w.reshape(true_w.shape)}')
print(f'error in estimating b: {true_b - b}')
# Exercise: what happens if we initialize the weights to zero? Does the
# algorithm still work?
w = torch.zeros((2, 1), requires_grad=True)
b = torch.zeros(1, requires_grad=True)
for epoch in range(num_epochs):
    for X, y in data_iter(batch_size, features, labels):
        l = loss(net(X, w, b), y)  # Minibatch loss on X and y
        l.sum().backward()
        sgd([w, b], lr, batch_size)  # Update parameters using their gradients
    with torch.no_grad():
        train_l = loss(net(features, w, b), labels)
        print(f'epoch {epoch + 1}, loss {float(train_l.mean()):f}')
print(f'error in estimating w: {true_w - w.reshape(true_w.shape)}')
print(f'error in estimating b: {true_b - b}')
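# Answer: yes, the algorithm still works. The squared loss of linear
# regression is convex in (w, b), so gradient descent converges from a zero
# initialization; symmetric initializations only cause trouble in multilayer
# networks, where they prevent units from differentiating.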
# Exercise: suppose you are Georg Simon Ohm, trying to model the relationship
# between voltage and current. Can you use automatic differentiation to learn
# the parameters of the model?
def synthetic_data_IU(r, b, num_examples):  #@save
    """Generate current-voltage pairs according to u = I*r + b."""
    I = torch.rand(num_examples, 1)
    u = I * r + b
    return I, u.reshape((-1, 1))

true_r = torch.tensor([2.0])
true_b = 0.2
features, labels = synthetic_data_IU(true_r, true_b, 1000)
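# Sanity check: I is uniform on [0, 1), so with true_r = 2 and true_b = 0.2
# the voltage u lies in [0.2, 2.2).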
def data_iter_IU(batch_size, features, labels):
    num_examples = len(features)
    indices = list(range(num_examples))
    random.shuffle(indices)
    for i in range(0, num_examples, batch_size):
        batch_indices = torch.tensor(
            indices[i: min(i + batch_size, num_examples)])
        yield features[batch_indices], labels[batch_indices]
batch_size = 10
for I, u in data_iter_IU(batch_size, features, labels):
    print(I, '\n', u)
    break
r = torch.zeros(1, requires_grad=True)
b = torch.zeros(1, requires_grad=True)

def linreg(I, r, b):
    """The linear regression model for Ohm's law: u = I*r + b."""
    return I * r + b
lr = 0.03
num_epochs = 3
net = linreg
loss = squared_loss

for epoch in range(num_epochs):
    for I, u in data_iter_IU(batch_size, features, labels):
        l = loss(net(I, r, b), u)  # Minibatch loss on I and u
        l.sum().backward()
        sgd([r, b], lr, batch_size)  # Update parameters using their gradients
    with torch.no_grad():
        train_l = loss(net(features, r, b), labels)
        print(f'epoch {epoch + 1}, loss {float(train_l.mean()):f}')
print(f'error in estimating r: {true_r - r.reshape(true_r.shape)}')
print(f'error in estimating b: {true_b - b}')
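# With lr = 0.03 the estimates should approach true_r = 2 and true_b = 0.2
# within a few epochs.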
# Try a different learning rate and observe how quickly the loss decreases.
# Re-initialize the parameters so each learning rate starts from the same point.
r = torch.zeros(1, requires_grad=True)
b = torch.zeros(1, requires_grad=True)
lr = 0.0003
num_epochs = 3

for epoch in range(num_epochs):
    for I, u in data_iter_IU(batch_size, features, labels):
        l = loss(net(I, r, b), u)
        l.sum().backward()
        sgd([r, b], lr, batch_size)
    with torch.no_grad():
        train_l = loss(net(features, r, b), labels)
        print(f'epoch {epoch + 1}, loss {float(train_l.mean()):f}')
print(f'error in estimating r: {true_r - r.reshape(true_r.shape)}')
print(f'error in estimating b: {true_b - b}')
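# With such a small learning rate the loss barely decreases in 3 epochs; far
# more epochs (or a larger learning rate) would be needed to converge.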
# Try a much larger learning rate with the same setup.
r = torch.zeros(1, requires_grad=True)
b = torch.zeros(1, requires_grad=True)
lr = 3
num_epochs = 3

for epoch in range(num_epochs):
    for I, u in data_iter_IU(batch_size, features, labels):
        l = loss(net(I, r, b), u)
        l.sum().backward()
        sgd([r, b], lr, batch_size)
    with torch.no_grad():
        train_l = loss(net(features, r, b), labels)
        print(f'epoch {epoch + 1}, loss {float(train_l.mean()):f}')
print(f'error in estimating r: {true_r - r.reshape(true_r.shape)}')
print(f'error in estimating b: {true_b - b}')
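# A learning rate this large overshoots the minimum: the loss typically grows
# or becomes nan instead of decreasing.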
Concise Implementation of Linear Regression
import numpy as np
import torch
from torch.utils import data
from d2l import torch as d2l
# Generate the dataset
true_w = torch.tensor([2, -3.4])
true_b = 4.2
features, labels = d2l.synthetic_data(true_w, true_b, 1000)

# Read the dataset
def load_array(data_arrays, batch_size, is_train=True):  #@save
    """Construct a PyTorch data iterator."""
    dataset = data.TensorDataset(*data_arrays)
    return data.DataLoader(dataset, batch_size, shuffle=is_train)
batch_size = 10
data_iter = load_array((features, labels), batch_size)
next(iter(data_iter))
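# next(iter(data_iter)) returns one minibatch as a list [X, y], with X of
# shape (10, 2) and y of shape (10, 1).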
# Define the model
# nn is an abbreviation for neural networks
from torch import nn
net = nn.Sequential(nn.Linear(2, 1))
# Initialize the weights from N(0, 0.01) and the bias to zero
net[0].weight.data.normal_(0, 0.01)
net[0].bias.data.fill_(0)
# Define the loss functions
loss = nn.MSELoss()    # squared loss
loss1 = nn.L1Loss()    # L1 loss, used in the exercise below
# Define the optimization algorithm
trainer = torch.optim.SGD(net.parameters(), lr=0.03)
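# Note: nn.MSELoss averages over the minibatch by default (reduction='mean'),
# so unlike the scratch sgd above there is no explicit division by batch_size,
# and lr=0.03 gives comparable step sizes.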
# Train the model
num_epochs = 3
for epoch in range(num_epochs):
    for X, y in data_iter:
        l = loss(net(X), y)
        trainer.zero_grad()
        l.backward()
        trainer.step()
    l = loss(net(features), labels)
    print(f'epoch {epoch + 1}, loss {l:f}')
w = net[0].weight.data
print('error in estimating w:', true_w - w.reshape(true_w.shape))
b = net[0].bias.data
print('error in estimating b:', true_b - b)
# Exercise: if the total minibatch loss is replaced by the average minibatch
# loss, how should the learning rate be changed?
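# Answer: the averaged gradient is batch_size times smaller than the summed
# one, so the learning rate should be multiplied by batch_size to keep the
# same effective step.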
# Define the optimization algorithm (this run switches to the L1 loss defined
# above, with a smaller learning rate)
optimizer = torch.optim.SGD(net.parameters(), lr=0.0001)

# Train the model
num_epochs = 3
for epoch in range(num_epochs):
    for X, y in data_iter:
        l = loss1(net(X), y)
        optimizer.zero_grad()
        l.backward()
        optimizer.step()
    l = loss1(net(features), labels)
    print(f'epoch {epoch + 1}, loss {l:f}')
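# Note: the L1 loss reports mean absolute error, so these loss values are not
# directly comparable to the squared-error values printed above.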
Reference: Dive into Deep Learning (Mu Li et al.), https://zh-v2.d2l.ai/chapter_introduction/index.html