Fundamentals:
Gradient descent solves for the parameters by repeatedly updating them along the negative gradient direction. The goal is to minimize the loss function: take the partial derivatives of the loss with respect to the model parameters to obtain the gradient, then negate it to get the descent direction (hence "gradient descent").
In code we use minibatch stochastic gradient descent, the default optimization algorithm in deep learning.
It has two important hyperparameters (hyperparameters are set by hand rather than learned): the learning rate and the batch size.
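Both hyperparameters appear directly in the update rule. Written out in the standard form (this is exactly what the sgd function below implements), one minibatch SGD step is

$(\mathbf{w}, b) \leftarrow (\mathbf{w}, b) - \frac{\eta}{|\mathcal{B}|} \sum_{i \in \mathcal{B}} \nabla_{(\mathbf{w}, b)} \ell^{(i)}(\mathbf{w}, b)$

where $\eta$ is the learning rate, $\mathcal{B}$ is the sampled minibatch, and $\ell^{(i)}$ is the loss on example $i$: a larger learning rate takes bigger steps, and the batch size controls how many gradients are averaged per step.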
import random
import torch
import numpy as np
from d2l import torch as d2l
# Construct the dataset
def synthetic_data(w, b, num_examples):
    """Generate y = Xw + b + noise."""
    X = torch.normal(0, 1, (num_examples, len(w)))
    y = torch.matmul(X, w) + b
    y += torch.normal(0, 0.01, y.shape)  # add Gaussian noise with std 0.01
    return X, y.reshape((-1, 1))
true_w = torch.tensor([2, -3.4])  # shape=(2,)
true_b = 4.2
features, labels = synthetic_data(true_w, true_b, 1000)
print('features:', features[0], '\nlabel:', labels[0])
# Visualize the linear correlation between the second feature and the labels
d2l.set_figsize()
d2l.plt.scatter(features[:, 1].detach().numpy(),
                labels.detach().numpy(), 1)
d2l.plt.show()
# A generator that reads the data in minibatches
def data_iter(batch_size, features, labels):
    num_examples = len(features)
    indices = list(range(num_examples))
    random.shuffle(indices)  # visit examples in random order
    for i in range(0, num_examples, batch_size):
        # min() handles the last batch, which may be smaller than batch_size
        batch_indices = torch.tensor(
            indices[i:min(i + batch_size, num_examples)])
        yield features[batch_indices], labels[batch_indices]

batch_size = 10
# Print one minibatch to verify the iterator works
for X, y in data_iter(batch_size, features, labels):
    print(X, '\n', y)
    break
# Initialize the model parameters
w = torch.normal(0, 0.01, size=(2, 1), requires_grad=True)
b = torch.zeros(1, requires_grad=True)
# Define the model
def linreg(X, w, b):
    """The linear regression model."""
    return torch.matmul(X, w) + b
# Define the loss function
def squared_loss(y_hat, y):
    """Squared loss; the factor 1/2 makes the gradient simply (y_hat - y)."""
    return (y_hat - y.reshape(y_hat.shape)) ** 2 / 2
# Define the optimization algorithm
def sgd(params, lr, batch_size):
    """Minibatch stochastic gradient descent."""
    with torch.no_grad():  # parameter updates should not be tracked by autograd
        for param in params:
            param -= lr * param.grad / batch_size  # average the summed gradients
            param.grad.zero_()  # reset gradients for the next step
# Training loop
lr = 0.03
num_epochs = 3
net = linreg
loss = squared_loss
for epoch in range(num_epochs):
    for X, y in data_iter(batch_size, features, labels):
        l = loss(net(X, w, b), y)  # minibatch loss, shape (batch_size, 1)
        l.sum().backward()         # reduce to a scalar, then backpropagate
        sgd([w, b], lr, batch_size)
    with torch.no_grad():
        train_l = loss(net(features, w, b), labels)
        print(f'epoch {epoch + 1}, loss {float(train_l.mean()):f}')
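Since the data was synthesized from known parameters, a quick sanity check (optional, not part of the training loop above) is to compare the learned parameters against true_w and true_b; after three epochs the errors should be close to zero:

print(f'error in estimating w: {true_w - w.reshape(true_w.shape)}')
print(f'error in estimating b: {true_b - b}')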
Using the framework:
# Concise implementation of linear regression
import numpy as np
import torch
from torch.utils import data
from d2l import torch as d2l
from torch import nn
true_w = torch.tensor([2,-3.4])
true_b = 4.2
features,labels = d2l.synthetic_data(true_w,true_b,num_examples=1000)
# Construct a PyTorch data iterator
def load_array(data_arrays, batch_size, is_train=True):
    dataset = data.TensorDataset(*data_arrays)
    # shuffle only during training
    return data.DataLoader(dataset, batch_size, shuffle=is_train)

batch_size = 10
data_iter = load_array((features, labels), batch_size)
next(iter(data_iter))  # fetch one minibatch to verify the loader works
# Define the model: a single fully connected layer
net = nn.Sequential(nn.Linear(2, 1))
# Initialize the model parameters in place
net[0].weight.data.normal_(0, 0.01)
net[0].bias.data.fill_(0)
# Mean squared error, also known as the squared L2 norm
loss = nn.MSELoss()
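One point worth noting: nn.MSELoss defaults to reduction='mean', so the returned loss is already averaged over the minibatch, which is why the update below does not divide by batch_size the way the from-scratch sgd did. A minimal check of that default behavior:

# nn.MSELoss averages over all elements by default (reduction='mean')
assert torch.isclose(nn.MSELoss()(torch.ones(4, 1), torch.zeros(4, 1)),
                     torch.tensor(1.0))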
# Instantiate the SGD optimizer
trainer = torch.optim.SGD(net.parameters(), lr=0.03)
# Training
num_epochs = 3
for epoch in range(num_epochs):
    for X, y in data_iter:
        l = loss(net(X), y)
        trainer.zero_grad()  # clear gradients before backpropagation
        l.backward()
        trainer.step()       # update the parameters
    l = loss(net(features), labels)
    print(f'epoch {epoch + 1}, loss {l:f}')
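As with the from-scratch version, the learned parameters can be read back out of the Linear layer and compared with the ground truth (mirroring the earlier sanity check):

w = net[0].weight.data
b = net[0].bias.data
print('error in estimating w:', true_w - w.reshape(true_w.shape))
print('error in estimating b:', true_b - b)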