Although in practice we would rarely build a linear model entirely from scratch, implementing linear regression from scratch is a good way to understand what each component does, so that is what we do here.
1、Import the required packages
%matplotlib inline  # embed plots directly in the notebook
import random
import torch
from d2l import torch as d2l
2、Generate the dataset
def synthetic_data(w, b, num_examples):
    '''Construct a synthetic dataset from a linear model with noise:
    generate y = Xw + b + noise'''
    X = torch.normal(0, 1, (num_examples, len(w)))  # mean 0, std 1; num_examples rows, len(w) columns
    y = torch.matmul(X, w) + b  # matmul computes the matrix-vector product Xw
    y += torch.normal(0, 0.01, y.shape)  # additive Gaussian noise
    return X, y.reshape((-1, 1))  # -1 lets reshape infer that dimension's size
true_w = torch.tensor([2, -3.4])
true_b = 4.2
features, labels = synthetic_data(true_w, true_b, 1000)
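To get a feel for the data, we can visualize it with d2l's plotting helpers (imported above); a scatter plot of the second feature against the labels should show a clear linear relationship.
d2l.set_figsize()
# detach() is needed before converting a tensor that may track gradients to NumPy
d2l.plt.scatter(features[:, 1].detach().numpy(), labels.detach().numpy(), 1)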
3、Read the data
def data_iter(batch_size, features, labels):
    '''Yield minibatches of (features, labels) in random order.'''
    num_examples = len(features)
    indices = list(range(num_examples))
    random.shuffle(indices)  # visit the examples in random order
    for i in range(0, num_examples, batch_size):
        batch_indices = torch.tensor(
            indices[i:min(i + batch_size, num_examples)])  # the last batch may be smaller
        yield features[batch_indices], labels[batch_indices]
batch_size = 10
# Print one minibatch to inspect its shape, then stop
for X, y in data_iter(batch_size, features, labels):
    print(X, '\n', y)
    break
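With 1000 examples and batch_size = 10, data_iter should yield exactly 100 minibatches. A quick sanity check (an illustrative snippet, not from the original):
num_batches = sum(1 for _ in data_iter(batch_size, features, labels))
print(num_batches)  # 1000 / 10 = 100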
4、Initialize the model parameters
# w is drawn from N(0, 0.01²); b starts at zero; both track gradients
w = torch.normal(0, 0.01, size=(2, 1), requires_grad=True)
b = torch.zeros(1, requires_grad=True)
5、Define the model
def linreg(X, w, b):
    '''The linear regression model.'''
    return torch.matmul(X, w) + b  # Xw + b
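As a quick sanity check (illustrative, using the w and b initialized above), the model should map n examples to an (n, 1) tensor of predictions:
print(linreg(features[:3], w, b).shape)  # torch.Size([3, 1])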
6、Define the loss function
def squared_loss(y_hat, y):
    '''Squared loss: (y_hat - y)² / 2, computed elementwise.'''
    # reshape y to match y_hat so the subtraction is elementwise
    return (y_hat - y.reshape(y_hat.shape)) ** 2 / 2
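The reshape guards against silent broadcasting: if y_hat has shape (n, 1) and y has shape (n,), subtracting them directly would broadcast to an (n, n) matrix. A small illustration (values chosen purely for demonstration):
y_hat = torch.tensor([[1.0], [2.0]])
y = torch.tensor([0.0, 2.0])
print(squared_loss(y_hat, y))        # tensor([[0.5000], [0.0000]])
print(((y_hat - y) ** 2 / 2).shape)  # torch.Size([2, 2]) without the reshape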
7、Define the optimization algorithm
def sgd(params, lr, batch_size):
    '''Minibatch stochastic gradient descent.'''
    with torch.no_grad():  # parameter updates must not be recorded by autograd
        for param in params:
            param -= lr * param.grad / batch_size
            param.grad.zero_()  # clear the gradient for the next backward pass
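The division by batch_size compensates for backpropagating l.sum() rather than the mean in the training loop below: scaling the gradient by 1/batch_size recovers the gradient of the mean loss. A tiny demonstration with a hypothetical tensor t:
t = torch.ones(4, requires_grad=True)
(t * 2).sum().backward()
print(t.grad / 4)  # tensor([0.5000, 0.5000, 0.5000, 0.5000]), identical to the gradient of (t * 2).mean()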
8、Train the model
# Set the hyperparameters
lr = 0.03
num_epochs = 3
net = linreg
loss = squared_loss
for epoch in range(num_epochs):
    for X, y in data_iter(batch_size, features, labels):
        l = loss(net(X, w, b), y)  # minibatch loss on X and y
        l.sum().backward()  # backpropagate the summed loss
        sgd([w, b], lr, batch_size)  # update the parameters using their gradients
    with torch.no_grad():
        train_l = loss(net(features, w, b), labels)
        print(f'epoch {epoch + 1}, loss {float(train_l.mean()):f}')
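Because the data was synthesized from known parameters, we can verify training by comparing the learned w and b against true_w and true_b; after three epochs both errors should be close to zero:
print(f'error in estimating w: {true_w - w.reshape(true_w.shape)}')
print(f'error in estimating b: {true_b - b}')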