Summary of the from-scratch implementation (source code):
# Exercise: linear regression implemented from scratch
import random
import torch
from d2l import torch as d2l
# 1. Generate the dataset
true_w = torch.tensor([2,-3.4])
true_b = 4.2
features, labels = d2l.synthetic_data(true_w,true_b,1000)
# 2. Read the dataset: a hand-written minibatch iterator
# (shuffle the indices once and use them to index features and labels together,
# so each feature row stays paired with its label)
batch_size = 10
def data_iter(batch_size, features, labels):
    num_examples = len(features)
    indices = list(range(num_examples))
    random.shuffle(indices)  # read examples in random order
    for i in range(0, num_examples, batch_size):
        batch_indices = torch.tensor(indices[i: min(i + batch_size, num_examples)])
        yield features[batch_indices], labels[batch_indices]
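# Sanity check (a sketch): pull one minibatch and confirm its shapes
X, y = next(data_iter(batch_size, features, labels))
print(X.shape, y.shape)  # expected: torch.Size([10, 2]) torch.Size([10, 1])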
# 3. Initialize model parameters
w = torch.normal(0, 0.01, size=(2, 1), requires_grad=True)
b = torch.zeros(1, requires_grad=True)
# 4. Define the model
def linreg(X, w, b):  # the linear regression model
    return torch.matmul(X, w) + b
# 5. Define the loss function: squared loss
def squared_loss(y_hat, y):
    return (y_hat - y.reshape(y_hat.shape)) ** 2 / 2
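# Why the reshape matters (a sketch): without it, an (n,) target broadcast
# against an (n, 1) prediction silently yields an (n, n) matrix.
y_hat = torch.ones(3, 1)
y = torch.zeros(3)
print(((y_hat - y) ** 2).shape)                       # torch.Size([3, 3]) -- broadcasting bug
print(((y_hat - y.reshape(y_hat.shape)) ** 2).shape)  # torch.Size([3, 1]) -- intended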
# 6. Define the optimization algorithm
def sgd(params, lr, batch_size):
    """Minibatch stochastic gradient descent."""
    with torch.no_grad():  # parameter updates must not be tracked by autograd
        for param in params:
            param -= lr * param.grad / batch_size  # grad was summed over the batch
            param.grad.zero_()  # reset the gradient for the next step
# 7. Training
lr = 0.03
num_epochs = 3
net = linreg
loss = squared_loss
for epoch in range(num_epochs):
    for X, y in data_iter(batch_size, features, labels):
        l = loss(net(X, w, b), y)  # minibatch loss on X and y
        # l has shape (batch_size, 1) rather than being a scalar, so sum its
        # elements before computing the gradients with respect to [w, b]
        l.sum().backward()
        sgd([w, b], lr, batch_size)  # update parameters using their gradients
    with torch.no_grad():
        train_l = loss(net(features, w, b), labels)
        print(f'epoch {epoch + 1}, loss {float(train_l.mean()):f}')
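# After training, compare the learned parameters with the ground truth
# (a sketch, mirroring the same check used in the concise implementation below)
print(f'error in estimating w: {true_w - w.reshape(true_w.shape)}')
print(f'error in estimating b: {true_b - b}')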
Plotting, using matplotlib:
import matplotlib.pyplot as plt
def synthetic_data(w, b, num_examples):  #@save
    """Generate y = Xw + b + noise."""
    X = torch.normal(0, 1, (num_examples, len(w)))
    y = torch.matmul(X, w) + b
    y += torch.normal(0, 0.01, y.shape)  # Gaussian noise with std 0.01
    return X, y.reshape((-1, 1))
true_w = torch.tensor([2, -3.4])
true_b = 4.2
features, labels = synthetic_data(true_w, true_b, 1000)
print('features:', features[0], '\nlabel:', labels[0])  # inspect one example
plt.scatter(features[:, 1], labels)  # second feature vs. label
plt.show()
Summary of the concise implementation:
# Concise implementation of linear regression
import numpy
import torch
from torch.utils import data
from d2l import torch as d2l
# 1. Generate the dataset
true_w = torch.tensor([2,-3.4])
true_b = 4.2
features, labels = d2l.synthetic_data(true_w,true_b,1000)
# 2. Read the dataset: wrap the tensors in a TensorDataset so that features
# and labels are shuffled together and stay paired
batch_size = 20
dataset = data.TensorDataset(features, labels)
data_iter = data.DataLoader(dataset, batch_size, shuffle=True)
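# Quick check (a sketch): fetch one batch from the iterator
X, y = next(iter(data_iter))
print(X.shape, y.shape)  # expected: torch.Size([20, 2]) torch.Size([20, 1])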
# 3. Define the model
from torch import nn
net = nn.Sequential(nn.Linear(2,1))
# 4. Initialize model parameters
net[0].weight.data.normal_(0, 0.01)
net[0].bias.data.fill_(0)  # initialize the bias to zero
# 5. Define the loss function
loss = nn.MSELoss()
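# Note: nn.MSELoss averages over all elements by default (reduction='mean');
# the manual computation below (a sketch) gives the same value
l_manual = ((net(features) - labels) ** 2).mean()
print(l_manual, loss(net(features), labels))  # the two values match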
# 6. Define the optimization algorithm
lr = 0.03
trainer = torch.optim.SGD(net.parameters(), lr)
# 7. Training
num_epochs = 3
for epoch in range(num_epochs):
    for X, y in data_iter:
        l = loss(net(X), y)  # compute the minibatch loss
        trainer.zero_grad()  # zero the gradients
        l.backward()  # backpropagate to compute the gradients
        trainer.step()  # update the parameters
    l = loss(net(features), labels)
    print(f'epoch {epoch + 1}, loss {l:f}')
w = net[0].weight.data
print('error in estimating w:', true_w - w.reshape(true_w.shape))
b = net[0].bias.data
print('error in estimating b:', true_b - b)
Additional notes:
# a minibatch iterator over the data
import random
def data_iter(batch_size, features, labels):
    num_examples = len(features)
    indices = list(range(num_examples))  # indices of the examples
    random.shuffle(indices)  # shuffle so batches are drawn in random order
    for i in range(0, num_examples, batch_size):
        batch_indices = torch.tensor(indices[i: min(i + batch_size, num_examples)])
        yield features[batch_indices], labels[batch_indices]
To read data with the PyTorch utilities instead, use DataLoader(dataset, batch_size), wrapping the tensors in a TensorDataset first so features and labels stay paired.
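A minimal sketch of that DataLoader pattern (the tensor names here are illustrative):
import torch
from torch.utils import data

X = torch.randn(100, 2)  # illustrative features
y = torch.randn(100, 1)  # illustrative labels
dataset = data.TensorDataset(X, y)  # keeps each feature row paired with its label
loader = data.DataLoader(dataset, batch_size=10, shuffle=True)
X_batch, y_batch = next(iter(loader))  # draw one minibatch
print(X_batch.shape, y_batch.shape)  # torch.Size([10, 2]) torch.Size([10, 1])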