A Concise Implementation of Linear Regression
import numpy as np
import torch
from torch.utils import data
from d2l import torch as d2l
from torch import nn
# Generate the dataset
true_w = torch.tensor([2, -3.4])
true_b = 4.2
features, labels = d2l.synthetic_data(true_w, true_b, 1000)  # these are the ground-truth labels y; later we measure how far the model's predictions fall from them
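# Under the hood, d2l.synthetic_data draws features from a standard normal
# distribution and builds labels from the linear model y = Xw + b plus Gaussian
# noise. A minimal sketch of an equivalent generator (assuming the d2l
# convention of noise with standard deviation 0.01):
def synthetic_data_sketch(w, b, num_examples):
    '''Generate y = Xw + b + noise (sketch of d2l.synthetic_data).'''
    X = torch.normal(0, 1, (num_examples, len(w)))
    y = torch.matmul(X, w) + b
    y += torch.normal(0, 0.01, y.shape)  # small observation noise
    return X, y.reshape((-1, 1))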
# Read the dataset
def load_data(data_arrays, batch_size, is_train=True):  # @save
    '''Construct a PyTorch data iterator.'''
    dataset = data.TensorDataset(*data_arrays)
    # is_train controls whether the DataLoader reshuffles the data each epoch
    return data.DataLoader(dataset, batch_size, shuffle=is_train)
# test: fetch one minibatch
batch_size = 10
data_iter = load_data((features, labels), batch_size)
next(iter(data_iter))
[tensor([[-0.8651, -0.6773],
[-0.5669, -0.3115],
[-0.4197, -0.8198],
[-0.1276, -0.3684],
[-0.8580, -0.8185],
[-0.4955, 0.1050],
[ 0.2148, 0.1534],
[-2.0009, 0.6106],
[-1.3546, 1.1515],
[-0.2777, -0.7998]]),
tensor([[ 4.7699],
[ 4.1208],
[ 6.1584],
[ 5.1972],
[ 5.2708],
[ 2.8533],
[ 4.1034],
[-1.8997],
[-2.4302],
[ 6.3858]])]
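# next(iter(data_iter)) pulls a single shuffled minibatch of 10 feature rows
# and their labels. With 1000 examples and a batch size of 10, the loader
# yields 100 minibatches per epoch:
print(len(data_iter))  # 100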
# Define the model
net = nn.Sequential(nn.Linear(2, 1))
print(net[0].weight.data)
print(net[0].bias.data)  # inspect the randomly initialized weight and bias
tensor([[-0.0941, 0.1806]])
tensor([0.2378])
net[0].weight.data.normal_(0, 0.01)
net[0].bias.data.fill_(0)  # overwrite the parameter values in place with normal_ and fill_
print(net[0].weight.data)
print(net[0].bias.data)  # the parameters after reinitialization
tensor([[ 0.0048, -0.0003]])
tensor([0.])
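# The same reinitialization can be written with the torch.nn.init helpers,
# which is the more common idiom; a sketch:
nn.init.normal_(net[0].weight, mean=0, std=0.01)  # weights ~ N(0, 0.01^2)
nn.init.zeros_(net[0].bias)                       # bias set to 0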
loss = nn.MSELoss()  # squared error, averaged over the minibatch
trainer = torch.optim.SGD(net.parameters(), lr=0.03)  # define the optimization algorithm
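# By default nn.MSELoss averages the squared error over all elements. A quick
# sanity check on two hand-made tensors (hypothetical values, just for
# illustration):
y_hat = torch.tensor([1.0, 2.0])
y = torch.tensor([0.0, 4.0])
print(loss(y_hat, y))             # tensor(2.5000)
print(((y_hat - y) ** 2).mean())  # tensor(2.5000), the same value by hand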
num_epochs = 3
for epoch in range(num_epochs):
    for X, y in data_iter:
        l = loss(net(X), y)
        trainer.zero_grad()  # clear stale gradients before backpropagation
        l.backward()
        trainer.step()       # update the parameters with one SGD step
    l = loss(net(features), labels)
    print(f'epoch {epoch + 1}, loss {l:f}')
epoch 1, loss 0.000185
epoch 2, loss 0.000097
epoch 3, loss 0.000096
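# The end-of-epoch loss above is only for monitoring, so it can be computed
# under torch.no_grad() to skip building an autograd graph; a sketch:
with torch.no_grad():
    print(f'final loss {loss(net(features), labels):f}')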
w = net[0].weight.data
print(f'estimation error of w: {true_w - w.reshape(true_w.shape)}')
estimation error of w: tensor([ 0.0002, -0.0007])
b = net[0].bias.data
print(f'estimation error of b: {true_b - b}')
estimation error of b: tensor([0.0002])