12. Linear Regression: PyTorch Implementation

import numpy as np
import torch
from torch.utils import data
from d2l import torch as d2l
# True parameters used to generate 1000 synthetic (feature, label) examples
true_w = torch.tensor([2, -3.4])
true_b = 4.2
features, labels = d2l.synthetic_data(true_w, true_b, 1000)
def load_array(data_arrays, batch_size, is_train=True):
    """Construct a PyTorch data iterator over the given (features, labels) arrays."""
    dataset = data.TensorDataset(*data_arrays)
    return data.DataLoader(dataset, batch_size, shuffle=is_train)
batch_size = 10
data_iter = load_array((features, labels), batch_size)
# Read and print the first minibatch
next(iter(data_iter))
[tensor([[-2.0451,  0.4303],
         [ 0.0140,  0.5225],
         [-1.2557, -2.9568],
         [ 0.1168, -0.4430],
         [-0.3319, -0.5382],
         [ 0.3172,  0.3607],
         [-1.4231, -0.6639],
         [ 0.3062, -0.5283],
         [ 0.7471, -0.9899],
         [ 2.4132,  1.5989]]),
 tensor([[-1.3573],
         [ 2.4451],
         [11.7485],
         [ 5.9291],
         [ 5.3722],
         [ 3.6172],
         [ 3.6262],
         [ 6.5933],
         [ 9.0530],
         [ 3.5918]])]
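For reference, a rough sketch of what load_array wraps, assuming the standard torch.utils.data behavior: TensorDataset pairs each feature row with its label, and DataLoader shuffles and batches them, so iterating over data_iter should yield 100 minibatches of 10 examples each.

# Sketch: count the minibatches produced by data_iter (1000 examples / batch_size 10 = 100)
num_batches = 0
for X, y in data_iter:
    assert X.shape == (batch_size, 2) and y.shape == (batch_size, 1)
    num_batches += 1
print(num_batches)  # expected: 100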
# nn is an abbreviation for neural networks
from torch import nn

net = nn.Sequential(nn.Linear(2, 1))
net
Sequential(
  (0): Linear(in_features=2, out_features=1, bias=True)
)
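As a quick check (a sketch, not part of the original post; X_demo is just an illustrative tensor), the single Linear(2, 1) layer computes the affine map X @ W.T + b, which can be reproduced by hand from net[0].weight and net[0].bias:

# Sketch: net(X) for a Linear(2, 1) layer equals X @ W.T + b
X_demo = torch.randn(4, 2)
manual = X_demo @ net[0].weight.T + net[0].bias
assert torch.allclose(net(X_demo), manual)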
# Initialize the model parameters
net[0].weight.data.normal_(0,0.01)
net[0].bias.data.fill_(0)
tensor([0.])
# Loss function (mean squared error)
loss = nn.MSELoss()
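By default nn.MSELoss uses reduction='mean', i.e. it averages the squared error over all elements; a minimal sketch of that equivalence:

# Sketch: MSELoss with the default reduction='mean' equals the mean of squared errors
y_hat = torch.tensor([[1.0], [2.0]])
y_true = torch.tensor([[1.5], [2.5]])
assert torch.isclose(loss(y_hat, y_true), ((y_hat - y_true) ** 2).mean())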
# Optimization algorithm (minibatch stochastic gradient descent)
optimizer = torch.optim.SGD(net.parameters(), lr=0.03)
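With no momentum or weight decay, torch.optim.SGD applies the plain update param ← param − lr · param.grad to every parameter; a sketch of one such step done by hand on a throwaway tensor (the real net is untouched):

# Sketch: one plain SGD step is param -= lr * param.grad
p = torch.tensor([1.0, -1.0], requires_grad=True)
(p ** 2).sum().backward()          # grad is 2 * p
with torch.no_grad():
    p -= 0.03 * p.grad             # same rule optimizer.step() applies to net.parameters()
print(p)                           # tensor([ 0.9400, -0.9400], requires_grad=True)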
# Training
"""
In each iteration:

Generate predictions by calling net(X) and compute the loss l (the forward pass).

Compute the gradients by running backpropagation.

Update the model parameters by calling the optimizer."""
num_epochs = 3
for epoch in range(num_epochs):
    for X, y in data_iter:
        l = loss(net(X), y)
        optimizer.zero_grad()  # zero the gradients
        l.backward()           # backpropagation
        optimizer.step()       # update the parameters
    l = loss(net(features), labels)
    print(f'epoch {epoch + 1}, loss {l:f}')
epoch 1, loss 0.000207
epoch 2, loss 0.000102
epoch 3, loss 0.000101
# Inspect the learned parameters and compare them with the true values
w = net[0].weight.data
print('estimation error of w:', true_w - w.reshape(true_w.shape))
b = net[0].bias.data
print('estimation error of b:', true_b - b)
estimation error of w: tensor([0.0002, 0.0009])
estimation error of b: tensor([-9.2506e-05])
# Exercise: if the averaged minibatch loss is replaced by the summed minibatch loss, how should the learning rate be changed?
net = nn.Sequential(nn.Linear(2, 1))
net[0].weight.data.normal_(0, 0.01)
net[0].bias.data.fill_(0)
loss = nn.MSELoss(reduction='sum')  # summed (not averaged) squared error
optimizer = torch.optim.SGD(net.parameters(), lr=0.1)
num_epochs = 3
for epoch in range(num_epochs):
    for X, y in data_iter:
        # Dividing the summed loss by batch_size keeps the update on the per-example
        # scale (see the sketch after the training output below)
        l = loss(net(X), y) / batch_size
        optimizer.zero_grad()
        l.backward()
        optimizer.step()
    # With reduction='sum', this epoch loss is summed over all 1000 examples
    l = loss(net(features), labels)
    print(f'epoch {epoch + 1}, loss {l:f}')
epoch 1, loss 0.101957
epoch 2, loss 0.103731
epoch 3, loss 0.100250
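The relationship behind this exercise, as a sketch: with reduction='sum' the minibatch loss and its gradient are batch_size times larger than with reduction='mean', so either divide the loss by batch_size (as the loop above does) or divide the learning rate by batch_size; both yield the same SGD step.

# Sketch: the sum-reduction gradient is batch_size times the mean-reduction gradient,
# so lr_sum = lr_mean / batch_size produces the same update.
Xb, yb = next(iter(data_iter))
w = torch.zeros((2, 1), requires_grad=True)
nn.MSELoss(reduction='mean')(Xb @ w, yb).backward()
grad_mean = w.grad.clone()
w.grad.zero_()
nn.MSELoss(reduction='sum')(Xb @ w, yb).backward()
assert torch.allclose(w.grad, batch_size * grad_mean)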
# Access the gradient of the linear layer's weight
net[0].weight.grad
tensor([[0.0043, 0.0030]])
# Replace the original squared loss with the Huber loss
loss = nn.HuberLoss()
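nn.HuberLoss (quadratic for small errors, linear for large ones, delta=1.0 by default) can be dropped into the same training loop; a sketch of retraining with it, reusing the loop structure from above:

# Sketch: retrain the same architecture with the Huber loss in place of MSELoss
net = nn.Sequential(nn.Linear(2, 1))
net[0].weight.data.normal_(0, 0.01)
net[0].bias.data.fill_(0)
optimizer = torch.optim.SGD(net.parameters(), lr=0.03)
for epoch in range(num_epochs):
    for X, y in data_iter:
        l = loss(net(X), y)        # loss is now nn.HuberLoss()
        optimizer.zero_grad()
        l.backward()
        optimizer.step()
    print(f'epoch {epoch + 1}, loss {loss(net(features), labels):f}')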