Implementing Linear Regression from Scratch (PyTorch)

Reference: Mu Li, Dive into Deep Learning (d2l)

import random
import torch
from d2l import torch as d2l  # only needed for the optional scatter-plot check below

# -------------------- Generate synthetic data -------------------- #
def synthetic_data(w, b, num_examples):
    """Generate y = Xw + b + Gaussian noise."""
    x = torch.normal(0, 1, (num_examples, len(w)))   # features drawn from N(0, 1)
    y = torch.matmul(x, w) + b
    y += torch.normal(0, 0.01, y.shape)              # additive noise with std 0.01
    return x, y.reshape((-1, 1))                     # labels as a column vector
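
The generator samples features from a standard normal and produces labels from the linear model plus Gaussian noise:

$$y = Xw + b + \epsilon, \qquad \epsilon \sim \mathcal{N}(0,\ 0.01^2)$$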

true_w = torch.tensor([2,-3.4])
true_b = 4.2
features, labels = synthetic_data(true_w,true_b,1000)
# print('features:', features[0], '\nlabel:', labels[0])
# optional visual check: scatter of the second feature against the labels
# d2l.set_figsize()
# d2l.plt.scatter(features[:, 1].detach().numpy(), labels.detach().numpy(), 1)
# d2l.plt.show()

def data_iter(batch_size, features, labels):
    """Yield shuffled minibatches of (features, labels)."""
    num_examples = len(features)
    indices = list(range(num_examples))
    random.shuffle(indices)   # read the examples in random order
    for i in range(0, num_examples, batch_size):
        batch_indices = torch.tensor(indices[i:min(i + batch_size, num_examples)])
        yield features[batch_indices], labels[batch_indices]   # one minibatch per iteration, until the index range is exhausted

batch_size = 10
# quick sanity check: print the first minibatch
for x, y in data_iter(batch_size, features, labels):
    print(x, '\n', y)
    break
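
For comparison, the same shuffled batching is available from PyTorch's built-in data utilities; this is just a sketch of the equivalent, not part of the from-scratch version:

from torch.utils.data import TensorDataset, DataLoader
dataset = TensorDataset(features, labels)        # wrap the tensors as a dataset
loader = DataLoader(dataset, batch_size=batch_size, shuffle=True)
# iterating over `loader` yields (x, y) minibatches just like data_iter above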
# ----------------------- Build the model ----------------------- #
w = torch.normal(0, 0.01, size=(2, 1), requires_grad=True)   # initialize weights with small random values
b = torch.zeros(1, requires_grad=True)                       # initialize the bias at zero
def linreg(x, w, b):
    """Linear regression model."""
    return torch.matmul(x, w) + b
# Loss function: squared loss
def squared_loss(y_hat, y):
    return (y_hat - y.reshape(y_hat.shape)) ** 2 / 2
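
For each example the squared loss is

$$l^{(i)}(w, b) = \frac{1}{2}\left(\hat{y}^{(i)} - y^{(i)}\right)^2,$$

where the constant 1/2 simply makes the derivative come out as $\hat{y}^{(i)} - y^{(i)}$.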
# Optimization algorithm: minibatch stochastic gradient descent
def SGD(params, learning_rate, batch_size):
    with torch.no_grad():
        for param in params:
            param -= learning_rate * param.grad / batch_size
            param.grad.zero_()  # reset gradients manually, since PyTorch accumulates them across backward() calls
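
Each call applies one minibatch SGD step,

$$(w, b) \leftarrow (w, b) - \frac{\eta}{|\mathcal{B}|} \sum_{i \in \mathcal{B}} \partial_{(w, b)}\, l^{(i)}(w, b),$$

where $\eta$ is the learning rate and $|\mathcal{B}|$ the batch size; dividing by the batch size keeps the step scale independent of batch_size.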

# ----------- Training loop ------------------- #
learning_rate = 0.03
num_epochs = 3
net = linreg
loss = squared_loss

for epoch in range(num_epochs):
    for x, y in data_iter(batch_size, features, labels):
        y_hat = net(x, w, b)
        l = loss(y_hat, y)       # minibatch loss, shape (batch_size, 1)
        l.sum().backward()       # reduce to a scalar before computing gradients
        SGD([w, b], learning_rate, batch_size)
    with torch.no_grad():
        train_l = loss(net(features, w, b), labels)
        print(f'epoch {epoch + 1}, loss {float(train_l.mean()):f}')
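
As a cross-check, the same loop can be written with PyTorch's own optimizer and loss. A minimal sketch using torch.optim.SGD and torch.nn.MSELoss (the fresh parameters w2/b2 are hypothetical names for illustration); note that MSELoss omits the 1/2 factor, so halving the learning rate would match the from-scratch version exactly:

w2 = torch.normal(0, 0.01, size=(2, 1), requires_grad=True)  # hypothetical fresh weights
b2 = torch.zeros(1, requires_grad=True)                      # hypothetical fresh bias
optimizer = torch.optim.SGD([w2, b2], lr=learning_rate)
loss_fn = torch.nn.MSELoss()
for epoch in range(num_epochs):
    for x, y in data_iter(batch_size, features, labels):
        optimizer.zero_grad()                 # clear accumulated gradients
        loss_fn(net(x, w2, b2), y).backward()
        optimizer.step()                      # update w2, b2 in place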

# ----- Evaluate training by comparing the learned parameters with the true ones ----- #
print(f'error in estimating w: {true_w - w.reshape(true_w.shape)}')
print(f'error in estimating b: {true_b - b}')