Deep Learning: Building Linear Regression from Scratch

# %matplotlib inline
import random
import torch
from d2l import torch as d2l

# 1.1 Generate the dataset
def synthetic_data(w, b, num_examples):
    """Generate y = Xw + b + noise"""
    x = torch.normal(0, 1, (num_examples, 2))
    y = torch.matmul(x, w) + b
    y = y + torch.normal(0, 0.01, y.shape)
    return x, y.reshape((-1, 1))

true_w = torch.tensor([2, -3.4])
true_b = 4.2
features, labels = synthetic_data(true_w, true_b, 1000)
print('features:', features[0], '\nlabel:', labels[0])

d2l.set_figsize()
d2l.plt.scatter(features[:, 1].detach().numpy(), labels.detach().numpy(), 1)
d2l.plt.show()
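# (Added sanity check, not part of the original walkthrough.) Since the data
# is synthesized from a known linear model, the closed-form least-squares fit
# should land close to true_w and true_b; a minimal sketch:
X_aug = torch.cat([features, torch.ones(len(features), 1)], dim=1)
sol = torch.linalg.lstsq(X_aug, labels).solution
print('closed-form w:', sol[:2].flatten(), 'b:', sol[2].item())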

# 2.1 Define a function that shuffles the samples in the dataset and yields
# minibatches for training. It takes the batch size, the feature matrix, and
# the label vector as input, and generates minibatches of size batch_size.
def Get_MiniBatchData(batch_size, features, labels):
    num_examples = len(features)
    indices = list(range(num_examples))
    # Samples are read in random order, with no fixed sequence
    random.shuffle(indices)
    for i in range(0, num_examples, batch_size):
        batch_indices = torch.tensor(indices[i:min(i + batch_size, num_examples)])
        yield features[batch_indices], labels[batch_indices]

batch_size1 = 20

# 2.2 Read the first minibatch of samples and print it
for x, y in Get_MiniBatchData(batch_size1, features, labels):
    print(x, '\n', y)
    break
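# For comparison (my addition, not part of the original code): PyTorch's
# built-in DataLoader performs the same shuffle-and-batch logic; a minimal
# sketch under that assumption:
from torch.utils.data import TensorDataset, DataLoader
loader = DataLoader(TensorDataset(features, labels), batch_size=batch_size1, shuffle=True)
for x, y in loader:
    print(x.shape, y.shape)  # torch.Size([20, 2]) torch.Size([20, 1])
    break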

# 3.1 Initialize the model parameters
# Sample the weights from a normal distribution with mean 0 and standard deviation 0.01, and initialize the bias to 0
weight = torch.normal(0, 0.01, size=(2, 1), requires_grad=True)
bias = torch.zeros(1, requires_grad=True)
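# Illustrative check (added): requires_grad=True turns on autograd tracking
# for both parameters; before any backward() call their .grad fields are
# still None.
assert weight.requires_grad and bias.requires_grad
assert weight.grad is None and bias.grad is None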

# 4.1 Build the linear regression model
def Linear_Regression_Model(X, W, b):
    return torch.matmul(X, W) + b
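# Shape sketch (added): a (batch, 2) input times a (2, 1) weight gives a
# (batch, 1) prediction, with b broadcast across the batch dimension.
assert Linear_Regression_Model(features[:3], weight, bias).shape == (3, 1)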

# 5.1 Define the loss function (squared loss)
def Squared_Loss_Func(y_hat, y):
    return (y_hat - y.reshape(y_hat.shape)) ** 2 / 2
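# Why the 1/2 factor (added note): for a single sample the loss is
# l(y_hat, y) = (y_hat - y)^2 / 2, so dl/dy_hat = y_hat - y and the factor
# of 2 from differentiation cancels, keeping the gradients tidy.
# e.g. Squared_Loss_Func(torch.tensor([2.5]), torch.tensor([2.0])) = 0.5^2 / 2 = 0.125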

# 6.1 Define the optimizer: minibatch stochastic gradient descent
def Stochastic_Gradient_Descent(params, lr, batch_size):
    with torch.no_grad():
        for param in params:
            param -= lr * param.grad / batch_size
            param.grad.zero_()
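# Update rule (added note): because the minibatch loss is summed before
# backward(), dividing the gradient by batch_size makes each step
#     param <- param - lr * (1/|B|) * sum of per-sample gradients,
# i.e. an average-gradient SGD step. torch.no_grad() keeps the update itself
# out of the autograd graph, and grad.zero_() resets the accumulated gradient.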

# 7.1 Set up the training loop
learning_rate = 0.05
num_epochs = 5
LinearRegressionNetwork = Linear_Regression_Model
LossFunc = Squared_Loss_Func

for epoch in range(num_epochs):
    for X, Y in Get_MiniBatchData(batch_size1, features, labels):
        L = LossFunc(LinearRegressionNetwork(X, weight, bias), Y)
        L.sum().backward()
        Stochastic_Gradient_Descent([weight, bias], learning_rate, batch_size1)
    with torch.no_grad():
        train_L = LossFunc(LinearRegressionNetwork(features, weight, bias), labels)
        print(f'epoch {epoch + 1}, loss {float(train_L.mean()):f}')


print('weight:', weight, '\nbias:', bias)
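# Follow-up check (added, in the spirit of the d2l book): compare the learned
# parameters with the ones used to synthesize the data; both gaps should be
# close to zero after training.
print(f'error in estimating w: {true_w - weight.reshape(true_w.shape)}')
print(f'error in estimating b: {true_b - bias}')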