Linear Regression
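
A from-scratch PyTorch implementation of linear regression, following Li Mu's Dive into Deep Learning: the model is y = Xw + b, the loss is the elementwise squared error (y_hat - y)^2 / 2 averaged over each minibatch, and the parameters are fit with minibatch stochastic gradient descent on synthetic data.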

import torch
import random

class LinerRegression():
    """Linear model.
    """
    def __init__(self, inputDim):
        """Initialize the weight and bias parameters.

        Args:
            inputDim (int): input dimension
        """
        self.w = torch.normal(0, 0.01, size=(inputDim, 1), requires_grad=True)
        self.b = torch.zeros(1, requires_grad=True)

    def forward(self, x):
        """Forward pass: y = Xw + b.
        """
        return torch.matmul(x, self.w) + self.b

    def __call__(self, x):
        return self.forward(x)
    
    
def synthetic_data(w, b, num_examples):
    """Generate y = Xw + b + noise."""
    X = torch.normal(0, 1, (num_examples, len(w)))
    y = torch.matmul(X, w) + b
    y += torch.normal(0, 0.01, y.shape)  # Gaussian noise with std 0.01
    return X, y.reshape((-1, 1))

def data_iter(batch_size, features, labels, shuffle=True):
    """Minibatch data generator.

    Args:
        batch_size (int): minibatch size
        features (Tensor): features
        labels (Tensor): labels
        shuffle (bool, optional): read examples in random order. Defaults to True.

    Yields:
        [Tensor, Tensor]: [features, labels]
    """
    num_examples = len(features)
    # Build indices unconditionally so shuffle=False also works
    indices = list(range(num_examples))
    if shuffle:
        # The examples are read in random order, with no particular structure
        random.shuffle(indices)
    for i in range(0, num_examples, batch_size):
        batch_indices = torch.tensor(indices[i:min(i +
                                                   batch_size, num_examples)])
        yield features[batch_indices], labels[batch_indices]

def SquredLoss(y_hat, y):
    """Elementwise squared loss: (y_hat - y)^2 / 2."""
    return (y_hat - y.reshape(y_hat.shape))**2 / 2

def MSELoss(y_hat, y):
    """MSE loss function.

    Args:
        y_hat (Tensor): predictions
        y (Tensor): ground truth

    Returns:
        Tensor: mean squared-error loss over the batch
    """
    return SquredLoss(y_hat, y).mean()
            
def trainSGD(model, features, labels, EpochsNum,
             BatchSize, Lr, shuffle=True, LossFn=MSELoss):
    """Minibatch stochastic gradient descent.

    Args:
        model (LinerRegression): linear model
        features (Tensor): features
        labels (Tensor): labels
        EpochsNum (int): number of training epochs
        BatchSize (int): minibatch size
        Lr (float): learning rate
        shuffle (bool, optional): reshuffle each epoch. Defaults to True.
        LossFn (function, optional): loss function. Defaults to MSELoss.
    """
    print("Training...")
    for epoch in range(1, EpochsNum + 1):
        for X, y in data_iter(BatchSize, features, labels, shuffle=shuffle):
            loss = LossFn(model(X), y)
            loss.backward()
            with torch.no_grad():
                for param in [model.w, model.b]:
                    # MSELoss already averages over the batch, so this extra
                    # division makes the effective step size Lr / BatchSize
                    param -= param.grad * Lr / BatchSize
                    param.grad.zero_()

        # Reports the loss of the last minibatch in each epoch
        print("epoch: {}, loss: {}".format(epoch, loss.item()))
            

if __name__ == '__main__':
    true_w = torch.tensor([2, -3.4])
    true_b = 4.2
    features, labels = synthetic_data(true_w, true_b, 1000)
    lr = 0.03
    num_epochs = 20
    model = LinerRegression(inputDim=features.shape[1])
    loss = MSELoss
    batch_size = 10
    trainSGD(model, features, labels, num_epochs, batch_size, lr, LossFn=loss)

    print(f'estimation error of w: {true_w - model.w.reshape(true_w.shape)}')
    print(f'estimation error of b: {true_b - model.b}')
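
For comparison, the same model can also be trained with PyTorch's built-in layer, loss, and optimizer, in the spirit of the book's concise implementation. A minimal sketch, assuming the features/labels from synthetic_data and the data_iter helper above are in scope (an illustrative alternative, not part of the original script):

from torch import nn

net = nn.Linear(2, 1)                    # built-in affine layer: y = Xw + b
loss_fn = nn.MSELoss()                   # built-in mean squared error
optimizer = torch.optim.SGD(net.parameters(), lr=0.03)

for epoch in range(1, 21):
    for X, y in data_iter(10, features, labels):
        optimizer.zero_grad()            # clear gradients from the previous step
        l = loss_fn(net(X), y)
        l.backward()                     # backpropagate the batch loss
        optimizer.step()                 # param -= lr * grad for each parameter
    print(f"epoch: {epoch}, loss: {l.item()}")

Note that nn.MSELoss averages over the batch and optimizer.step applies the full learning rate, whereas the from-scratch loop above additionally divides by BatchSize; at the same nominal lr, the built-in version therefore takes larger effective steps.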

Output

Training...
epoch: 1, loss: 8.205362319946289
epoch: 2, loss: 4.800352096557617
epoch: 3, loss: 3.928896427154541
epoch: 4, loss: 2.79616117477417
epoch: 5, loss: 0.8374420404434204
epoch: 6, loss: 0.3771420121192932
epoch: 7, loss: 0.14397284388542175
epoch: 8, loss: 0.14508801698684692
epoch: 9, loss: 0.09872561693191528
epoch: 10, loss: 0.052366793155670166
epoch: 11, loss: 0.01637372002005577
epoch: 12, loss: 0.008798504248261452
epoch: 13, loss: 0.008416389115154743
epoch: 14, loss: 0.005424945615231991
epoch: 15, loss: 0.003018216695636511
epoch: 16, loss: 0.0011815772159025073
epoch: 17, loss: 0.0010456482414156199
epoch: 18, loss: 0.0004569842421915382
epoch: 19, loss: 0.0003230765287298709
epoch: 20, loss: 0.00011850629380205646
estimation error of w: tensor([ 0.0091, -0.0094], grad_fn=<SubBackward0>)
estimation error of b: tensor([0.0104], grad_fn=<RsubBackward1>)
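
Both estimates land within about 0.01 of the true parameters (true_w = [2, -3.4], true_b = 4.2), so the from-scratch training recovers the generating weights closely.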

References

Li Mu, Dive into Deep Learning (《动手学深度学习》)
