Gradient derivation and verification for linear regression in PyTorch, with the model implemented via torch.nn

Code 1: forward pass and gradient computation only; loss.backward() is called to populate the gradients, but the parameter update (optimizer.step()) is not applied

import numpy as np

x_values = [i for i in range(11)]
x_train = np.array(x_values, dtype=np.float32)
x_train = x_train.reshape(-1,1)
print(x_train.shape)

y_values = [2*i+1 for i in x_values]
y_train = np.array(y_values, dtype=np.float32)
y_train = y_train.reshape(-1, 1)
print(y_train.shape)


import torch
import torch.nn as nn

class LinearRegressionModel(nn.Module):
    def __init__(self, input_dim, output_dim):
        super(LinearRegressionModel, self).__init__()
        self.linear = nn.Linear(input_dim, output_dim)

    def forward(self, x):
        out = self.linear(x)
        return out

input_dim = 1
output_dim = 1

model = LinearRegressionModel(input_dim, output_dim)

epochs = 1
learning_rate = 0.01
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
criterion = nn.MSELoss()


# torch.manual_seed(7) # cpu
# torch.cuda.manual_seed(7) #gpu
# np.random.seed(7) #numpy
# random.seed(7) #random and transforms
# torch.backends.cudnn.deterministic=True # cudnn

for epoch in range(epochs):
    epoch += 1

    inputs = torch.from_numpy(x_train)
    labels = torch.from_numpy(y_train)

    optimizer.zero_grad()
    # Pin the weight to its true value 2 so the gradient check below is
    # easy to follow; the bias keeps its random initialization.
    model.linear.weight.data = torch.tensor([2], dtype=torch.float32).reshape([1, 1])
    # model.linear.bias.data = torch.tensor([1], dtype=torch.float32).reshape([1, 1])

    outputs = model(inputs)

    loss = criterion(outputs, labels)

    print(loss)
    print(torch.sum(pow(outputs - labels, 2)) / 11)  # manual MSE over the 11 samples
    print(outputs - labels)

    loss.backward()

    print(model.linear.weight.grad)
    print(model.linear.bias.grad)
    print(2 * torch.mean(outputs - labels))  # manual bias gradient: (2/n) * sum(outputs - labels)
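    # Added check (not in the original listing): the weight gradient derived
    # below, (2/n) * sum((outputs - labels) * x), should match weight.grad.
    print(2 * torch.mean((outputs - labels) * inputs))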

    print(model.linear.weight)
    print(model.linear.bias)

    # optimizer.step()

    if epoch % 50 == 0:
        print('epoch {}, loss {}'.format(epoch, loss.item()))
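With the weight pinned to 2 and the bias at some random value b, every residual outputs(i) - labels(i) equals b - 1, so the loss is (b - 1)^2, the bias gradient is 2(b - 1), and the weight gradient is 2(b - 1) * mean(x) = 10(b - 1) (the mean of x = 0..10 is 5). The autograd values printed above should match these closed forms.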

MSELoss is

loss = \frac{1}{n}\sum_{i=1}^{n}\big(outputs(i) - labels(i)\big)^{2}

where

outputs(i) = weight \cdot x(i) + bias

Differentiating with respect to the bias via the chain rule:

\frac{\partial loss}{\partial bias} = \sum_{i=1}^{n}\frac{\partial loss}{\partial outputs(i)}\,\frac{\partial outputs(i)}{\partial bias}

with

\frac{\partial loss}{\partial outputs(i)} = \frac{2}{n}\big(outputs(i) - labels(i)\big), \qquad \frac{\partial outputs(i)}{\partial bias} = 1

so

\frac{\partial loss}{\partial bias} = \sum_{i=1}^{n}\frac{\partial loss}{\partial outputs(i)}\,\frac{\partial outputs(i)}{\partial bias} = \frac{2}{n}\sum_{i=1}^{n}\big(outputs(i) - labels(i)\big)

Similarly, for the weight:

\frac{\partial loss}{\partial weight} = \sum_{i=1}^{n}\frac{\partial loss}{\partial outputs(i)}\,\frac{\partial outputs(i)}{\partial weight} = \frac{2}{n}\sum_{i=1}^{n}\big(outputs(i) - labels(i)\big)\,x(i)
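A minimal standalone sketch that checks both closed-form gradients against autograd on random data (the variable names here are illustrative, not taken from the listings above):

import torch

torch.manual_seed(0)
x = torch.randn(11, 1)
y = 2 * x + 1
weight = torch.tensor([[1.5]], requires_grad=True)
bias = torch.tensor([0.5], requires_grad=True)

outputs = x @ weight.t() + bias        # same computation as nn.Linear(1, 1)
loss = torch.mean((outputs - y) ** 2)  # same computation as nn.MSELoss()
loss.backward()

# Closed-form gradients derived above, computed outside the autograd graph.
diff = (outputs - y).detach()
n = x.shape[0]
grad_bias = 2 / n * torch.sum(diff)
grad_weight = 2 / n * torch.sum(diff * x)

print(torch.allclose(bias.grad, grad_bias.reshape(1)))         # True
print(torch.allclose(weight.grad, grad_weight.reshape(1, 1)))  # True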

 

Code 2: one complete training step, i.e. the backward pass followed by the SGD parameter update

import numpy as np

x_values = [i for i in range(11)]
x_train = np.array(x_values, dtype=np.float32)
x_train = x_train.reshape(-1,1)
print(x_train.shape)

y_values = [2*i+1 for i in x_values]
y_train = np.array(y_values, dtype=np.float32)
y_train = y_train.reshape(-1, 1)
print(y_train.shape)

import torch
import torch.nn as nn

class LinearRegressionModel(nn.Module):
    def __init__(self, input_dim, output_dim):
        super(LinearRegressionModel, self).__init__()
        self.linear = nn.Linear(input_dim, output_dim)

    def forward(self, x):
        out = self.linear(x)
        return out

input_dim = 1
output_dim = 1

model = LinearRegressionModel(input_dim, output_dim)

epochs = 1
learning_rate = 0.01
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
criterion = nn.MSELoss()


torch.manual_seed(7) # cpu
torch.cuda.manual_seed(7) #gpu
np.random.seed(7) #numpy
# random.seed(7) #random and transforms
torch.backends.cudnn.deterministic=True # cudnn

for epoch in range(epochs):
    epoch += 1

    inputs = torch.from_numpy(x_train)
    labels = torch.from_numpy(y_train)

    optimizer.zero_grad()
    # Pin the weight to its true value 2, as in Code 1; the bias keeps its
    # (now seeded) random initialization.
    model.linear.weight.data = torch.tensor([2], dtype=torch.float32).reshape([1, 1])
    # model.linear.bias.data = torch.tensor([1], dtype=torch.float32).reshape([1, 1])

    outputs = model(inputs)

    loss = criterion(outputs, labels)

    print(loss)
    print(torch.sum(pow(outputs - labels, 2)) / 11)  # manual MSE over the 11 samples
    print(outputs - labels)

    loss.backward()

    print(model.linear.weight)
    print(model.linear.bias)

    print(model.linear.weight.grad)
    print(model.linear.bias.grad)
    # print(2*torch.mean(outputs - labels))

    optimizer.step()

    print('After the SGD update (optimizer.step()) ..............')

    print(model.linear.weight)
    print(model.linear.bias)

    print(model.linear.weight.grad)
    print(model.linear.bias.grad)


    if epoch % 50 == 0:
        print('epoch {}, loss {}'.format(epoch, loss.item()))
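After optimizer.step() the gradients are unchanged (they are only cleared by zero_grad()); what changes is each parameter, which moves by -lr times its gradient. A minimal sketch of that vanilla SGD update rule, independent of the listing above (the toy loss is illustrative):

import torch

w = torch.tensor([[2.0]], requires_grad=True)
opt = torch.optim.SGD([w], lr=0.01)

loss = ((w * 3.0 - 7.0) ** 2).mean()  # arbitrary toy loss
loss.backward()
w_before = w.detach().clone()

opt.step()  # applies w <- w - lr * w.grad in place

print(torch.allclose(w.detach(), w_before - 0.01 * w.grad))  # True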

 
