三、PyTorch 线性回归

目录

1、线性回归

2、示例一

3、示例二

(1)自编写代码方式

(2)PyTorch内置函数实现线性回归

4、示例三

5、示例四


1、线性回归

  1. 神经网络处理的主要两大类事物便是回归与分类。以一维线性回归问题为例讲解。

  2. 给定数据集 D = {(x1, y1), (x2, y2), (x3, y3), …, (xm, ym)}

  3. 线性回归希望能够优化出一个好的函数 f(x),使得 f(xi) = ωxi + b 能够与 yi 尽可能接近。

  4. 将f(x)比作神经网络,ω、 b为网络中需要训练的参数,而损失函数的作用即为衡量该神经网络输出f(xi)与yi的拟合度,并参照损失函数修改网络参数,使得结果更加趋近yi。

2、示例一

# encoding: utf-8

import torch
import torch.nn as nn
import numpy as np
import matplotlib.pyplot as plt

class Net(nn.Module):
    """Minimal regression network: one fully connected layer, 1-D in, 1-D out."""

    def __init__(self):
        super(Net, self).__init__()
        # Single linear layer mapping a scalar input x to a scalar output y.
        self.linear = nn.Linear(1, 1)

    def forward(self, x):
        """Forward pass: y = Wx + b."""
        return self.linear(x)


if __name__ == '__main__':
    """构建一维线性数据集"""
    # Build a 1-D linear dataset.
    # linspace(-1, 1, 100) samples 100 evenly spaced points in (-1, 1);
    # unsqueeze(1) reshapes x from (100,) to (100, 1) so it matches the
    # network's input dimension (x = x.reshape(-1, 1) would work too).
    x = torch.linspace(-1, 1, 100).unsqueeze(1)
    # y = 2 * x + (uniform random noise in [0, 1))
    y = 2 * x + torch.rand(x.size())
    # Plot the raw data.
    # x and y are tensors; convert to numpy arrays for matplotlib.
    x1 = np.array(x)
    y1 = np.array(y)
    plt.scatter(x1, y1)  # scatter plot of (x, y)
    plt.show()
    """ 构建网络模型"""
    net = Net()
    """训练数据"""
    loss_fuc = nn.MSELoss()  # mean-squared-error loss
    # RMSprop optimizer with learning rate 0.02
    optimizer = torch.optim.RMSprop(net.parameters(), lr=0.02) 
    epochs = 1000  # train on the data above for 1000 epochs
    for epoch in range(epochs):
        # forward
        out = net(x)  # predicted values
        loss = loss_fuc(out, y)  # loss of prediction vs. target; argument order matters
        # backward
        optimizer.zero_grad()  # zero the previous step's gradients before updating
        loss.backward()  # back-propagate the loss
        optimizer.step()  # apply the parameter update
        # Periodically report the training loss.
        if epoch % 20 == 0:
            print('epoch: {} | loss: {:.3f}'.format(epoch, loss.data))
    """输出训练好的网络的预测图线"""
    # NOTE(review): tensors are passed to plt.scatter directly here, unlike
    # the explicit numpy conversion above — presumably works via __array__;
    # converting explicitly would be more consistent.
    plt.scatter(x, y)
    out = net(x)
    print(out)
    plt.plot(x.data.numpy(), out.data.numpy())
    plt.show()
    



3、示例二

先看一张表:

表格第一列是地区,第二列是温度(单位华氏度),第三列是降水量(单位毫米),第四列是湿度,第五列是苹果产量,第六列是橙子产量。

下面这段代码的目的是为了预估出苹果和橙子在不同地区、不同环境中的产量。

yield_apple  = w11 * temp + w12 * rainfall + w13 * humidity + b1
yield_orange = w21 * temp + w22 * rainfall + w23 * humidity + b2

分别给温度temp、降水量rainfall、湿度humidity加上不同的权重(w11,w12,w13),最后再加一个b1或者b2的偏差。

通过使用被称为梯度下降的优化技术,少量多次调整权重以获得更精准的预测结果。

(1)自编写代码方式

# encoding: utf-8

import torch
import numpy as np

def get_data():
    """Return the training set as float32 numpy arrays.

    inputs  -- 5 samples x 3 features (temperature, rainfall, humidity)
    targets -- 5 samples x 2 outputs (apple yield, orange yield)
    """
    feature_rows = [[73, 67, 43],
                    [91, 88, 64],
                    [87, 134, 58],
                    [102, 43, 37],
                    [69, 96, 70]]
    target_rows = [[56, 70],
                   [81, 101],
                   [119, 133],
                   [22, 37],
                   [103, 119]]
    inputs = np.array(feature_rows, dtype='float32')
    targets = np.array(target_rows, dtype='float32')
    return inputs, targets

def model(x):
    """Affine prediction x @ w.T + b.

    Relies on the module-level parameters w (2x3 weights) and
    b (2-element bias) defined in the __main__ block below.
    """
    weighted = torch.matmul(x, w.t())
    return weighted + b

def mse(t1, t2):
    """Mean squared error between two tensors of identical shape."""
    err = t1 - t2
    return (err ** 2).sum() / err.numel()


if __name__ == '__main__':
    inputs, targets = get_data()
    inputs = torch.from_numpy(inputs)    # (5, 3) float32 feature tensor
    targets = torch.from_numpy(targets)  # (5, 2) float32 target tensor
    # Define the weights and the bias (intercept); requires_grad=True
    # so autograd tracks gradients for both.
    w = torch.randn(2, 3, requires_grad=True)
    b = torch.randn(2, requires_grad=True)
    print(w)
    print(b)
    # Manual gradient-descent loop: 100 iterations over the full batch.
    for i in range(100):
        preds = model(inputs)
        loss = mse(preds,targets)
        # Compute gradients of the loss w.r.t. w and b.
        loss.backward()
        print("w.grad = " ,w.grad)
        with torch.no_grad():
            # Gradient-descent step with learning rate 1e-5.
            w -= w.grad * (10** -5)
            b -= b.grad * (10**-5)
            # Reset gradients so they do not accumulate across iterations.
            w.grad.zero_()
            b.grad.zero_()

    print("loss = ",loss)
    print("preds = ",preds)

(2)PyTorch内置函数实现线性回归

# encoding: utf-8

import torch
import numpy as np
import torch.nn as nn
from torch.utils.data import TensorDataset,DataLoader
import torch.nn.functional as F
def get_data():
    """Training data for the built-in-API example, as float32 arrays.

    Returns (inputs, targets): 5x3 features and 5x2 yield targets.
    """
    inputs = np.array(
        [[73, 67, 43],
         [91, 88, 64],
         [87, 134, 58],
         [102, 43, 37],
         [69, 96, 70]], dtype='float32')
    targets = np.array(
        [[56, 70],
         [81, 101],
         [119, 133],
         [22, 37],
         [103, 119]], dtype='float32')
    return inputs, targets

def fit(train_d1, num_epochs, model, loss_fn, opt):
    """Train `model` for `num_epochs` epochs over the batches in `train_d1`.

    train_d1 -- DataLoader yielding (inputs, targets) batches
    loss_fn  -- loss function, e.g. F.mse_loss
    opt      -- optimizer wrapping model.parameters()
    """
    for epoch in range(num_epochs):
        for xb, yb in train_d1:
            loss = loss_fn(model(xb), yb)  # forward pass + loss
            loss.backward()                # compute gradients
            opt.step()                     # apply the parameter update
            opt.zero_grad()                # clear gradients for the next batch
        # Report the last batch's loss every 10 epochs.
        if (epoch + 1) % 10 == 0:
            print('Epoch [{}/{}], Loss:{:0.4f}'.format(epoch + 1, num_epochs, loss.item()))


if __name__ == '__main__':
    inputs, targets = get_data()
    inputs = torch.from_numpy(inputs)    # (5, 3) float32 features
    targets = torch.from_numpy(targets)  # (5, 2) float32 targets
    # Create a TensorDataset and a DataLoader:
    # the dataset pairs each feature row with its corresponding target row.
    train_ds = TensorDataset(inputs,targets)
    # print(train_ds[:3])
    batch_size = 5
    # shuffle=True reshuffles the samples every epoch.
    train_d1 = DataLoader(train_ds,batch_size,shuffle=True)
    # Linear model with 3 input features and 2 outputs;
    # nn.Linear initialises the weights and bias automatically.
    model = nn.Linear(3,2)
    # print(model.weight)
    # print(model.bias)
    # print(model.parameters())
    # Use the built-in MSE loss.
    loss_fn = F.mse_loss
    # loss = loss_fn(model(inputs),targets)
    # optim.SGD updates the weights and bias for us — no manual steps needed.
    opt = torch.optim.SGD(model.parameters(), lr=10**-5)
    # Train the model for 100 epochs.
    fit(train_d1,100,model,loss_fn,opt)
    preds = model(inputs)
    print(preds)

4、示例三

一元回归模型

# encoding: utf-8

"""
@author: sunxianpeng
@file: 1_a_yuan_linear_demo.py
@time: 2019/11/22 14:22
"""

import torch
import numpy as np
from torch.autograd import Variable
import matplotlib.pyplot as plt

def getData():
    """Return the 1-D regression training set as (15, 1) float32 tensors."""
    xs = [3.3, 4.4, 5.5, 6.71, 6.93, 4.168, 9.779, 6.182,
          7.59, 2.167, 7.042, 10.791, 5.313, 7.997, 3.1]
    ys = [1.7, 2.76, 2.09, 3.19, 1.694, 1.573, 3.366, 2.596,
          2.53, 1.221, 2.827, 3.465, 1.65, 2.904, 1.3]
    # Column vectors so the shape matches a 1-in/1-out linear model.
    x_train = torch.tensor(xs, dtype=torch.float32).unsqueeze(1)
    y_train = torch.tensor(ys, dtype=torch.float32).unsqueeze(1)
    return x_train, y_train

def plt_data(x, y, color='bo', label=None):
    """Plot tensors x vs. y with the given matplotlib format string."""
    xs = x.data.numpy()
    ys = y.data.numpy()
    plt.plot(xs, ys, color, label=label)
    plt.show()

def plt_data2(x, y, pred):
    """Plot ground truth and model predictions against x.

    x    -- input tensor
    y    -- ground-truth targets
    pred -- model predictions

    BUG FIX: the legend labels were swapped — the predictions were
    labelled 'real' and the ground truth 'estimated'.
    """
    plt.plot(x.data.numpy(), pred.data.numpy(), 'bo', label='estimated')
    plt.plot(x.data.numpy(), y.data.numpy(), 'ro', label='real')
    plt.legend()
    plt.show()


def linear_model(x, w, b):
    """Elementwise linear map w * x + b (broadcasts over x)."""
    return b + w * x

def getLoss(pred, y):
    """Mean squared error between predictions and targets."""
    sq_err = (pred - y) ** 2
    return sq_err.mean()


if __name__ == '__main__':
    epochs = 100
    learning_rate = 0.001

    torch.manual_seed(2019)  # fixed seed for reproducible initial weights
    x_train,y_train = getData()
    # plt_data(x_train,y_train)

    # Model parameters for y = w * x + b.
    w = torch.randn(1,requires_grad=True)# random initialisation
    b = torch.zeros(1,requires_grad=True)# initialised to zero
    print("w = {}\nw.shape = {}\nb = {}\nb.shape = {}".format(w,w.shape,b,b.shape))

    for epoch in range(epochs):
        pred = linear_model(x_train,w,b)
        loss = getLoss(pred,y_train)


        loss.backward()

        # Manual gradient-descent update on .data (bypasses autograd tracking).
        w.data = w.data -  learning_rate * w.grad.data
        b.data = b.data - learning_rate * b.grad.data
        if epoch % 1 == 0:
            print('epoch = {},loss = {}'.format(epoch,loss.item()))
        # Clear accumulated gradients before the next backward pass.
        w.grad.data.zero_()
        b.grad.data.zero_()

    pred = linear_model(x_train,w,b)
    plt_data2(x_train, y_train, pred)

5、示例四

多元回归模型

# encoding: utf-8

"""
@author: sunxianpeng
@file: 1_a_yuan_linear_demo.py
@time: 2019/11/22 14:22
"""

import torch
import numpy as np
from torch.autograd import Variable
import matplotlib.pyplot as plt

def getData():
    """Build a cubic-polynomial regression dataset.

    Ground truth: y = 0.9 + 0.5*x + 3*x^2 + 2.4*x^3, sampled on [-3, 3].

    Returns:
        x_train  -- (n, 3) float32 tensor whose columns are [x, x^2, x^3]
        y_train  -- (n, 1) float64 tensor of targets
        y_sample -- (n,) float64 numpy array of the same targets (for plotting)
    """
    # test_func()  # visual check of the polynomial model
    w_target = np.array([0.5, 3, 2.4])   # coefficients of x, x^2, x^3
    b_target = np.array([0.9])           # intercept
    x_sample = np.arange(-3, 3.1, 0.1)
    # BUG FIX: the cubic term was `w_target[2] + x_sample ** 3` (added
    # instead of multiplied); the formula printed by test_func() confirms
    # it must be w_target[2] * x^3.
    y_sample = (b_target[0]
                + w_target[0] * x_sample
                + w_target[1] * x_sample ** 2
                + w_target[2] * x_sample ** 3)

    # Stack [x, x^2, x^3] as the three input features: shape (n, 3).
    x_train = np.stack([x_sample ** i for i in range(1, 4)], axis=1)
    y_train = torch.from_numpy(y_sample).double().unsqueeze(1)
    x_train = torch.from_numpy(x_train).float()
    return x_train, y_train, y_sample

def test_func():
    """Print the ground-truth cubic formula and plot it over [-3, 3].

    Fixes relative to the original:
    - the cubic term was added (`w_target[2] + x ** 3`) instead of
      multiplied, contradicting the formula printed below;
    - the format string was garbled with literal tab characters;
    - the call `plt_data(x_sample, y_sample)` passed two arguments to a
      three-argument function (TypeError) and numpy arrays where tensors
      were expected — the curve is now plotted directly.
    """
    w_target = np.array([0.5, 3, 2.4])   # coefficients of x, x^2, x^3
    b_target = np.array([0.9])           # intercept
    f_des = 'y = {:.2f} + {:.2f} * x + {:.2f} * x^2 + {:.2f} * x^3'.format(
        b_target[0], w_target[0], w_target[1], w_target[2])  # human-readable formula
    print(f_des)

    x_sample = np.arange(-3, 3.1, 0.1)
    y_sample = (b_target[0] + w_target[0] * x_sample
                + w_target[1] * x_sample ** 2 + w_target[2] * x_sample ** 3)
    plt.plot(x_sample, y_sample, color='b', label='real curve')
    plt.legend()
    plt.show()

def plt_data(x_train, y, pred):
    """Overlay the fitted curve (red) and the true curve (blue) against x.

    x_train -- (n, 3) tensor; its first column holds the raw x values
    y       -- true curve values (numpy array)
    pred    -- model predictions (tensor)
    """
    xs = x_train.data.numpy()[:, 0]
    plt.plot(xs, pred.data.numpy(), label='fitting curve', color='r')
    plt.plot(xs, y, label='real curve', color='b')
    plt.legend()
    plt.show()

def multi_linear(x, w, b):
    """Multivariate linear model: x @ w + b."""
    return x.mm(w) + b

def getLoss(pred, y):
    """Mean squared error of predictions against targets."""
    return ((pred - y) ** 2).mean()

if __name__ == '__main__':
    torch.manual_seed(2019)  # reproducible initialisation
    epochs = 100
    learning_rate = 0.001

    x_train,y_train,y_sample = getData()

    # Parameters of the 3-feature linear model ([x, x^2, x^3] -> y).
    # NOTE(review): .float() on an already-float32 tensor returns the same
    # tensor, so w and b presumably stay autograd leaves here — confirm.
    w = torch.randn(3,1,requires_grad=True).float()
    b = torch.zeros(1,requires_grad=True).float()
    print("w = {}\nw.shape = {}\nb = {}\nb.shape = {}".format(w,w.shape,b,b.shape))

    for epoch in  range(epochs):
        pred = multi_linear(x_train,w,b)
        loss = getLoss(pred,y_train)


        loss.backward()

        # Manual gradient-descent step, then reset gradients so they
        # do not accumulate across iterations.
        w.data = w.data - learning_rate * w.grad.data
        b.data = b.data - learning_rate * b.grad.data
        w.grad.data.zero_()
        b.grad.data.zero_()
        if epoch % 10 ==0:
            print("w = {}\nw.shape = {}\nb = {}\nb.shape = {}".format(w, w.shape, b, b.shape))

    pred = multi_linear(x_train,w,b)
    plt_data(x_train,y_sample,pred)


 

  • 0
    点赞
  • 3
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值