Nonlinear Function Fitting -- A BP Neural Network with PyTorch

The goal here is to use a BP (backpropagation) neural network to fit the nonlinear function y = x^2 + 2x + 1, i.e. (x+1)^2.

Import the required libraries:

import torch
import torch.nn as nn
import torch.utils.data as Data
import numpy as np
import matplotlib.pyplot as plt
from torchviz import make_dot
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
import hiddenlayer as hl

Generate the dataset by hand

That is, simply generate the x values and compute the corresponding y.

num = 400
train_x = torch.arange(-10, 10, 0.05)  # 400 evenly spaced points
train_x = train_x.numpy()
train_y = train_x * train_x + 2 * train_x + 1  # y = x^2 + 2x + 1
train_x = train_x.reshape(num, 1)  # column vector: (400, 1)
train_y = train_y.reshape(num,)    # 1-D targets: (400,)

This yields 400 points from -10 to 10 at a step of 0.05, along with the corresponding dependent variable.

To match the network's input layer, .reshape() turns x into a column vector of shape (400, 1), one feature per sample; this is a reshape, not a transpose.
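A quick shape check (a minimal sketch using the arrays defined above):

# verify the shapes expected by the network's input layer
print(train_x.shape)  # (400, 1): 400 samples, one feature each
print(train_y.shape)  # (400,): one target per sample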

Split into training and test sets

X_train, X_test, y_train, y_test = train_test_split(
    train_x, train_y, test_size=0.1, random_state=123
)

test_size is the fraction of the data held out as the test set (here 10%).

random_state is the random seed, so the same split can be reproduced later.
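As a small illustration (a sketch, not part of the original pipeline): two calls with the same random_state produce identical splits.

import numpy as np
from sklearn.model_selection import train_test_split

data = np.arange(20).reshape(10, 2)
a1, b1 = train_test_split(data, test_size=0.1, random_state=123)
a2, b2 = train_test_split(data, test_size=0.1, random_state=123)
assert np.array_equal(a1, a2) and np.array_equal(b1, b2)  # same split both times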

Convert to tensors and build the datasets

Convert all arrays to float32 tensors.

Use a DataLoader to serve batches of 360 samples; since the training set holds exactly 360 points, each epoch is a single full batch (a sanity check follows the loader definitions below).

X_train_s = X_train  # no feature scaling is applied here; the "_s" suffix is kept for naming consistency
X_test_s = X_test

X_train_s = torch.from_numpy(X_train_s.astype(np.float32))
y_train = torch.from_numpy(y_train.astype(np.float32))
X_test_s = torch.from_numpy(X_test_s.astype(np.float32))
y_test = torch.from_numpy(y_test.astype(np.float32))

train_data = Data.TensorDataset(X_train_s, y_train)
test_data = Data.TensorDataset(X_test_s, y_test)

train_loader = Data.DataLoader(
    dataset=train_data,  ## dataset to load from
    batch_size=360
#    shuffle=True,  # shuffle the data before each epoch
    # num_workers=1,  # number of worker processes
)

test_loader = Data.DataLoader(
    dataset=test_data,  ## dataset to load from
    batch_size=len(test_data)
#    shuffle=True,  # shuffle the data before each epoch
    # num_workers=1,  # number of worker processes
)
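The sanity check mentioned above (assuming the loaders just defined): with 360 training samples and batch_size=360, each epoch yields exactly one full batch.

for b_x, b_y in train_loader:
    print(b_x.shape, b_y.shape)  # torch.Size([360, 1]) torch.Size([360])
print(len(train_loader))  # 1 batch per epoch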

Define the network:

Four fully connected layers in total:

class MLPmodel(nn.Module):
    def __init__(self):
        super(MLPmodel, self).__init__()
        ## first hidden layer
        self.hidden1 = nn.Linear(1, 200, bias=True)
        self.active1 = nn.ReLU()
        ## second hidden layer
        self.hidden2 = nn.Linear(200, 80)
        self.active2 = nn.ReLU()
        ## third hidden layer
        self.hidden3 = nn.Linear(80, 10)
        self.active3 = nn.ReLU()
        ## regression output layer
        self.regression = nn.Linear(10, 1)

    ## forward pass of the network
    def forward(self, x):
        x = self.hidden1(x)
        x = self.active1(x)
        x = self.hidden2(x)
        x = self.active2(x)
        x = self.hidden3(x)
        x = self.active3(x)
        output = self.regression(x)
        ## return the prediction
        return output

Printing an instance of the model (print(MLPmodel())) shows the structure:

MLPmodel(
  (hidden1): Linear(in_features=1, out_features=200, bias=True)
  (active1): ReLU()
  (hidden2): Linear(in_features=200, out_features=80, bias=True)
  (active2): ReLU()
  (hidden3): Linear(in_features=80, out_features=10, bias=True)
  (active3): ReLU()
  (regression): Linear(in_features=10, out_features=1, bias=True)
)
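The same architecture can be written more compactly with nn.Sequential; a sketch of an equivalent model (the name mlp_seq is hypothetical):

mlp_seq = nn.Sequential(
    nn.Linear(1, 200), nn.ReLU(),
    nn.Linear(200, 80), nn.ReLU(),
    nn.Linear(80, 10), nn.ReLU(),
    nn.Linear(10, 1),  # regression output
)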

You can also use torchviz to render a graph of the network:

x = torch.randn(1, 1).requires_grad_(True)  # dummy input: one sample, one feature
y = mlp1(x)
MyConvnetis = make_dot(y, params=dict(list(mlp1.named_parameters()) + [('x', x)]))
MyConvnetis.format = "png"
MyConvnetis.directory = r"C:\Users\12860\Desktop"  # output directory for the rendered PNG
MyConvnetis.view()
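Note that make_dot returns a graphviz Digraph, so the Graphviz system binaries must be installed (in addition to the torchviz Python package) for view() to render the PNG.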

Setup before training:

mlp1 = MLPmodel()
optimizer = torch.optim.Adam(mlp1.parameters(), lr=0.001)
loss_func = nn.MSELoss()  # mean squared error
train_loss_all = []  ## records the training loss of every batch
## train, logging the loss at each iteration
history1 = hl.History()
# visualize with a hiddenlayer Canvas
canvas1 = hl.Canvas()
print_step = 100
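For reference, nn.MSELoss with its default reduction='mean' computes the mean of the squared differences; a minimal check:

a = torch.tensor([1.0, 2.0, 3.0])
b = torch.tensor([1.5, 2.0, 2.0])
print(nn.MSELoss()(a, b))     # tensor(0.4167)
print(((a - b) ** 2).mean())  # the same value computed by hand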

Train the network

for epoch in range(2000):
    ## iterate over the training DataLoader
    for step, (b_x, b_y) in enumerate(train_loader):
        output = mlp1(b_x).flatten()  # MLP output on the training batch
        train_loss = loss_func(output, b_y)  # mean squared error
        optimizer.zero_grad()  # reset gradients for this step
        train_loss.backward()  # backpropagate to compute gradients
        optimizer.step()  # update the parameters
        train_loss_all.append(train_loss.item())
        niter = epoch * len(train_loader) + step + 1  # with one batch per epoch, niter == epoch + 1

    if niter % print_step == 0:
        with torch.no_grad():
            output1 = mlp1(X_test_s)
            y_test = y_test.reshape(len(y_test), 1)
            test_loss = loss_func(output1, y_test)
        # log the iteration number and test loss to the history
        history1.log(niter, test_loss=test_loss.item())
        # redraw the test-loss curve
        with canvas1:
            canvas1.draw_plot(history1["test_loss"])

plt.figure()
plt.plot(train_loss_all, "r-")
plt.title("Train loss per iteration")
plt.show()

The hiddenlayer canvas visualizes the test_loss curve in real time as training runs:

Validate the network



x1 = np.arange(-14, 14, 0.5)  # extends beyond the training range of [-10, 10)
x1 = x1.reshape(len(x1), 1)
y1 = x1 * x1 + 2 * x1 + 1  # ground-truth curve
#x1 = scales.fit_transform(x1)
#y1 = scales.fit_transform(y1)

plt.figure(2)
plt.plot(x1, y1, 'r-')  # true function in red

x2 = np.arange(-14, 14, 0.5)
x2 = x2.reshape(len(x2), 1)
#x2 = scales.transform(x2)
x2 = torch.from_numpy(x2.astype(np.float32))

y2 = mlp1(x2)  # network predictions
x2 = x2.numpy()
y2 = y2.detach().numpy()

plt.scatter(x2, y2)  # predictions as scatter points

plt.show()

The fit is fairly accurate near the training range, and accuracy gradually degrades as x moves away from the training data.
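One way to quantify this (a sketch, assuming the trained mlp1 and the imports above; mse_on_range is a hypothetical helper): compare the model's error against the true function inside the training interval [-10, 10) and on the extrapolated band beyond it.

def mse_on_range(lo, hi, step=0.1):
    # evaluate the trained network against the true function on [lo, hi)
    xs = np.arange(lo, hi, step, dtype=np.float32).reshape(-1, 1)
    ys = xs * xs + 2 * xs + 1
    with torch.no_grad():
        preds = mlp1(torch.from_numpy(xs)).numpy()
    return float(((preds - ys) ** 2).mean())

print("inside  [-10, 10):", mse_on_range(-10, 10))
print("outside [10, 14): ", mse_on_range(10, 14))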

Full code:

import torch
import torch.nn as nn
import torch.utils.data as Data
import numpy as np
import matplotlib.pyplot as plt
from torchviz import make_dot
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
import hiddenlayer as hl

num = 400
train_x = torch.arange(-10, 10, 0.05)
train_x = train_x.numpy()
train_y = train_x * train_x + 2 * train_x + 1
train_x = train_x.reshape(num, 1)
train_y = train_y.reshape(num,)


X_train, X_test, y_train, y_test = train_test_split(
    train_x, train_y, test_size=0.1, random_state=123
)

scales = MinMaxScaler(feature_range=(0, 1))  # created but not applied; scaling is skipped for this 1-D input
X_train_s = X_train
X_test_s = X_test

X_train_s = torch.from_numpy(X_train_s.astype(np.float32))
y_train = torch.from_numpy(y_train.astype(np.float32))
X_test_s = torch.from_numpy(X_test_s.astype(np.float32))
y_test = torch.from_numpy(y_test.astype(np.float32))

train_data = Data.TensorDataset(X_train_s, y_train)
test_data = Data.TensorDataset(X_test_s,y_test)

train_loader = Data.DataLoader(
    dataset=train_data,  ## dataset to load from
    batch_size=360
#    shuffle=True,  # shuffle the data before each epoch
    # num_workers=1,  # number of worker processes
)

test_loader = Data.DataLoader(
    dataset=test_data,  ## dataset to load from
    batch_size=len(test_data)
#    shuffle=True,  # shuffle the data before each epoch
    # num_workers=1,  # number of worker processes
)


class MLPmodel(nn.Module):
    def __init__(self):
        super(MLPmodel, self).__init__()
        ## first hidden layer
        self.hidden1 = nn.Linear(1, 200, bias=True)
        self.active1 = nn.ReLU()
        ## second hidden layer
        self.hidden2 = nn.Linear(200, 80)
        self.active2 = nn.ReLU()
        ## third hidden layer
        self.hidden3 = nn.Linear(80, 10)
        self.active3 = nn.ReLU()
        ## regression output layer
        self.regression = nn.Linear(10, 1)

    ## forward pass of the network
    def forward(self, x):
        x = self.hidden1(x)
        x = self.active1(x)
        x = self.hidden2(x)
        x = self.active2(x)
        x = self.hidden3(x)
        x = self.active3(x)
        output = self.regression(x)
        ## return the prediction
        return output


## instantiate the network and set up training
mlp1 = MLPmodel()
optimizer = torch.optim.Adam(mlp1.parameters(), lr=0.001)
loss_func = nn.MSELoss()  # mean squared error
train_loss_all = []  ## records the training loss of every batch
## train, logging the loss at each iteration
history1 = hl.History()
# visualize with a hiddenlayer Canvas
canvas1 = hl.Canvas()
print_step = 100


for epoch in range(2000):
    ## iterate over the training DataLoader
    for step, (b_x, b_y) in enumerate(train_loader):
        output = mlp1(b_x).flatten()  # MLP output on the training batch
        train_loss = loss_func(output, b_y)  # mean squared error
        optimizer.zero_grad()  # reset gradients for this step
        train_loss.backward()  # backpropagate to compute gradients
        optimizer.step()  # update the parameters
        train_loss_all.append(train_loss.item())
        niter = epoch * len(train_loader) + step + 1

    if niter % print_step == 0:
        with torch.no_grad():
            output1 = mlp1(X_test_s)
            y_test = y_test.reshape(len(y_test), 1)
            test_loss = loss_func(output1, y_test)
        # log the iteration number and test loss to the history
        history1.log(niter, test_loss=test_loss.item())
        # redraw the test-loss curve
        with canvas1:
            canvas1.draw_plot(history1["test_loss"])



x = torch.randn(1, 1).requires_grad_(True)  # dummy input: one sample, one feature
y = mlp1(x)
MyConvnetis = make_dot(y, params=dict(list(mlp1.named_parameters()) + [('x', x)]))
MyConvnetis.format = "png"
MyConvnetis.directory = r"C:\Users\12860\Desktop"  # output directory for the rendered PNG
MyConvnetis.view()

plt.figure()
plt.plot(train_loss_all, "r-")
plt.title("Train loss per iteration")
plt.show()

x1 = np.arange(-14, 14, 0.5)
x1 = x1.reshape(len(x1), 1)
y1 = x1 * x1 + 2 * x1 + 1
#x1 = scales.fit_transform(x1)
#y1 = scales.fit_transform(y1)



plt.figure(2)
plt.plot(x1, y1, 'r-')

x2 = np.arange(-14, 14, 0.5)
x2 = x2.reshape(len(x2), 1)
#x2 = scales.transform(x2)
x2 = torch.from_numpy(x2.astype(np.float32))

y2 = mlp1(x2)
x2 = x2.numpy()
y2 = y2.detach().numpy()


plt.scatter(x2, y2)

plt.show()
