快速搭建全连接神经网络实现非线性回归(学习笔记一)

1.神经网络模型的搭建

定义一个基础的神经网络架构,同时新建一个net=Net(1, 100, 100, 10, 1),包含三个隐藏层的神经网络:

import torch
import os
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"


class Net(torch.nn.Module):
    """Fully connected regression network with three hidden layers.

    Layer sizes: n_feature -> n_hidden -> n_hidden1 -> n_hidden2 -> n_output.
    Each hidden layer is followed by a ReLU; the output layer is linear.
    """

    def __init__(self, n_feature, n_hidden, n_hidden1, n_hidden2, n_output):
        super(Net, self).__init__()
        # Three hidden linear layers plus a linear output head.
        self.hidden0 = torch.nn.Linear(n_feature, n_hidden)
        self.hidden1 = torch.nn.Linear(n_hidden, n_hidden1)
        self.hidden2 = torch.nn.Linear(n_hidden1, n_hidden2)
        self.predict = torch.nn.Linear(n_hidden2, n_output)

    def forward(self, x):
        """Run the input through the ReLU-activated hidden stack, then the linear head."""
        for layer in (self.hidden0, self.hidden1, self.hidden2):
            x = torch.relu(layer(x))
        return self.predict(x)
# Build the network: 1 input feature, hidden sizes 100 / 100 / 10, 1 output.
net = Net(1, 100, 100, 10, 1)
print(net)  # prints the layer-by-layer architecture

输出结果为:

Net(
  (hidden0): Linear(in_features=1, out_features=100, bias=True)
  (hidden1): Linear(in_features=100, out_features=100, bias=True)
  (hidden2): Linear(in_features=100, out_features=10, bias=True)
  (predict): Linear(in_features=10, out_features=1, bias=True)
)

这里可以清楚地看到所搭建的神经网络的架构细节

2.神经网络实现非线性回归

import torch
import matplotlib.pyplot as plt
import os
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"

# 421 evenly spaced inputs on [-3, 5]; unsqueeze to shape (421, 1) since Linear expects 2-D input.
x = torch.unsqueeze(torch.linspace(-3, 5, 421), dim=1)
# Quadratic target with uniform noise in [0, 0.2).
y = 0.3 * x.pow(2) + 0.2 * torch.rand(x.size())


class Net(torch.nn.Module):
    """Single-hidden-layer regression network: n_feature -> n_hidden -> n_output."""

    def __init__(self, n_feature, n_hidden, n_output):
        super(Net, self).__init__()
        self.hidden = torch.nn.Linear(n_feature, n_hidden)
        self.predict = torch.nn.Linear(n_hidden, n_output)

    def forward(self, x):
        """ReLU on the hidden layer, then a linear output layer."""
        hidden_out = torch.relu(self.hidden(x))
        return self.predict(hidden_out)


# Build the regressor: 1 input feature, 100 hidden units, 1 output.
net = Net(1, 100, 1)
# print(net)
plt.ion()   # interactive mode so the figure refreshes during training
plt.show()
optimizer = torch.optim.Adam(net.parameters(), lr=0.1)
loss_func = torch.nn.MSELoss()  # mean-squared error for regression

for t in range(200):
    prediction = net(x)                 # forward pass on the full data set
    loss = loss_func(prediction, y)

    optimizer.zero_grad()               # clear gradients from the previous step
    loss.backward()                     # back-propagate
    optimizer.step()                    # update parameters
    if t % 5 == 0:
        # Redraw the data scatter and the current fitted curve.
        plt.cla()
        plt.scatter(x.numpy(), y.numpy())
        # BUG FIX: `prediction` carries gradient history; matplotlib's implicit
        # numpy conversion raises "Can't call numpy() on Tensor that requires
        # grad", so it must be detached before plotting.
        plt.plot(x.numpy(), prediction.detach().numpy(), 'r-', lw=3)
        plt.text(0.5, 0, 'Loss=%.4f' % loss.item(), fontdict={'size': 20, 'color': 'red'})
        plt.pause(0.1)
plt.ioff()  # leave interactive mode and keep the final figure open
plt.show()

2.结果展示

在这里插入图片描述

3.关键代码解析

1.代码:

import os
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"

是为了解决下面的问题

OMP: Error #15: Initializing libiomp5md.dll, but found libiomp5md.dll already initialized.

2.代码:

x = torch.unsqueeze(torch.linspace(-3, 5, 421), dim=1)
y = 0.3 * x.pow(2) + 0.2 * torch.rand(x.size())

产生数据,x和y

3.快速搭建神经网络实现非线性回归

import torch
import matplotlib.pyplot as plt
import os
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"

def regression():
    """Fit a noisy quadratic with a small Sequential network and animate the fit.

    Builds 100 points of y = x^2 + noise on [-1, 1], trains a
    Linear-Sigmoid-Linear-Tanh-Linear stack with Adam/MSE for 180 steps,
    and redraws the fitted curve every 5 steps.
    """
    x = torch.unsqueeze(torch.linspace(-1, 1, 100), dim=1)  # shape (100, 1)
    y = x.pow(2) + torch.rand(x.size())                     # noisy quadratic target

    # Quick model definition without a custom nn.Module subclass.
    net = torch.nn.Sequential(
        torch.nn.Linear(1, 10),
        torch.nn.Sigmoid(),
        torch.nn.Linear(10, 3),
        torch.nn.Tanh(),
        torch.nn.Linear(3, 1)
    )
    plt.ion()   # interactive mode for live redrawing
    plt.show()
    optimizer = torch.optim.Adam(net.parameters(), lr=0.1)
    loss_func = torch.nn.MSELoss()

    for t in range(180):
        prediction = net(x)
        loss = loss_func(prediction, y)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if t % 5 == 0:
            plt.cla()
            plt.scatter(x.numpy(), y.numpy())
            # Use detach() (the modern replacement for the deprecated .data)
            # to strip gradient tracking before converting to NumPy.
            plt.plot(x.detach().numpy(), prediction.detach().numpy(), 'r-', lw=3)
            plt.text(0.5, 0, 'Loss=%.4f' % loss.item(), fontdict={'size': 20, 'color': 'red'})
            plt.pause(0.1)
    plt.ioff()
    plt.show()


# Script entry point: run the regression demo only when executed directly.
if __name__ == '__main__':
    regression()

4.结果展示

在这里插入图片描述

5. 分批次传入数据进行训练

代码:

import torch
import torch.utils.data as Data
import matplotlib.pyplot as plt
import os
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
Batch_size = 50  # mini-batch size served by the DataLoader

# 200 points on [-20, 30], shaped (200, 1); noisy quadratic target.
x = torch.unsqueeze(torch.linspace(-20, 30, 200), dim=1)
y = 3 * x.pow(2) + torch.rand(x.size())
plt.scatter(x.numpy(), y.numpy(), lw=1)

# Wrap the tensors in a dataset and serve shuffled mini-batches.
torch_dataset = Data.TensorDataset(x, y)
loader = Data.DataLoader(
    dataset=torch_dataset,
    batch_size=Batch_size,
    shuffle=True,
    num_workers=4,  # worker processes; the __main__ guard below is required for this on Windows
)

net = torch.nn.Sequential(
    torch.nn.Linear(1, 100),
    torch.nn.Tanh(),
    torch.nn.Linear(100, 1),
)

optimizer = torch.optim.Adam(net.parameters(), lr=0.3)
loss_function = torch.nn.MSELoss()

if __name__ == '__main__':
    for epoch in range(30):
        for step, (batch_x, batch_y) in enumerate(loader):
            # BUG FIX: train on the current mini-batch, not the full x/y —
            # the original used net(x)/loss(prediction, y), which made the
            # DataLoader pointless and did full-batch training every step.
            prediction = net(batch_x)
            loss = loss_function(prediction, batch_y)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            # .item() prints a plain float instead of a tensor with grad_fn noise.
            print('Epoch:', epoch, ' |Step:', step, ' |loss:', loss.item())
    # Plot the fitted curve over the full data range (no gradients needed).
    with torch.no_grad():
        prediction = net(x)
    plt.plot(x.numpy(), prediction.numpy(), 'r-', lw=7)
    plt.show()

结果:
在这里插入图片描述

  • 0
    点赞
  • 10
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值