Quickly Building a Fully Connected Neural Network for Binary Classification (Learning Notes 2)

1. Code

import torch
import matplotlib.pyplot as plt
import os
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"


class NeuralNetwork(torch.nn.Module):
    def __init__(self, n_feature, hidden1, output):
        super(NeuralNetwork, self).__init__()
        self.hidden0 = torch.nn.Linear(n_feature, hidden1)  # input layer -> hidden layer
        self.predict = torch.nn.Linear(hidden1, output)     # hidden layer -> output layer

    def forward(self, x):
        x = torch.tanh(self.hidden0(x))  # Tanh activation on the hidden layer
        x = self.predict(x)              # raw logits; CrossEntropyLoss applies log-softmax internally
        return x


def classification():
    # Toy dataset: two clusters of 200 points each in 2-D feature space
    x_data = torch.ones(200, 2)
    x0 = torch.normal(2 * x_data ** 2, 1)                                  # cluster for class 0
    y0 = torch.zeros(200)                                                  # labels 0
    x1 = torch.normal(-1.6 * x_data ** 2 + torch.rand(x_data.size()), 1)   # cluster for class 1
    y1 = torch.ones(200)                                                   # labels 1
    x = torch.cat((x0, x1), 0).type(torch.FloatTensor)  # features, shape (400, 2)
    y = torch.cat((y0, y1), ).type(torch.LongTensor)    # integer class labels for CrossEntropyLoss

    #plt.scatter(x[:, 0], x[:, 1], c=y, s=100, lw=0, marker='o', cmap='rainbow')
    #plt.show()
    net = NeuralNetwork(2, 28, 2)  # 2 input features, 28 hidden neurons, 2 output classes
    plt.ion()
    plt.show()
    optimizer = torch.optim.Adam(net.parameters(), lr=0.02)
    loss_func = torch.nn.CrossEntropyLoss()  # expects raw logits and LongTensor class labels

    for t in range(10):
        out = net(x)
        loss = loss_func(out, y)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if t % 2 == 0:
            plt.cla()
            prediction = torch.max(out, 1)[1]  # predicted class = index of the larger logit
            prediction_y = prediction.data.numpy()
            target_y = y.data.numpy()
            plt.scatter(x[:, 0], x[:, 1], c=prediction_y, s=100, lw=0, cmap='RdYlGn')
            accuracy = float((prediction_y == target_y).astype(int).sum()) / float(target_y.size)
            plt.text(1.5, -4, 'Accuracy=%.2f' % accuracy, fontdict={'size': 20, 'color':  'red'})
            plt.pause(0.1)
    plt.ioff()
    plt.show()


if __name__ == '__main__':
    classification()

2. Results

[Figure: scatter plot of the two clusters colored by the predicted class, with the training accuracy printed on the plot]

3. A Second Quick Way to Build the Network

import torch
import matplotlib.pyplot as plt
import os
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"

def classification():
    x_data = torch.ones(200, 2)
    x0 = torch.normal(2 * x_data ** 2, 1)
    y0 = torch.zeros(200)
    x1 = torch.normal(-1.6 * x_data ** 2 + torch.rand(x_data.size()), 1)
    y1 = torch.ones(200)
    x = torch.cat((x0, x1), 0).type(torch.FloatTensor)
    y = torch.cat((y0, y1), ).type(torch.LongTensor)

    #plt.scatter(x[:, 0], x[:, 1], c=y, s=100, lw=0, marker='o', cmap='rainbow')
    #plt.show()
    #net = NeuralNetwork(2, 28, 2)
    net = torch.nn.Sequential(
        torch.nn.Linear(2, 10),
        torch.nn.ReLU(),
        torch.nn.Linear(10, 10),
        torch.nn.Tanh(),
        torch.nn.Linear(10, 2)
    )
    plt.ion()
    plt.show()
    optimizer = torch.optim.Adam(net.parameters(), lr=0.02)
    loss_func = torch.nn.CrossEntropyLoss()

    for t in range(10):
        out = net(x)
        loss = loss_func(out, y)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if t % 2 == 0:
            plt.cla()
            prediction = torch.max(out, 1)[1]
            prediction_y = prediction.data.numpy()
            target_y = y.data.numpy()
            plt.scatter(x[:, 0], x[:, 1], c=prediction_y, s=100, lw=0, cmap='RdYlGn')
            accuracy = float((prediction_y == target_y).astype(int).sum()) / float(target_y.size)
            plt.text(1.5, -4, 'Accuracy=%.2f' % accuracy, fontdict={'size': 20, 'color':  'red'})
            plt.pause(0.1)
    plt.ioff()
    plt.show()


if __name__ == '__main__':
    classification()

In this version, the snippet:

  net = torch.nn.Sequential(
        torch.nn.Linear(2, 10),
        torch.nn.ReLU(),
        torch.nn.Linear(10, 10),
        torch.nn.Tanh(),
        torch.nn.Linear(10, 2)
    )

replaces the network class defined in the first approach. It builds a network with two hidden layers: the first hidden layer uses a ReLU activation and has 10 neurons, and the second hidden layer uses a Tanh activation, also with 10 neurons. Because this is a binary classification problem and the dataset has only two features, the input layer has 2 neurons and the output layer has 2 neurons as well.
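For comparison, here is a minimal class-style sketch of the same two-hidden-layer network, written in the style of section 1 (the class and attribute names below are illustrative, not from the original code):

import torch

# Illustrative class-style equivalent of the Sequential network above
class TwoHiddenNet(torch.nn.Module):
    def __init__(self):
        super(TwoHiddenNet, self).__init__()
        self.hidden0 = torch.nn.Linear(2, 10)   # input (2 features) -> 10 neurons, ReLU
        self.hidden1 = torch.nn.Linear(10, 10)  # 10 -> 10 neurons, Tanh
        self.predict = torch.nn.Linear(10, 2)   # 10 -> 2 output classes (raw logits)

    def forward(self, x):
        x = torch.relu(self.hidden0(x))
        x = torch.tanh(self.hidden1(x))
        return self.predict(x)

Both forms build exactly the same layers; torch.nn.Sequential simply avoids writing __init__ and forward by hand.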

4. Saving the Trained Model

torch.save(net, 'net.pkl')                      # save the entire model
torch.save(net.state_dict(), 'net_params.pkl')  # save only the parameters

The first call saves the entire model with all of its details (structure and parameters); the second saves only the model's parameters (the state_dict). The model can then be loaded in either of the following ways:

def load1():
    # Restore the whole saved model in one step
    net = torch.load('net.pkl')

def load2():
    # Rebuild the same architecture first, then load only the saved parameters
    net = torch.nn.Sequential(
        torch.nn.Linear(1, 10),
        torch.nn.Sigmoid(),
        torch.nn.Linear(10, 1)
    )
    net.load_state_dict(torch.load('net_params.pkl'))
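As a quick sanity check (a minimal sketch, not part of the original post, assuming the two .pkl files saved above already exist in the working directory), both loading routes should give identical outputs:

import torch

# Hypothetical check that both loading methods restore the same model.
# Newer PyTorch versions may require torch.load('net.pkl', weights_only=False)
# when loading a full pickled model.
net_full = torch.load('net.pkl')                  # route 1: whole model
net_rebuilt = torch.nn.Sequential(                # route 2: rebuild, then load params
    torch.nn.Linear(1, 10),
    torch.nn.Sigmoid(),
    torch.nn.Linear(10, 1)
)
net_rebuilt.load_state_dict(torch.load('net_params.pkl'))

sample = torch.linspace(-1, 1, 5).unsqueeze(1)    # a few 1-D test inputs
print(torch.allclose(net_full(sample), net_rebuilt(sample)))  # expected: True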

The complete script is:

import torch
import matplotlib.pyplot as plt
import os
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"

# Toy regression data (the original post omits this definition; a simple
# quadratic with noise is assumed here so that the script runs end to end)
x = torch.unsqueeze(torch.linspace(-1, 1, 100), dim=1)  # shape (100, 1)
y = x.pow(2) + 0.2 * torch.rand(x.size())               # noisy targets


def regression():
    net = torch.nn.Sequential(
        torch.nn.Linear(1, 10),
        torch.nn.Sigmoid(),
        torch.nn.Linear(10, 1)
    )
    optimizer = torch.optim.Adam(net.parameters(), lr=0.18)
    loss_func = torch.nn.MSELoss()

    for t in range(180):
        prediction = net(x)
        loss = loss_func(prediction, y)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

    torch.save(net, 'net.pkl')
    torch.save(net.state_dict(), 'net_params.pkl')
    plt.figure(1, figsize=(10, 3))
    plt.subplot(131)
    plt.title('Net1')
    plt.scatter(x, y)
    plt.plot(x.data.numpy(), prediction.data.numpy(), 'r-', lw=5)


def load1():
    net = torch.load('net.pkl')
    prediction = net(x)
    plt.subplot(132)
    plt.title('Net2')
    plt.scatter(x, y)
    plt.plot(x.data.numpy(), prediction.data.numpy(), 'r-', lw=5)


def load2():
    net = torch.nn.Sequential(
        torch.nn.Linear(1, 10),
        torch.nn.Sigmoid(),
        torch.nn.Linear(10, 1)
    )
    net.load_state_dict(torch.load('net_params.pkl'))
    prediction = net(x)
    plt.subplot(133)
    plt.title('Net3')
    plt.scatter(x, y)
    plt.plot(x.data.numpy(), prediction.data.numpy(), 'r-', lw=5)
    plt.show()


if __name__ == '__main__':
    regression()
    load1()
    load2()

The result is shown below:
[Figure: three subplots, Net1, Net2, and Net3, showing the same fitted curve from the original model and the two reloaded models]
