PyTorch: Multi-Layer Neural Networks

import torch
import numpy as np
from torch import nn
import torch.nn.functional as F

import matplotlib.pyplot as plt
%matplotlib inline
data = [(34.62365962451697, 78.0246928153624, 0.0),
 (30.28671076822607, 43.89499752400101, 0.0),
 (35.84740876993872, 72.90219802708364, 0.0),
 (60.18259938620976, 86.30855209546826, 1.0),
 (79.0327360507101, 75.3443764369103, 1.0),
 (45.08327747668339, 56.3163717815305, 0.0),
 (61.10666453684766, 96.51142588489624, 1.0),
 (75.02474556738889, 46.55401354116538, 1.0),
 (76.09878670226257, 87.42056971926803, 1.0),
 (84.43281996120035, 43.53339331072109, 1.0),
 (95.86155507093572, 38.22527805795094, 0.0),
 (75.01365838958247, 30.60326323428011, 0.0),
 (82.30705337399482, 76.48196330235604, 1.0),
 (69.36458875970939, 97.71869196188608, 1.0),
 (39.53833914367223, 76.03681085115882, 0.0),
 (53.9710521485623, 89.20735013750205, 1.0),
 (69.07014406283025, 52.74046973016765, 1.0),
 (67.94685547711617, 46.67857410673128, 0.0),
 (70.66150955499435, 92.92713789364831, 1.0),
 (76.97878372747498, 47.57596364975532, 1.0),
 (67.37202754570876, 42.83843832029179, 0.0),
 (89.6767757507208, 65.79936592745237, 1.0),
 (50.534788289883, 48.85581152764205, 0.0),
 (34.21206097786789, 44.20952859866288, 0.0),
 (77.9240914545704, 68.9723599933059, 1.0),
 (62.27101367004632, 69.95445795447587, 1.0),
 (80.1901807509566, 44.82162893218353, 1.0),
 (93.114388797442, 38.80067033713209, 0.0),
 (61.83020602312595, 50.25610789244621, 0.0),
 (38.78580379679423, 64.99568095539578, 0.0),
 (61.379289447425, 72.80788731317097, 1.0),
 (85.40451939411645, 57.05198397627122, 1.0),
 (52.10797973193984, 63.12762376881715, 0.0),
 (52.04540476831827, 69.43286012045222, 1.0),
 (40.23689373545111, 71.16774802184875, 0.0),
 (54.63510555424817, 52.21388588061123, 0.0),
 (33.91550010906887, 98.86943574220611, 0.0),
 (64.17698887494485, 80.90806058670817, 1.0),
 (74.78925295941542, 41.57341522824434, 0.0),
 (34.1836400264419, 75.2377203360134, 0.0),
 (83.90239366249155, 56.30804621605327, 1.0),
 (51.54772026906181, 46.85629026349976, 0.0),
 (94.44336776917852, 65.56892160559052, 1.0),
 (82.36875375713919, 40.61825515970618, 0.0),
 (51.04775177128865, 45.82270145776001, 0.0),
 (62.22267576120188, 52.06099194836679, 0.0),
 (77.19303492601364, 70.45820000180959, 1.0),
 (97.77159928000232, 86.7278223300282, 1.0),
 (62.07306379667647, 96.76882412413983, 1.0),
 (91.56497449807442, 88.696292545466, 1.0),
 (79.94481794066932, 74.16311935043758, 1.0),
 (99.2725269292572, 60.99903099844988, 1.0),
 (90.54671411399852, 43.39060180650027, 1.0),
 (34.52451385320009, 60.39634245837173, 0.0),
 (50.2864961189907, 49.80453881323059, 0.0),
 (49.58667721632031, 59.80895099453265, 0.0),
 (97.64563396007767, 68.86157272420604, 1.0),
 (32.57720016809309, 95.59854761387875, 0.0),
 (74.24869136721598, 69.82457122657193, 1.0),
 (71.7964620586338, 78.45356224515052, 1.0),
 (75.3956114656803, 85.75993667331619, 1.0),
 (35.28611281526193, 47.02051394723416, 0.0),
 (56.25381749711624, 39.26147251058019, 0.0),
 (30.05882244669796, 49.59297386723685, 0.0),
 (44.66826172480893, 66.45008614558913, 0.0),
 (66.56089447242954, 41.09209807936973, 0.0),
 (40.45755098375164, 97.53518548909936, 1.0),
 (49.07256321908844, 51.88321182073966, 0.0),
 (80.27957401466998, 92.11606081344084, 1.0),
 (66.74671856944039, 60.99139402740988, 1.0),
 (32.72283304060323, 43.30717306430063, 0.0),
 (64.0393204150601, 78.03168802018232, 1.0),
 (72.34649422579923, 96.22759296761404, 1.0),
 (60.45788573918959, 73.09499809758037, 1.0),
 (58.84095621726802, 75.85844831279042, 1.0),
 (99.82785779692128, 72.36925193383885, 1.0),
 (47.26426910848174, 88.47586499559782, 1.0),
 (50.45815980285988, 75.80985952982456, 1.0),
 (60.45555629271532, 42.50840943572217, 0.0),
 (82.22666157785568, 42.71987853716458, 0.0),
 (88.9138964166533, 69.80378889835472, 1.0),
 (94.83450672430196, 45.69430680250754, 1.0),
 (67.31925746917527, 66.58935317747915, 1.0),
 (57.23870631569862, 59.51428198012956, 1.0),
 (80.36675600171273, 90.96014789746954, 1.0),
 (68.46852178591112, 85.59430710452014, 1.0),
 (42.0754545384731, 78.84478600148043, 0.0),
 (75.47770200533905, 90.42453899753964, 1.0),
 (78.63542434898018, 96.64742716885644, 1.0),
 (52.34800398794107, 60.76950525602592, 0.0),
 (94.09433112516793, 77.15910509073893, 1.0),
 (90.44855097096364, 87.50879176484702, 1.0),
 (55.48216114069585, 35.57070347228866, 0.0),
 (74.49269241843041, 84.84513684930135, 1.0),
 (89.84580670720979, 45.35828361091658, 1.0),
 (83.48916274498238, 48.38028579728175, 1.0),
 (42.2617008099817, 87.10385094025457, 1.0),
 (99.31500880510394, 68.77540947206617, 1.0),
 (55.34001756003703, 64.9319380069486, 1.0),
 (74.77589300092767, 89.52981289513276, 1.0)]
# Scale each feature to (0, 1] by dividing by its maximum
x0_max = max([i[0] for i in data])
x1_max = max([i[1] for i in data])
data = [(i[0] / x0_max, i[1] / x1_max, i[2]) for i in data]

x0 = list(filter(lambda x: x[-1] == 0.0, data))
x1 = list(filter(lambda x: x[-1] == 1.0, data))

plot_x0 = [i[0] for i in x0]
plot_y0 = [i[1] for i in x0]
plot_x1 = [i[0] for i in x1]
plot_y1 = [i[1] for i in x1]

plt.plot(plot_x0, plot_y0, 'ro', label='x_0')
plt.plot(plot_x1, plot_y1, 'bo', label='x_1')
plt.legend(loc='best')
np_data = np.array(data, dtype='float32')
x_data = torch.from_numpy(np_data[:, 0:2])
y_data = torch.from_numpy(np_data[:, -1]).unsqueeze(1)
Building the network with nn.Sequential
seq_net = nn.Sequential(
    nn.Linear(2, 10),
    nn.ReLU(),
    nn.Linear(10, 2)
)
seq_net
Sequential(
  (0): Linear(in_features=2, out_features=10, bias=True)
  (1): ReLU()
  (2): Linear(in_features=10, out_features=2, bias=True)
)
# A Sequential module can be indexed to access each layer
print(seq_net[0])
# Print the first layer's weights
w0 = seq_net[0].weight
print(w0)
# parameters() returns a generator over all of the model's parameters
param = seq_net.parameters()
print(param)
# Define the optimizer
optimizer = torch.optim.SGD(seq_net.parameters(), lr=0.02)
Linear(in_features=2, out_features=10, bias=True)
Parameter containing:
tensor([[-0.5646,  0.7071],
        [ 0.1801,  0.2718],
        [-0.2039, -0.6752],
        [ 0.4783,  0.2476],
        [-0.4736,  0.1862],
        [-0.2190,  0.5800],
        [-0.6509, -0.2531],
        [-0.3308,  0.6637],
        [-0.3464, -0.6938],
        [-0.5845,  0.3192]], requires_grad=True)
<generator object Module.parameters at 0x000001DD8AD4F150>
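Since parameters() only yields the raw tensors, named_parameters() is handier for inspection. A minimal sketch (the names follow nn.Sequential's default integer indexing, and ReLU contributes no parameters):

# Inspect each parameter's name and shape
for name, p in seq_net.named_parameters():
    print(name, tuple(p.shape))
# 0.weight (10, 2)
# 0.bias (10,)
# 2.weight (2, 10)
# 2.bias (2,)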
criterion = nn.CrossEntropyLoss()

x_data = x_data.type(torch.FloatTensor)
y_data = y_data.type(torch.LongTensor).view(100)
x_data.shape, y_data.shape
(torch.Size([100, 2]), torch.Size([100]))
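This shape conversion matters: nn.CrossEntropyLoss expects raw, unnormalized logits of shape [N, C] and integer class labels of shape [N], and it applies log-softmax internally, which is why the network's last layer has no activation. A minimal sanity check with made-up values:

logits = torch.randn(4, 2)              # [N, C] raw scores, no softmax applied
targets = torch.tensor([0, 1, 1, 0])    # [N] class indices, dtype int64
print(criterion(logits, targets))       # a single scalar loss tensor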
plt.ion()
plt.show()

optimizer = torch.optim.Adam(seq_net.parameters(), lr=0.02)
loss_func = torch.nn.CrossEntropyLoss()

for i in range(100):
    out = seq_net(x_data)

    loss = loss_func(out, y_data)

    optimizer.zero_grad()   # clear gradients left over from the previous step
    loss.backward()         # backpropagate the error to compute gradients
    optimizer.step()        # apply the computed updates to the network's parameters
    
    if i % 10 == 0:
        plt.cla()
        prediction = torch.max(F.softmax(out, dim=1), 1)[1]
        pred_y = prediction.data.numpy().squeeze()
        target_y = y_data.data.numpy()
        plt.subplots_adjust(wspace=0, hspace=0)  # adjust subplot spacing
        plt.scatter(x_data.data.numpy()[:, 0], x_data.data.numpy()[:, 1], c=pred_y, s=100, lw=0, cmap='YlGn')
        accuracy = sum(pred_y == target_y) / 100.
        plt.text(0.35, 0.35, 'Accuracy={:.2f}, loss={:.6f}'.format(accuracy, loss.item()), fontdict={'size': 20, 'color': 'red'})
        plt.pause(0.1)

plt.ioff()
plt.show()
Building the network with nn.Module
This is more flexible than Sequential.
class module_net(nn.Module):
    def __init__(self, num_input, num_hidden, num_output):
        super(module_net, self).__init__()
        self.layer1 = nn.Linear(num_input, num_hidden)
        
        self.layer2 = nn.ReLU()
        
        self.layer3 = nn.Linear(num_hidden, num_output)
        
    def forward(self, x):
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        return x
mo_net = module_net(2, 10, 2)
mo_net, mo_net.layer1, mo_net.layer1.weight
(module_net(
   (layer1): Linear(in_features=2, out_features=10, bias=True)
   (layer2): ReLU()
   (layer3): Linear(in_features=10, out_features=2, bias=True)
 ),
 Linear(in_features=2, out_features=10, bias=True),
 Parameter containing:
 tensor([[-0.0596,  0.5833],
         [-0.2053, -0.3198],
         [-0.2849,  0.4568],
         [ 0.2389,  0.4954],
         [ 0.5458, -0.0301],
         [-0.5192,  0.1924],
         [-0.5884, -0.6907],
         [ 0.6217, -0.2638],
         [ 0.6625, -0.5638],
         [ 0.5429, -0.4078]], requires_grad=True))
plt.ion()
plt.show()

optimizer = torch.optim.Adam(mo_net.parameters(), lr=0.02)
loss_func = torch.nn.CrossEntropyLoss()

for i in range(100):
    out = mo_net(x_data)

    loss = loss_func(out, y_data)

    optimizer.zero_grad()   # clear gradients left over from the previous step
    loss.backward()         # backpropagate the error to compute gradients
    optimizer.step()        # apply the computed updates to the network's parameters
    
    if i % 10 == 0:
        plt.cla()
        prediction = torch.max(F.softmax(out, dim=1), 1)[1]
        pred_y = prediction.data.numpy().squeeze()
        target_y = y_data.data.numpy()
        plt.subplots_adjust(wspace=0, hspace=0)  # adjust subplot spacing
        plt.scatter(x_data.data.numpy()[:, 0], x_data.data.numpy()[:, 1], c=pred_y, s=100, lw=0, cmap='YlGn')
        accuracy = sum(pred_y == target_y) / 100.
        plt.text(0.35, 0.35, 'Accuracy={:.2f}, loss={:.6f}'.format(accuracy, loss.item()), fontdict={'size': 20, 'color': 'red'})
        plt.pause(0.1)

plt.ioff()
plt.show()
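After training, evaluation should run without gradient tracking. A minimal sketch using torch.no_grad() on the same 100 training points (this post keeps no held-out test set):

with torch.no_grad():
    out = mo_net(x_data)                              # forward pass, no autograd graph
    pred = out.argmax(dim=1)                          # predicted class per sample
    acc = (pred == y_data).float().mean().item()      # fraction of correct predictions
print('final accuracy: {:.2f}'.format(acc))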
Saving the model and its parameters
# Save the whole model (architecture and parameters together)
torch.save(seq_net, 'save_seq_net.pth')
# Load the saved model
seq_net1 = torch.load('save_seq_net.pth')
seq_net1, seq_net1[0].weight
(Sequential(
   (0): Linear(in_features=2, out_features=10, bias=True)
   (1): ReLU()
   (2): Linear(in_features=10, out_features=2, bias=True)
 ),
 Parameter containing:
 tensor([[-0.7264,  0.5381],
         [ 1.4755,  1.6805],
         [-0.2039, -0.6752],
         [ 1.9221,  1.8080],
         [-0.4736,  0.1862],
         [-0.4618,  0.3095],
         [-0.6509, -0.2531],
         [-1.5772, -0.0485],
         [-1.0295, -1.3885],
         [-0.5845,  0.3192]], requires_grad=True))
# Save only the model's parameters (the state_dict)
torch.save(seq_net.state_dict(), 'save_seq_net_params.pth')
seq_net2 = nn.Sequential(
    nn.Linear(2, 10),
    nn.Tanh(),
    nn.Linear(10, 2)
)

seq_net2.load_state_dict(torch.load('save_seq_net_params.pth'))
seq_net2, seq_net2[0].weight
(Sequential(
   (0): Linear(in_features=2, out_features=10, bias=True)
   (1): Tanh()
   (2): Linear(in_features=10, out_features=2, bias=True)
 ),
 Parameter containing:
 tensor([[-0.7264,  0.5381],
         [ 1.4755,  1.6805],
         [-0.2039, -0.6752],
         [ 1.9221,  1.8080],
         [-0.4736,  0.1862],
         [-0.4618,  0.3095],
         [-0.6509, -0.2531],
         [-1.5772, -0.0485],
         [-1.0295, -1.3885],
         [-0.5845,  0.3192]], requires_grad=True))
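Note that seq_net2 uses Tanh where the original network used ReLU, yet the load still succeeds: activations carry no parameters, and a state_dict stores parameters only. The state_dict route is also the one PyTorch recommends, since torch.save(model, ...) pickles the whole class and breaks if the defining code moves. To resume training later, the optimizer state can be checkpointed alongside the model; a minimal sketch, where net and opt are placeholder names for any model/optimizer pair:

# `net` and `opt` are hypothetical names for a model and its optimizer
checkpoint = {'model': net.state_dict(), 'optim': opt.state_dict()}
torch.save(checkpoint, 'checkpoint.pth')

ckpt = torch.load('checkpoint.pth')
net.load_state_dict(ckpt['model'])
opt.load_state_dict(ckpt['optim'])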
When building a multi-layer neural network with PyTorch, the workflow can be summarized as follows:

1. Prepare the data: convert the training and test data to tensors with torch.Tensor(), using torch.LongTensor() where integer class labels are required.
2. Build the model: define the network structure from the layers in torch.nn (fully connected, convolutional, and so on), customizing the architecture as needed.
3. Define the loss function: for a multi-class problem, nn.CrossEntropyLoss() is the usual choice; it already includes the softmax activation (see the sketch below the references).
4. Define the optimizer: choose an optimizer such as stochastic gradient descent (SGD) or Adam to update the network's parameters.
5. Train the network: run the training data through the network, compute the loss, backpropagate, and update the parameters to minimize the loss.
6. Test the network: evaluate the trained network's performance on the test data.

Following these steps, a multi-layer network can be built, trained, and tested with the functions and tools PyTorch provides. [1][2][3]

References:
1. pytorch搭建CNN+LSTM+Attention网络实现行车速度预测项目代码加数据: https://download.csdn.net/download/2301_79009758/88247134
2, 3. pytorch搭建多层神经网络解决多分类问题(采用MNIST数据集): https://blog.csdn.net/gary101818/article/details/122430644
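On step 3: a quick way to confirm that nn.CrossEntropyLoss already contains the softmax step is to compare it against an explicit log-softmax followed by nn.NLLLoss; the two losses agree (illustrative values):

logits = torch.randn(4, 2)
targets = torch.tensor([0, 1, 1, 0])
ce = nn.CrossEntropyLoss()(logits, targets)
nll = nn.NLLLoss()(F.log_softmax(logits, dim=1), targets)
print(torch.allclose(ce, nll))  # True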