PyTorch Notes: Rebuilding a Neural Network from Saved Files

import torch
x=torch.Tensor([[1,1],[1,0],[0,1],[0,0]])# training data
y=torch.Tensor([[1],[0],[0],[1]])# labels (XNOR of the two inputs)
#print(y)
# a quick way to build the network with Sequential
net=torch.nn.Sequential(
    torch.nn.Linear(2,10),
    torch.nn.ReLU(),
    torch.nn.Linear(10,1),
    torch.nn.Sigmoid()
)
print('--------------------------------------')
print('Current network:')
print(net)
loss_function=torch.nn.BCELoss()# note: BCELoss expects its inputs (the predictions, not the labels) to lie in (0,1), otherwise it raises an error
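# Note: PyTorch also provides torch.nn.BCEWithLogitsLoss, which fuses the
# Sigmoid and BCELoss into one numerically more stable class; with it, the
# final Sigmoid layer above could be dropped.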
print('--------------------------------------')
print('Loss function:')
print(loss_function)
optimizer = torch.optim.SGD(net.parameters(), lr=0.1, momentum=0.9)# SGD with momentum to speed up convergence
print('--------------------------------------')
print('Optimizer:')
print(optimizer)
for i in range(100):
    out=net(x)# forward pass: feed the training data through the network
    #print(out)
    loss=loss_function(out,y)# error between the output and the target
    print("loss is %f"%loss.item())
    optimizer.zero_grad()# clear gradients, otherwise they accumulate across iterations
    loss.backward()# backpropagate the error
    optimizer.step()# update the parameters
print(out)
print(y)
torch.save(net,'net1.pkl')# save the entire network (structure + parameters)
torch.save(net.state_dict(),'net2.pkl')# save only the parameters (state_dict)
def restore_net():# rebuild the network by loading the entire saved model
    net1=torch.load('net1.pkl')
    print(net1)

def restore_parms():# rebuild the network from its saved parameters
    # the architecture must match the one used when the state_dict was saved
    net2=torch.nn.Sequential(
        torch.nn.Linear(2,10),
        torch.nn.ReLU(),
        torch.nn.Linear(10,1),
        torch.nn.Sigmoid()
    )
    net2.load_state_dict(torch.load('net2.pkl'))
    print(net2)
restore_net()
restore_parms()
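
A restored network can be used for prediction right away. Below is a minimal inference sketch (assuming net1.pkl was produced by the script above and is loaded with the same PyTorch version, since torch.load has to unpickle the full model):

import torch

net1 = torch.load('net1.pkl')  # load the full model saved above
net1.eval()                    # evaluation mode (good practice for inference)
x = torch.Tensor([[1, 1], [1, 0]])
with torch.no_grad():          # no gradients needed for inference
    print(net1(x))             # outputs close to 1 and 0 respectively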

Output:

H:\ProgramData\Anaconda3\python.exe D:/PycharmProjects/untitled/cnn.py
--------------------------------------
Current network:
Sequential(
  (0): Linear(in_features=2, out_features=10, bias=True)
  (1): ReLU()
  (2): Linear(in_features=10, out_features=1, bias=True)
  (3): Sigmoid()
)
--------------------------------------
Loss function:
BCELoss()
--------------------------------------
Optimizer:
SGD (
Parameter Group 0
    dampening: 0
    lr: 0.1
    momentum: 0.9
    nesterov: False
    weight_decay: 0
)
loss is 0.736376
loss is 0.734357
loss is 0.730678
loss is 0.725774
loss is 0.720087
loss is 0.714016
loss is 0.707876
loss is 0.701885
loss is 0.696155
loss is 0.690704
loss is 0.685471
loss is 0.680635
loss is 0.676661
loss is 0.672562
loss is 0.668630
loss is 0.664352
loss is 0.659626
loss is 0.654341
loss is 0.648385
loss is 0.641634
loss is 0.635253
loss is 0.628334
loss is 0.620825
loss is 0.611669
loss is 0.600721
loss is 0.587921
loss is 0.574643
loss is 0.562429
loss is 0.549595
loss is 0.534750
loss is 0.518863
loss is 0.504897
loss is 0.490779
loss is 0.474788
loss is 0.457842
loss is 0.439078
loss is 0.421331
loss is 0.403956
loss is 0.386479
loss is 0.367945
loss is 0.348469
loss is 0.329513
loss is 0.311717
loss is 0.294036
loss is 0.275589
loss is 0.257188
loss is 0.239693
loss is 0.225180
loss is 0.211445
loss is 0.196223
loss is 0.182617
loss is 0.170881
loss is 0.160195
loss is 0.149525
loss is 0.138817
loss is 0.128968
loss is 0.120675
loss is 0.111924
loss is 0.104452
loss is 0.097914
loss is 0.091152
loss is 0.085104
loss is 0.079656
loss is 0.074736
loss is 0.070161
loss is 0.065969
loss is 0.062073
loss is 0.058538
loss is 0.055255
loss is 0.052181
loss is 0.049308
loss is 0.046626
loss is 0.044193
loss is 0.041921
loss is 0.039796
loss is 0.038049
loss is 0.036300
loss is 0.034418
loss is 0.032971
loss is 0.031656
loss is 0.030443
loss is 0.029266
loss is 0.028194
loss is 0.027150
loss is 0.026157
loss is 0.025219
loss is 0.024342
loss is 0.023503
loss is 0.022699
loss is 0.021933
loss is 0.021222
loss is 0.020557
loss is 0.020023
loss is 0.019520
loss is 0.018956
loss is 0.018410
loss is 0.017950
loss is 0.017506
loss is 0.017080
loss is 0.016673
tensor([[0.9941],
        [0.0122],
        [0.0120],
        [0.9642]], grad_fn=<SigmoidBackward>)
tensor([[1.],
        [0.],
        [0.],
        [1.]])
Sequential(
  (0): Linear(in_features=2, out_features=10, bias=True)
  (1): ReLU()
  (2): Linear(in_features=10, out_features=1, bias=True)
  (3): Sigmoid()
)
Sequential(
  (0): Linear(in_features=2, out_features=10, bias=True)
  (1): ReLU()
  (2): Linear(in_features=10, out_features=1, bias=True)
  (3): Sigmoid()
)

Process finished with exit code 0
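
Of the two files, the state_dict route (net2.pkl) is generally the safer one: torch.save(net, ...) pickles the full module, which can break when the class definitions or the PyTorch version change. A common extension of the state_dict approach, sketched below with the illustrative file name checkpoint.pkl, is to bundle the optimizer state with the parameters so training can be resumed later:

import torch

net = torch.nn.Sequential(
    torch.nn.Linear(2, 10),
    torch.nn.ReLU(),
    torch.nn.Linear(10, 1),
    torch.nn.Sigmoid()
)
optimizer = torch.optim.SGD(net.parameters(), lr=0.1, momentum=0.9)

# save model and optimizer state together (file name is illustrative)
torch.save({'model': net.state_dict(),
            'optimizer': optimizer.state_dict()}, 'checkpoint.pkl')

# restore both; the architecture must match the one at save time
checkpoint = torch.load('checkpoint.pkl')
net.load_state_dict(checkpoint['model'])
optimizer.load_state_dict(checkpoint['optimizer'])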
