PyTorch task: understanding more neural network optimization methods in PyTorch

1. Understand the different optimizers
2. Write optimizer code
3. Momentum
4. Two-dimensional optimization, implemented with stochastic gradient descent (SGD)
5. AdaGrad, the adaptive gradient method
6. RMSProp
7. Adam (update rules for items 3 through 7 are sketched below)
8. Choosing an optimizer in PyTorch
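
Before the PyTorch code, here is a rough scalar-form sketch of what each update rule does. This is a simplified illustration for intuition only; epsilon placement and the exact formulations PyTorch uses differ slightly.

import math

def sgd_step(w, g, lr):
    # plain SGD: move the parameter against its gradient
    return w - lr * g

def momentum_step(w, g, v, lr, mu=0.9):
    # momentum: accumulate a velocity from past gradients, then move along it
    v = mu * v + g
    return w - lr * v, v

def adagrad_step(w, g, cache, lr, eps=1e-8):
    # AdaGrad: the effective step size shrinks with the running sum of squared gradients
    cache = cache + g * g
    return w - lr * g / (math.sqrt(cache) + eps), cache

def rmsprop_step(w, g, cache, lr, alpha=0.9, eps=1e-8):
    # RMSProp: like AdaGrad, but with an exponential moving average of squared gradients
    cache = alpha * cache + (1 - alpha) * g * g
    return w - lr * g / (math.sqrt(cache) + eps), cache

def adam_step(w, g, m, v, t, lr, b1=0.9, b2=0.99, eps=1e-8):
    # Adam: momentum-style first moment plus RMSProp-style second moment,
    # both bias-corrected by the step count t (t starts at 1)
    m = b1 * m + (1 - b1) * g
    v = b2 * v + (1 - b2) * g * g
    m_hat = m / (1 - b1 ** t)
    v_hat = v / (1 - b2 ** t)
    return w - lr * m_hat / (math.sqrt(v_hat) + eps), m, v

For example, repeatedly applying any of these steps to f(w) = w**2 (gradient 2*w) drives w toward 0; the optimizers differ only in how they scale and smooth that step.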

import torch
import torchvision
import torchvision.transforms as transform
import torch.utils.data as Data
import matplotlib.pyplot as plt
from torch.utils.data import Dataset,DataLoader
import pandas as pd
import numpy as np
from torch.autograd import Variable
 
# Data set
train=pd.read_csv('Thirdtest.csv')
# column 0 is the label
train_label=train.iloc[:,[0]] # read only the label column
#train_label=train.iloc[:,0:3]
# columns 1~16 are the features
train_data=train.iloc[:,1:]
# convert to numpy arrays
train_label_np=train_label.values
train_data_np=train_data.values

# convert to tensors
train_label_ts=torch.from_numpy(train_label_np)
train_data_ts=torch.from_numpy(train_data_np)

# CrossEntropyLoss expects long-typed class indices and float features
train_label_ts=train_label_ts.type(torch.LongTensor)
train_data_ts=train_data_ts.type(torch.FloatTensor)
 
 
 
print(train_label_ts.shape)
print(type(train_label_ts))
 
train_dataset=Data.TensorDataset(train_data_ts,train_label_ts)
train_loader=DataLoader(dataset=train_dataset,batch_size=64,shuffle=True)
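
Thirdtest.csv is the author's own file: a class label with 3 classes in column 0, followed by 16 feature columns. If it is not available, a synthetic stand-in with the same layout can be swapped in; a hypothetical sketch, kept commented out like the other alternatives in this post:

# optional stand-in if Thirdtest.csv is unavailable: comment out the pandas
# loading above and build random tensors with the same shapes instead
# train_data_ts  = torch.randn(600, 16)           # 600 samples, 16 float features
# train_label_ts = torch.randint(0, 3, (600, 1))  # integer labels in {0, 1, 2}
# train_dataset  = Data.TensorDataset(train_data_ts, train_label_ts)
# train_loader   = DataLoader(dataset=train_dataset, batch_size=64, shuffle=True)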
 
# build the network

import torch.nn.functional as F     # activation functions live here

class Net(torch.nn.Module):     # inherit from torch.nn.Module
    def __init__(self):
        super(Net, self).__init__()     # call the parent __init__
        self.hidden1 = torch.nn.Linear(16, 30)  # hidden layer, linear output
        self.out = torch.nn.Linear(30, 3)       # output layer, linear output

    def forward(self, x):
        # forward pass: map the input to raw class scores
        x = F.relu(self.hidden1(x))     # activation on the hidden layer's linear output
        x = self.out(x)                 # raw logits; class predictions need a further softmax/argmax
        return x
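
A quick shape check on random input confirms the architecture before training; an illustrative snippet (not part of the original script): a batch of 4 samples with 16 features should map to a 4 x 3 matrix of logits.

check_net = Net()
print(check_net(torch.randn(4, 16)).shape)   # expected: torch.Size([4, 3])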
 
 
 
# net=Net()
# optimizer = torch.optim.SGD(net.parameters(), lr=0.0001,momentum=0.001)
# loss_func = torch.nn.CrossEntropyLoss()  # the target label is NOT one-hot encoded
 
# loss_list=[]
# for epoch in range(500):
#     for step ,(b_x,b_y) in enumerate (train_loader):
#         b_x,b_y=Variable(b_x),Variable(b_y)
#         b_y=b_y.squeeze(1)
#         output=net(b_x)
#         loss=loss_func(output,b_y)
#         optimizer.zero_grad()
#         loss.backward()
#         optimizer.step()
#         if epoch%1==0:
#             loss_list.append(float(loss))
#         print( "Epoch: ", epoch, "Step ", step, "loss: ", float(loss))
 
 
# create a separate net for each optimizer
net_SGD         = Net()
net_Momentum    = Net()
net_RMSprop     = Net()
net_Adam        = Net()
nets = [net_SGD, net_Momentum, net_RMSprop, net_Adam]
 
# define one optimizer per net
LR=0.0001
opt_SGD         = torch.optim.SGD(net_SGD.parameters(), lr=LR,momentum=0.001)
opt_Momentum    = torch.optim.SGD(net_Momentum.parameters(), lr=LR, momentum=0.8)
opt_RMSprop     = torch.optim.RMSprop(net_RMSprop.parameters(), lr=LR, alpha=0.9)
opt_Adam        = torch.optim.Adam(net_Adam.parameters(), lr=LR, betas=(0.9, 0.99))
optimizers = [opt_SGD, opt_Momentum, opt_RMSprop, opt_Adam]
 
loss_func = torch.nn.CrossEntropyLoss()
losses_his = [[], [], [], []]
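
Item 5 of the outline mentions AdaGrad, which the comparison above leaves out. PyTorch provides it as torch.optim.Adagrad; the snippet below is a minimal standalone check on random data (hypothetical names, kept separate from the four nets being compared):

net_adagrad = Net()
opt_adagrad = torch.optim.Adagrad(net_adagrad.parameters(), lr=LR)
dummy_x = torch.randn(8, 16)          # 8 random samples, 16 features
dummy_y = torch.randint(0, 3, (8,))   # random class indices in {0, 1, 2}
dummy_loss = loss_func(net_adagrad(dummy_x), dummy_y)
opt_adagrad.zero_grad()
dummy_loss.backward()
opt_adagrad.step()                    # one AdaGrad update on the throwaway net
print("Adagrad check, loss:", float(dummy_loss))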
 
for net, opt, l_his in zip(nets, optimizers, losses_his):
    for epoch in range(500):
        for step, (b_x, b_y) in enumerate(train_loader):
            b_x, b_y = Variable(b_x), Variable(b_y)
            b_y = b_y.squeeze(1)  # CrossEntropyLoss needs 1-D class indices, not one-hot vectors
            # train each net with its own optimizer
            output = net(b_x)              # get output for every net
            loss = loss_func(output, b_y)  # compute loss for every net
            opt.zero_grad()                # clear gradients for the next step
            loss.backward()                # backpropagation, compute gradients
            opt.step()                     # apply gradients
            if epoch % 1 == 0:             # record every epoch (raise the modulus to sample less often)
                l_his.append(float(loss))  # loss recorder
                print("optimizer:", type(opt).__name__, "Epoch:", epoch, "Step:", step, "loss:", float(loss))
 
labels = ['SGD', 'Momentum', 'RMSprop', 'Adam']
for i, l_his in enumerate(losses_his):
    plt.plot(l_his, label=labels[i])
plt.legend(loc='best')
plt.xlabel('Steps')
plt.ylabel('Loss')
plt.xlim((0,1000))
plt.ylim((0,4))
plt.show()
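
The per-step loss curves are noisy, which can make the four optimizers hard to tell apart. A second plot of a simple moving average (the window size of 50 steps is an arbitrary choice) is easier to read:

window = 50   # arbitrary smoothing window, in steps
for i, l_his in enumerate(losses_his):
    smoothed = np.convolve(np.array(l_his, dtype=float), np.ones(window) / window, mode='valid')
    plt.plot(smoothed, label=labels[i])
plt.legend(loc='best')
plt.xlabel('Steps')
plt.ylabel('Smoothed loss')
plt.show()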
 
 
 
 
 
 
 
 
#
# for epoch in range(5):
#     for step ,(b_x,b_y) in enumerate (train_loader):
#         b_x,b_y=Variable(b_x),Variable(b_y)
#         b_y=b_y.squeeze(1)
#         output=net(b_x)
#         loss=loss_func(output,b_y)
#         optimizer.zero_grad()   # zero gradients before backward, not after it
#         loss.backward()
#         optimizer.step()
#         print(loss)

Adapted from https://blog.csdn.net/xiexu911/article/details/80861913
