Hand-coding a loss function in PyTorch and comparing 7 gradient-descent algorithms

import torch
import matplotlib.pyplot as plt
import torch.optim as optim
import random

def grad_down(optimizer, x, w, y):
    # One training step: forward pass, hand-coded MSE loss, backward, parameter update.
    optimizer.zero_grad()
    output = torch.mv(x, w)  # (50, 1) matrix times (1,) weight -> (50,) predictions
    loss = MSELoss(output, y)
    loss.backward()
    optimizer.step()
    return loss.item()  # plain Python float, convenient for plotting

def MSELoss(output, target):
    # Hand-coded mean squared error over the 50 samples. Both sides are flattened
    # to shape (50,) so the subtraction stays elementwise instead of broadcasting
    # (50,) - (50, 1) into a (50, 50) matrix.
    return (1 / 50) * ((output.view(-1) - target.view(-1)) ** 2).sum()
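
# Quick sanity check (illustrative addition, not part of the original post):
# with 50 elements, (1/50) * sum is exactly the mean squared error, so the
# hand-coded loss should agree with PyTorch's built-in mse_loss.
import torch.nn.functional as F
_out, _tgt = torch.randn(50), torch.randn(50)
assert torch.allclose(MSELoss(_out, _tgt), F.mse_loss(_out, _tgt))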

x = torch.Tensor([[i] for i in range(50)])                        # inputs, shape (50, 1)
y = torch.Tensor([[i * 2 + random.random()] for i in range(50)])  # targets: y ≈ 2x + noise, shape (50, 1)
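
# For reference (illustrative addition): this one-parameter least-squares fit
# has the closed-form solution w* = (x^T y) / (x^T x); since y ≈ 2x + noise,
# w* is close to 2, and every optimizer below should converge toward it.
w_star = (x.view(-1) @ y.view(-1)) / (x.view(-1) @ x.view(-1))
print('closed-form optimum w* = {:.4f}'.format(w_star.item()))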

# One independent scalar weight per optimizer, all initialized to zero.
w1 = torch.zeros(1, requires_grad=True)
w2 = torch.zeros(1, requires_grad=True)
w3 = torch.zeros(1, requires_grad=True)
w4 = torch.zeros(1, requires_grad=True)
w5 = torch.zeros(1, requires_grad=True)
w6 = torch.zeros(1, requires_grad=True)
w7 = torch.zeros(1, requires_grad=True)
w8 = torch.zeros(1, requires_grad=True)


learning_rate = 0.01
optimizer1 = optim.SGD([w1], lr=learning_rate)
optimizer2 = optim.Adam([w2], lr=learning_rate)
optimizer3 = optim.Adadelta([w3], lr=learning_rate)
optimizer4 = optim.Adagrad([w4], lr=learning_rate)
optimizer5 = optim.Adamax([w5], lr=learning_rate)
# optimizer6 = optim.LBFGS([w6], lr=learning_rate)  # excluded: LBFGS.step() needs a closure, see the sketch below
optimizer7 = optim.RMSprop([w7], lr=learning_rate)
optimizer8 = optim.RMSprop([w8], lr=learning_rate, momentum=0.9)  # RMSprop + momentum (momentum=0.9 is an assumed setting)
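
# LBFGS sketch (illustrative, kept out of the comparison): unlike the optimizers
# above, LBFGS.step() must be given a closure that re-evaluates the loss, so it
# does not fit grad_down(). It could be driven like this:
# optimizer6 = optim.LBFGS([w6], lr=learning_rate)
# def lbfgs_closure():
#     optimizer6.zero_grad()
#     closure_loss = MSELoss(torch.mv(x, w6), y)
#     closure_loss.backward()
#     return closure_loss
# loss6.append(optimizer6.step(lbfgs_closure).item())  # would go inside the training loop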

loss1 = []
loss2 = []
loss3 = []
loss4 = []
loss5 = []
loss6 = []
loss7 = []
loss8 = []
for step in range(10000):
    loss1.append(grad_down(optimizer1, x, w1, y))
    loss2.append(grad_down(optimizer2, x, w2, y))
    loss3.append(grad_down(optimizer3, x, w3, y))
    loss4.append(grad_down(optimizer4, x, w4, y))
    loss5.append(grad_down(optimizer5, x, w5, y))
    # loss6.append(grad_down(optimizer6, x, w6, y))  # LBFGS would need the closure sketch above
    loss7.append(grad_down(optimizer7, x, w7, y))
    loss8.append(grad_down(optimizer8, x, w8, y))
    if step % 1000 == 0:  # print progress occasionally instead of every step
        print('step={}'.format(step))



plt.plot(loss1, label='SGD')
plt.plot(loss2, label='Adam')
plt.plot(loss3, label='Adadelta')
plt.plot(loss4, label='Adagrad')
plt.plot(loss5, label='Adamax')
# plt.plot(loss6, label='LBFGS')
plt.plot(loss7, label='RMSprop')
plt.plot(loss8, label='RMSprop+momentum')
plt.legend(loc=0)
plt.show()
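
# Optional follow-up (illustrative addition): compare each optimizer's final
# weight against the closed-form optimum printed at the start.
for name, w in [('SGD', w1), ('Adam', w2), ('Adadelta', w3), ('Adagrad', w4),
                ('Adamax', w5), ('RMSprop', w7), ('RMSprop+momentum', w8)]:
    print('{:>16}: w = {:.4f}'.format(name, w.item()))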