Mofan PyTorch: a comparison of several optimizers
# optimizer.py
import torch
import torch.nn.functional as F
import torch.utils.data as Data
import matplotlib.pyplot as plt
# hyper parameters
LR = 0.01
BATCH_SIZE = 32
EPOCH = 40

# fake dataset: y = x^2 plus Gaussian noise, 100 points of shape (100, 1)
x = torch.unsqueeze(torch.linspace(-1, 1, 100), dim=1)
y = x.pow(2) + 0.1 * torch.normal(torch.zeros(*x.size()))
# plt.scatter(x.numpy(), y.numpy())
# plt.show()
torch_dataset = Data.TensorDataset(x,y)
loader = Data.DataLoader(dataset=torch_dataset, batch_size=BATCH_SIZE, shuffle=True, num_workers=2)
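# the loader yields shuffled mini-batches of BATCH_SIZE (x, y) pairs each
# step; num_workers=2 prepares batches in two background worker processes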
class Net(torch.nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.hidden = torch.nn.Linear(1, 20)   # hidden layer, 20 units
        self.predict = torch.nn.Linear(20, 1)  # output layer

    def forward(self, x):
        x = F.relu(self.hidden(x))  # ReLU activation on the hidden layer
        x = self.predict(x)         # linear output (no activation)
        return x
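# one separate network per optimizer, so each optimizer trains its own
# copy and the four loss curves can be compared on the same task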
net_SGD = Net()
net_Momentum = Net()
net_RMSprop = Net()
net_Adam = Net()
nets = [net_SGD, net_Momentum, net_RMSprop, net_Adam]
opt_SGD = torch.optim.SGD(net_SGD.parameters(), lr=LR)
opt_Momentum = torch.optim.SGD(net_Momentum.parameters(), lr=LR, momentum=0.8)
opt_RMSprop = torch.optim.RMSprop(net_RMSprop.parameters(), lr=LR, alpha=0.9)
opt_Adam = torch.optim.Adam(net_Adam.parameters(), lr=LR, betas=(0.9, 0.99))
optimizers = [opt_SGD, opt_Momentum, opt_RMSprop, opt_Adam]
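# momentum=0.8 gives SGD a velocity term; alpha=0.9 is RMSprop's smoothing
# constant for the running average of squared gradients; betas=(0.9, 0.99)
# are Adam's decay rates for the first- and second-moment estimates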
loss_func = torch.nn.MSELoss()
losses_his = [[], [], [], []]  # record the loss history of each net
for epoch in range(EPOCH):
    for step, (b_x, b_y) in enumerate(loader):
        # train each network on the same batch with its own optimizer
        for net, opt, l_his in zip(nets, optimizers, losses_his):
            output = net(b_x)              # forward pass
            loss = loss_func(output, b_y)  # mean squared error
            opt.zero_grad()                # clear gradients for this step
            loss.backward()                # backpropagation, compute gradients
            opt.step()                     # apply gradients
            l_his.append(loss.item())      # record scalar loss
# plot the four loss curves together
labels = ['SGD', 'Momentum', 'RMSprop', 'Adam']
for i, l_his in enumerate(losses_his):
    plt.plot(l_his, label=labels[i])
plt.legend(loc='best')
plt.xlabel('Step')
plt.ylabel('Loss')
plt.ylim((0, 0.5))
plt.show()
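# on this toy regression the adaptive optimizers (RMSprop, Adam) typically
# drive the loss down fastest, plain SGD slowest, with Momentum in between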
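One caveat: because the DataLoader uses num_workers=2, platforms that start
worker processes with "spawn" (Windows, and macOS on recent Python versions)
re-import this file in every worker, so the script body must sit behind an
entry-point guard. A minimal sketch, assuming the training code above is
moved into a main() function (a hypothetical name used here for illustration):

# guard the entry point so DataLoader workers can import this module
# without re-running the whole training script
if __name__ == '__main__':
    main()  # hypothetical wrapper around the training code above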