import torch
from torch import nn, optim
import numpy as np
import matplotlib.pyplot as plt


class Model(nn.Module):
    def __init__(self):
        super(Model, self).__init__()
        self.linear = nn.Linear(1, 1)  # one input feature, one output

    def forward(self, x):
        return self.linear(x)


# Training data: four (x, y) pairs, shaped (4, 1) to match nn.Linear.
# (Variable is deprecated since PyTorch 0.4; plain tensors suffice.)
X = torch.tensor([[1.0], [1.3], [1.9], [2.8]])
y = torch.tensor([[0.9], [2.6], [2.1], [3.0]])

# Two identical models, one per optimizer, so the comparison is fair.
model_sgd = Model()
model_adam = Model()
loss_function = nn.MSELoss()
sgd = optim.SGD(model_sgd.parameters(), lr=0.01)
adam = optim.Adam(model_adam.parameters(), lr=0.01)


def train(model, opt, epochs=1000):
    """Train `model` with optimizer `opt`; return the per-epoch loss history."""
    history = []
    for epoch in range(epochs):
        out = model(X)
        loss = loss_function(out, y)
        opt.zero_grad()
        loss.backward()
        opt.step()
        history.append(loss.item())
        if epoch % 100 == 0:  # log every 100 epochs instead of every epoch
            print('epoch={}, loss={}'.format(epoch, loss.item()))
    return history


loss_sgd = train(model_sgd, sgd)

# Plot the data points and the line fitted by the SGD model.
xs = np.linspace(1, 3, 100).reshape(-1, 1)
with torch.no_grad():
    ys = model_sgd(torch.tensor(xs, dtype=torch.float32)).numpy()
plt.scatter(X.numpy(), y.numpy())
plt.plot(xs, ys, color='r')
plt.show()

loss_adam = train(model_adam, adam)

# Compare convergence: SGD loss in blue, Adam loss in red.
plt.plot(loss_sgd, 'b', label='SGD')
plt.plot(loss_adam, 'r', label='Adam')
plt.legend()
plt.show()
The script above implements a linear regression model in PyTorch and compares the convergence speed of Adam and SGD by training one copy of the model with each optimizer and plotting the two loss curves together.
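For reference, the criterion used here, nn.MSELoss, computes the mean squared error over the batch of n training points:

$$\mathrm{MSE} = \frac{1}{n}\sum_{i=1}^{n}\left(\hat{y}_i - y_i\right)^2$$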
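As a quick sanity check (a minimal sketch, not part of the original script), the fitted slope and intercept of each model can be read straight off the nn.Linear layer and compared:

# For a 1-in/1-out nn.Linear, weight and bias are single-element
# tensors, so .item() extracts them as plain Python floats.
w = model_sgd.linear.weight.item()
b = model_sgd.linear.bias.item()
print('SGD fit:  y = {:.3f} * x + {:.3f}'.format(w, b))

w = model_adam.linear.weight.item()
b = model_adam.linear.bias.item()
print('Adam fit: y = {:.3f} * x + {:.3f}'.format(w, b))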