# Hands-on project: linear regression with PyTorch
from torch.autograd import Variable
import torch
import numpy as np
import matplotlib.pyplot as plt
# Toy 1-D regression dataset: 15 (x, y) pairs stored as float32 column
# vectors of shape (15, 1), matching what torch.from_numpy expects below.
x_train = np.array([[3.3], [4.4], [5.5], [6.71], [6.93], [4.168],
                    [9.779], [6.182], [7.59], [2.167], [7.042],
                    [10.791], [5.313], [7.997], [3.1]], dtype=np.float32)
y_train = np.array([[1.7], [2.76], [2.09], [3.19], [1.694], [1.573],
                    [3.366], [2.596], [2.53], [1.221], [2.827],
                    [3.465], [1.65], [2.904], [1.3]], dtype=np.float32)
# Convert to torch tensors (shares memory with the numpy arrays).
# The deprecated Variable(...) wrapper is dropped: since PyTorch 0.4,
# Variable(tensor) is a no-op — tensors participate in autograd directly.
x_train = torch.from_numpy(x_train)
y_train = torch.from_numpy(y_train)
def linear_model(x):
    """Affine prediction x -> W * x + b.

    Uses the module-level parameters ``W`` (weight) and ``b`` (bias);
    broadcasting applies them elementwise over ``x``.
    """
    return b + W * x
def get_loss(y_, y):
    """Return the mean squared error between predictions y_ and targets y."""
    squared_err = (y_ - y) ** 2
    return squared_err.mean()
# Learnable parameters: weight sampled from N(0, 1), bias initialised to zero.
# torch.autograd.Variable is deprecated since PyTorch 0.4 — a plain tensor
# created with requires_grad=True tracks gradients itself.
W = torch.randn(1, requires_grad=True)
b = torch.zeros(1, requires_grad=True)
# Train for 10 epochs of full-batch gradient descent.
# Fixes vs. the original:
#   * The learning rate was written as (1*e-2), which evaluates to
#     (epoch - 2) — i.e. -2, -1, 0, 1, ... — not the intended 1e-2.
#   * The original called backward() twice per epoch (first with
#     retain_graph=True) only to make .grad exist before zeroing it;
#     a None-check before a single backward() does the same work once.
#   * A stray trailing backtick after the print line was a syntax error.
learning_rate = 1e-2
for e in range(10):
    y_ = linear_model(x_train)
    Loss = get_loss(y_, y_train)
    # .grad is None until the first backward pass, so guard the zeroing.
    if W.grad is not None:
        W.grad.zero_()
    if b.grad is not None:
        b.grad.zero_()
    Loss.backward()
    # Manual SGD step via .data so the update itself is not tracked by autograd.
    W.data = W.data - learning_rate * W.grad.data
    b.data = b.data - learning_rate * b.grad.data
    print('epoch:{},loss:{}'.format(e, Loss.item()))