pytorch实现线性回归
# Excerpt: linear-regression parameters and one manual gradient-descent step.
# NOTE(review): this fragment assumes x_train/y_train and a computed `loss`
# already exist; the complete runnable version appears later in the file.
w = Variable(torch.randn(1), requires_grad=True)  # weight, needs gradients
b = Variable(torch.randn(1), requires_grad=True)  # bias, needs gradients


def linear_model(x):
    """Return the model prediction w * x + b."""
    return w * x + b


def get_loss(y_, y):
    """Return the mean squared error between prediction y_ and target y."""
    return torch.mean((y_ - y) ** 2)


loss.backward()  # NOTE(review): `loss` is not defined in this excerpt — confirm intent
# Gradient-descent update, learning rate 1e-2; .data bypasses autograd tracking.
w.data = w.data - 1e-2 * w.grad.data
b.data = b.data - 1e-2 * b.grad.data
# Run 10 gradient-descent updates.
# Fixes: missing colon/indentation, `get _loss` (invalid name), the loss was
# computed from undefined `y` instead of the prediction `y_`, `formate` is
# not a str method, and `loss.data[0]` fails on 0-dim tensors — use .item().
for e in range(10):
    y_ = linear_model(x_train)
    loss = get_loss(y_, y_train)  # MSE between prediction and targets
    w.grad.zero_()  # clear gradients accumulated by the previous backward
    b.grad.zero_()
    loss.backward()
    w.data = w.data - 1e-2 * w.grad.data  # update w
    b.data = b.data - 1e-2 * b.grad.data  # update b
    print('epoch:{},loss:{}'.format(e, loss.item()))
线性模型和梯度下降
import torch
import numpy as np
from torch.autograd import Variable
torch.manual_seed(2017)  # fix the RNG so the random weight init is reproducible

# Load the data x and y: 8 samples, one feature each, shape (8, 1).
# Fixes: the literals were missing their outer brackets, a comma between
# [6.71] and [6.93], `np,array` instead of `np.array`, and the comma
# before dtype.
x_train = np.array([[3.3], [4.4], [5.5], [6.71], [6.93], [4.168],
                    [9.779], [6.182]], dtype=np.float32)
y_train = np.array([[1.7], [2.76], [2.09], [3.19], [1.649], [1.573],
                    [3.366], [2.596]], dtype=np.float32)
# Plot the raw data points.
import matplotlib.pyplot as plt
# '%matplotlib inline' is an IPython notebook magic and is a syntax error in
# a plain .py file; keep it only when running inside a notebook.
plt.plot(x_train, y_train, 'bo')
# Convert the NumPy arrays into torch Tensors.
x_train, y_train = map(torch.from_numpy, (x_train, y_train))
# Define the parameters w and b.
w = Variable(torch.randn(1), requires_grad=True)  # random initialisation
# Fix: the keyword was misspelled `rewuires_grad`, which raises TypeError.
b = Variable(torch.zeros(1), requires_grad=True)  # initialise to zero
# Build the linear regression model.
# Fix: the `def` line was missing its colon and the body its indentation.
x_train = Variable(x_train)
y_train = Variable(y_train)


def linear_model(x):
    """Return the prediction w * x + b (w and b are module-level parameters)."""
    return w * x + b


y_ = linear_model(x_train)  # predictions before any training
以上步骤就定义好了模型,在进行参数更新前,可以先看看模型的输出结果
# Visualise the untrained model against the real data.
xs = x_train.data.numpy()
plt.plot(xs, y_train.data.numpy(), 'bo', label='real')
plt.plot(xs, y_.data.numpy(), 'ro', label='estimated')
plt.legend()
# Mean-squared-error loss.
def get_loss(y_, y):
    """Return mean((y_ - y) ** 2).

    Fix: the body compared against the global `y_train` instead of the `y`
    parameter, silently ignoring the argument and breaking any other use
    of this helper.
    """
    return torch.mean((y_ - y) ** 2)
# Initial loss before any parameter update.
loss = get_loss(y_, y_train)
print(loss)

# Compute gradients of the loss w.r.t. w and b.
loss.backward()

# Inspect the gradients.
print(w.grad)
print(b.grad)

# One gradient-descent step with learning rate 1e-2 (w first, then b).
for param in (w, b):
    param.data = param.data - 1e-2 * param.grad.data
更新完参数之后,看一下模型输出的结果
# Re-run the model and plot its predictions after one update.
y_ = linear_model(x_train)
xs = x_train.data.numpy()
plt.plot(xs, y_train.data.numpy(), 'bo', label='real')
plt.plot(xs, y_.data.numpy(), 'ro', label='estimated')
plt.legend()
# Run 10 gradient-descent updates.
# Fixes: missing colon/indentation, `get _loss` (invalid name), the loss was
# computed from undefined `y` instead of the prediction `y_`, `formate` is
# not a str method, and `loss.data[0]` fails on 0-dim tensors — use .item().
for e in range(10):
    y_ = linear_model(x_train)
    loss = get_loss(y_, y_train)  # MSE between prediction and targets
    w.grad.zero_()  # zero the gradients before the new backward pass
    b.grad.zero_()
    loss.backward()
    w.data = w.data - 1e-2 * w.grad.data  # update w
    b.data = b.data - 1e-2 * b.grad.data  # update b
    print('epoch:{},loss:{}'.format(e, loss.item()))
# Final fit: predictions after the training loop, plotted over the data.
y_ = linear_model(x_train)
xs = x_train.data.numpy()
plt.plot(xs, y_train.data.numpy(), 'bo', label='real')
plt.plot(xs, y_.data.numpy(), 'ro', label='estimated')
plt.legend()
多项式回归模型:
# Define the target polynomial y = b + w0*x + w1*x^2 + w2*x^3.
w_target = np.array([0.5, 3, 2.4])  # polynomial coefficients
b_target = np.array([0.9])  # constant term

# Human-readable description of the target function.
# Fixes: the format string was unterminated, and w_target[0] was missing
# from the arguments, which shifted every coefficient by one position.
f_des = 'y = {:.2f} + {:.2f} * x + {:.2f} * x^2 + {:.2f} * x^3'.format(
    b_target[0], w_target[0], w_target[1], w_target[2])
print(f_des)
# Plot the target curve on [-3, 3].
x_sample = np.arange(-3, 3.1, 0.1)
linear_term = w_target[0] * x_sample
quad_term = w_target[1] * x_sample ** 2
cubic_term = w_target[2] * x_sample ** 3
y_sample = b_target[0] + linear_term + quad_term + cubic_term
plt.plot(x_sample, y_sample, label='real curve')
plt.legend()
# Build the inputs: each row of x is [x, x^2, x^3]; y holds the targets [y].
powers = [x_sample, x_sample ** 2, x_sample ** 3]
x_train = np.stack(powers, axis=1)
x_train = torch.from_numpy(x_train).float()  # convert to a float32 tensor
y_train = torch.from_numpy(y_sample).float().unsqueeze(1)  # shape (n, 1)
定义需要优化的参数wi
# Parameters and model: a (3, 1) weight matrix and a scalar bias.
w = Variable(torch.randn(3, 1), requires_grad=True)
b = Variable(torch.zeros(1), requires_grad=True)

# Wrap x and y in Variables so autograd can track them.
x_train = Variable(x_train)
y_train = Variable(y_train)


def multi_linear(x):
    """Return the polynomial-feature linear model x @ w + b."""
    return x.mm(w) + b
# Plot the untrained model against the real curve.
y_pred = multi_linear(x_train)
xs = x_train.data.numpy()[:, 0]
plt.plot(xs, y_pred.data.numpy(), label='fitting curve', color='r')
plt.plot(xs, y_sample, label='real curve', color='b')
plt.legend()
# Compute the loss, exactly as before.
loss = get_loss(y_pred, y_train)
print(loss)

# Autograd: fill in w.grad and b.grad.
loss.backward()

# Inspect the gradients of w and b.
print(w.grad)
print(b.grad)

# One gradient-descent step with learning rate 1e-3 (w first, then b).
for param in (w, b):
    param.data = param.data - 0.001 * param.grad.data
# Plot the model again after a single update.
y_pred = multi_linear(x_train)
xs = x_train.data.numpy()[:, 0]
plt.plot(xs, y_pred.data.numpy(), label='fitting curve', color='r')
plt.plot(xs, y_sample, label='real curve', color='b')
plt.legend()
# Train for 100 epochs of plain gradient descent.
# Fixes: missing colon/indentation, undefined name `multi` (should be
# multi_linear), `get _loss`, and the final print was truncated mid-call —
# completed to match the earlier loops' logging pattern.
for e in range(100):
    y_pred = multi_linear(x_train)
    loss = get_loss(y_pred, y_train)
    w.grad.data.zero_()  # clear gradients left by the previous backward
    b.grad.data.zero_()
    loss.backward()
    # Update the parameters.
    w.data = w.data - 0.001 * w.grad.data
    b.data = b.data - 0.001 * b.grad.data
    if (e + 1) % 20 == 0:
        print('epoch {}, loss: {:.5f}'.format(e + 1, loss.item()))