# Linear regression with a one-dimensional x. TODO: extend to
# multi-dimensional x = (x1, x2, ..., xm) later.
import torch
import matplotlib.pyplot as plt
import torch.nn as nn
from torch.autograd import Variable as Var
import torch.optim as optim
# --- Training data --------------------------------------------------------
# 100 evenly spaced points in [-1, 1], shaped (100, 1) so each row is one
# sample with a single feature, matching nn.Linear's (batch, features) layout.
x_train = torch.linspace(-1, 1, 100).unsqueeze(1)
# Targets follow y = 3x + 10 with uniform noise in [0, 1) added by torch.rand.
y_train = x_train * 3 + 10 + torch.rand(x_train.shape)
# --- Model definition ------------------------------------------------------
class LinearRegression(nn.Module):
    """Single-feature linear model: y = w * x + b."""

    def __init__(self):
        super().__init__()
        # One input feature, one output feature.
        self.linear = nn.Linear(1, 1)

    def forward(self, x):
        return self.linear(x)
# --- Model, loss, and optimizer -------------------------------------------
learning_rate = 1e-2
model = LinearRegression()                            # model to fit
criterion = nn.MSELoss()                              # mean-squared-error loss
optimizer = optim.SGD(model.parameters(), lr=learning_rate)  # plain SGD
# --- Training loop ---------------------------------------------------------
num_epochs = 1000  # number of full passes over the (tiny) dataset
for epoch in range(num_epochs):
    # Tensors can be fed to the model directly: the old Variable wrapper has
    # been a deprecated no-op since PyTorch 0.4, so it is dropped here.
    inputs = x_train
    target = y_train

    # Forward pass: predictions and mean-squared-error loss.
    out = model(inputs)
    loss = criterion(out, target)

    # Backward pass: gradients accumulate by default, so clear them each
    # iteration before backprop, then take one optimizer step.
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    if (epoch + 1) % 20 == 0:
        # loss.item() extracts the Python float from the 0-dim loss tensor;
        # the original loss.data[0] raises IndexError on PyTorch >= 0.4.
        print('Epoch[{}/{}],loss:{:.6f}'.format(epoch + 1, num_epochs, loss.item()))
# --- Evaluation and visualization ------------------------------------------
model.eval()  # switch to inference mode (good practice even with no dropout/BN)
# torch.no_grad() skips autograd bookkeeping for inference; this replaces the
# deprecated Variable wrapper and the .data attribute, which bypasses autograd.
with torch.no_grad():
    predict = model(x_train).numpy()
plt.plot(x_train.numpy(), y_train.numpy(), 'ro', label='Original Data')
plt.plot(x_train.numpy(), predict, label='Fitting Line')
# The original passed label= to both plots but never called legend(), so the
# labels were silently dropped; show them.
plt.legend()
plt.show()