直接上代码:
# torch realize one-dimensional linear regression
import numpy as np
import torch
from torch import nn, optim
from torch.autograd import Variable
import matplotlib.pyplot as plt
# Training data: five (x, y) sample pairs for fitting a straight line.
_x_samples = [[3.3], [4.4], [5.5], [7.5], [4.16]]
_y_samples = [[3.8], [4.7], [5.6], [7.4], [4.15]]
# Build float32 numpy arrays, then wrap them as torch tensors.
x_train = torch.from_numpy(np.array(_x_samples, dtype=np.float32))
y_train = torch.from_numpy(np.array(_y_samples, dtype=np.float32))
# Simple torch model definition.
class Linearregession(nn.Module):
    """One-dimensional linear regression: y = w * x + b.

    A single ``nn.Linear(1, 1)`` layer, so the model has exactly two
    learnable parameters (weight and bias).
    """

    def __init__(self):
        super(Linearregession, self).__init__()
        self.linear = nn.Linear(1, 1)  # input and output are 1-dimensional

    def forward(self, x):
        """Apply the linear layer to ``x`` (expected shape: (N, 1)) and return the result."""
        out = self.linear(x)
        return out
# Pick GPU when available, otherwise fall back to CPU, and place the model there.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = Linearregession().to(device)

criterion = nn.MSELoss()  # mean squared error as the loss
optimizer = optim.SGD(model.parameters(), lr=1e-3)  # plain gradient descent
# Training loop.
num_epochs = 1000
# Hoist the availability check out of the loop — it cannot change mid-training.
use_cuda = torch.cuda.is_available()
for epoch in range(num_epochs):
    # Variable is deprecated since PyTorch 0.4; tensors are used directly.
    inputs = x_train.cuda() if use_cuda else x_train
    target = y_train.cuda() if use_cuda else y_train
    # forward
    out = model(inputs)            # forward pass
    loss = criterion(out, target)  # compute the loss
    # backward
    optimizer.zero_grad()          # clear accumulated gradients
    loss.backward()                # back-propagate
    optimizer.step()               # update the parameters
    if (epoch + 1) % 20 == 0:
        # loss.item() replaces the removed 0-dim indexing `loss.data[0]`
        # (raises IndexError on PyTorch >= 0.4).
        print('Epoch[{}/{}], loss: {:.6f}'.format(epoch + 1, num_epochs, loss.item()))
model.eval()  # switch the model to evaluation mode
# Run inference without tracking gradients; only move data to the GPU when
# one is actually available (the original unconditional .cuda() crashed on
# CPU-only machines and required manual editing).
with torch.no_grad():
    if torch.cuda.is_available():
        predict = model(x_train.cuda()).cpu().numpy()
    else:
        predict = model(x_train).numpy()
plt.plot(x_train.numpy(), y_train.numpy(), 'ro', label='Original data')
plt.plot(x_train.numpy(), predict, label='Fitting Line')
# Show the legend.
plt.legend()
plt.show()
# Save the trained weights (uncomment to enable):
# torch.save(model.state_dict(), './linear.pth')
注:
1. 此代码默认在 GPU 环境下运行;若在仅有 CPU 的环境中测试,请用代码中已注释的两行 CPU 版本代码替换对应的 GPU 代码。
2. 如果出现 torch.from_numpy 函数无法解析的情况(即该函数名在 IDE 中无法高亮、显示为未识别),笔者通过重启 PyCharm 解决了该问题;也有其他作者提供了别的解决方案,笔者未验证其是否有效,可自行参考。