# Implementing a univariate linear regression model with PyTorch
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import matplotlib.pyplot as plt
# Step 1: create the dataset
x_train = np.array([[3.3],[4.4],[5.5],[6.71],[6.93],[4.168],[9.779],[6.182],[7.59],[2.167],[7.042],
[10.791],[5.313],[7.997],[3.1]], dtype=np.float32)
y_train = np.array([[1.7],[2.76],[2.09],[3.19],[1.694],[1.573],[3.366],[2.596],[2.53],[1.221],
[2.827],[3.465],[1.65],[2.904],[1.3]], dtype=np.float32)
x_train = torch.from_numpy(x_train)
y_train = torch.from_numpy(y_train)
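# A small sanity check (not part of the original tutorial): each tensor is
# shaped (15, 1), i.e. one sample per row with a single feature, which is the
# (batch, features) layout that nn.Linear operates on.
assert x_train.shape == (15, 1) and y_train.shape == (15, 1)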
# Step 2: build the linear regression model. nn.Linear(1, 1) implements
# y = wx + b; the first argument is the number of input features (x is
# one-dimensional) and the second the number of output features (y is also
# one-dimensional).
class LinearRegression(nn.Module):
    def __init__(self):
        super(LinearRegression, self).__init__()
        self.linear = nn.Linear(1, 1)

    def forward(self, x):
        out = self.linear(x)
        return out
# Step 3: instantiate LinearRegression
model = LinearRegression()
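# A quick look at the untrained parameters (an illustrative check, not part of
# the original tutorial): nn.Linear(1, 1) holds a 1x1 weight and a 1-element
# bias, which correspond to w and b in y = wx + b.
print(model.linear.weight.shape)  # torch.Size([1, 1])
print(model.linear.bias.shape)    # torch.Size([1])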
# Step 4: define the loss function (mean squared error)
criterion = nn.MSELoss()
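# For reference, MSELoss with its default reduction computes the mean of the
# squared errors, so criterion(out, target) equals ((out - target) ** 2).mean().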
# Step 5: define the optimizer (vanilla SGD with learning rate 0.01)
optimizer = optim.SGD(model.parameters(), lr=0.01)
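# For intuition, plain SGD's optimizer.step() is equivalent to the update
# sketched below (a sketch only; it assumes loss.backward() has already
# populated each parameter's .grad):
#     with torch.no_grad():
#         for p in model.parameters():
#             p -= 0.01 * p.grad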
# Step 6: train the model
"""
First fix the number of iterations; 1000 is used here. Each iteration runs a
forward pass to compute the loss, then a backward pass to compute gradients.
The gradients must be zeroed before each backward pass, otherwise they
accumulate across iterations and the result fails to converge. To make the
results easy to follow, the current epoch and loss are printed at regular
intervals.
"""
num_epochs = 1000
for epoch in range(num_epochs):
    inputs = x_train
    target = y_train
    # Forward pass
    out = model(inputs)
    loss = criterion(out, target)
    # Backward pass: zero the old gradients, then backpropagate
    optimizer.zero_grad()
    loss.backward()
    # Update the parameters
    optimizer.step()
    # Periodically report training progress
    if (epoch + 1) % 20 == 0:
        print('Epoch [{}/{}], loss: {:.6f}'.format(epoch + 1, num_epochs, loss.item()))
# Step 7: test the model and plot the fit
if __name__ == '__main__':
    model.eval()
    with torch.no_grad():
        predict = model(x_train)
    predict = predict.numpy()
    plt.plot(x_train.numpy(), y_train.numpy(), 'ro', label='Original data')
    plt.plot(x_train.numpy(), predict, label='Fitting Line')
    plt.legend()
    plt.show()
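    # The learned parameters can be read straight off the layer; after
    # training they approximate the slope and intercept of the fitted line.
    w = model.linear.weight.item()
    b = model.linear.bias.item()
    print('Learned parameters: w = {:.4f}, b = {:.4f}'.format(w, b))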