一、制作数据
import torch
import numpy as np
import matplotlib.pyplot as plt
# Ground-truth line the model should recover: y = TRUE_W * x + TRUE_b.
TRUE_W = 3.0
TRUE_b = 2.0

# Build the training set: NUM_EXAMPLES points on the line plus noise.
NUM_EXAMPLES = 1000
inputs = torch.rand(NUM_EXAMPLES, 1)  # x uniform in [0, 1), shape (N, 1)
print(np.shape(inputs.numpy()))
# Zero-mean Gaussian noise.  (torch.rand would be uniform on [0, 1) with
# mean 0.5, which biases the fitted intercept away from TRUE_b.)
noise = 0.1 * torch.randn(NUM_EXAMPLES, 1)
outputs = inputs * TRUE_W + TRUE_b + noise
print(np.shape(outputs.numpy()))

# Visualise the raw training data.
plt.scatter(inputs.numpy(), outputs.numpy(), c='b')
plt.show()
二、模型结构
class LinearModel(torch.nn.Module):
    """Single-feature linear regression: predicts ``w * x + b``."""

    def __init__(self):
        super().__init__()
        # One input feature mapped to one output value.
        self.linear = torch.nn.Linear(1, 1)

    def forward(self, x):
        """Apply the affine map to a ``(batch, 1)`` input tensor."""
        return self.linear(x)
# Instantiate the model and inspect its structure and parameters.
Model = LinearModel()
print(Model)
# .parameters() returns a generator; materialise it so the actual
# tensors are printed instead of a generator object's repr.
print(list(Model.parameters()))
三、开始训练
def plot(epoch):
    """Scatter the training data (blue) and the model's current fit (red).

    Reads the module-level ``inputs``, ``outputs``, ``Model`` and ``loss``
    objects; shows the figure for one second, then closes it.
    """
    plt.ion()  # enable interactive mode BEFORE drawing so pause() updates
    # Label the artists so plt.legend() has entries to show (an unlabelled
    # legend call emits a warning and renders nothing).
    plt.scatter(inputs.numpy(), outputs.numpy(), c='b', label='data')
    plt.scatter(inputs.detach().numpy(), Model(inputs).detach().numpy(),
                c='r', label='fit')
    plt.title("epoch %2d, loss = %s" % (epoch, str(loss.item())))
    plt.legend()
    plt.draw()
    plt.pause(1)
    plt.close()
# Mean-squared-error loss, optimised with plain SGD.
lossFunction = torch.nn.MSELoss()
optimizer = torch.optim.SGD(Model.parameters(), lr=1e-2)

num_epochs = 1000
for epoch in range(num_epochs):
    # Forward pass.  (torch.autograd.Variable is a deprecated no-op since
    # PyTorch 0.4 -- plain tensors carry autograd state directly, so the
    # wrappers are removed.)
    y_pre = Model(inputs)
    loss = lossFunction(y_pre, outputs)

    # Backward pass: clear stale gradients, backprop, apply the update.
    optimizer.zero_grad()  # gradients accumulate; must reset each step
    loss.backward()
    optimizer.step()

    # Periodically visualise the fit and report progress.
    if (epoch + 1) % 40 == 0:
        plot(epoch)
        print('Epoch[{}/{}], loss:{:.6f}'.format(epoch + 1, num_epochs,
                                                 loss.item()))
最终可以拟合出一条直线