PyTorch的实现流程为:
本节的课上代码为:
import matplotlib.pyplot as plt
import torch
# Training set for y = 2x: three samples, one feature each (shape 3x1,
# where 3 is the sample count and 1 is the feature dimension).
x_data = torch.tensor([[1.0], [2.0], [3.0]])
y_data = torch.tensor([[2.0], [4.0], [6.0]])  # targets, also 3x1
# Per-epoch loss values, collected for plotting after training.
loss_list = []
class LinearMode(torch.nn.Module):
    """Single-feature linear regression model: y = w * x + b."""

    def __init__(self):
        super().__init__()
        # One input feature, one output feature.
        self.linear = torch.nn.Linear(1, 1)

    def forward(self, x):
        # nn.Module implements __call__, which dispatches to this method,
        # so the model is invoked as model(x) rather than model.forward(x).
        return self.linear(x)
# Build the model, the MSE loss, and a plain SGD optimizer.
model = LinearMode()
criterion = torch.nn.MSELoss(reduction='mean')
# lr=0.01 converges too slowly on this tiny dataset; 0.04 works well.
optimizer = torch.optim.SGD(model.parameters(), lr=0.04)

# Standard loop: forward -> loss -> zero grads -> backward -> step.
for epoch in range(1000):  # 100 or 1000 epochs are both worth trying
    y_pred = model(x_data)
    loss = criterion(y_pred, y_data)  # still a tensor, just a scalar one
    loss_list.append(loss.item())
    print('epoch:', epoch, loss.item())
    optimizer.zero_grad()  # gradients accumulate by default; clear first
    loss.backward()
    optimizer.step()

# Learned parameters — should approach w=2, b=0 since the data is y = 2x.
print('w=', model.linear.weight.item())
print('b=', model.linear.bias.item())

# Inference on an unseen point. The input is shaped (1, 1) to match the
# 2-D (batch, feature) layout of the training data; no_grad() skips
# building an autograd graph since we are not training here.
x_test = torch.tensor([[4.0]])
with torch.no_grad():
    y_test = model(x_test)
print('y_pred=', y_test.data)

# Loss curve over epochs.
plt.plot(loss_list)
plt.ylabel('loss')
plt.show()
由于数据量比较少,可以体验一下超参数的设置,这里lr=0.04,epoch=1000轮,因此收敛的比较快。结果为:
注意一下调用完损失函数的返回值loss依旧是tensor类型,只不过是一个标量