1.原理
使用PyTorch的内部函数实现线性回归,训练模型
2.代码展示
# author: ZhuYuYing
# date: 2021/7/6
# projectName: tor-start
import torch
# Training data for the target function y = 2x: three (x, y) pairs.
# Each tensor is shaped (3, 1) so nn.Linear sees a batch of 3 samples
# with 1 feature each.
x_data = torch.tensor([[1.0], [2.0], [3.0]])
y_data = torch.tensor([[2.0], [4.0], [6.0]])
class LinearModel(torch.nn.Module):
    """Single-feature linear regression model: y = w * x + b.

    Note: the two positional arguments of ``torch.nn.Linear`` are
    ``in_features`` and ``out_features`` (both 1 here) — NOT the weight
    and bias values, as the original comment claimed.  The weight and
    bias tensors are created and randomly initialized by the layer
    itself and are exposed as ``self.linear.weight`` / ``self.linear.bias``.
    """

    def __init__(self):
        super(LinearModel, self).__init__()
        # One input feature -> one output feature, with a bias term.
        self.linear = torch.nn.Linear(1, 1)

    def forward(self, x):
        # x: tensor of shape (N, 1); returns predictions of shape (N, 1).
        y_pred = self.linear(x)
        return y_pred
model = LinearModel()

# Loss: mean-squared error over the batch.
#   reduction='sum'  -> return the summed squared error (a scalar);
#   reduction='mean' -> (default) the averaged squared error;
#   reduction='none' -> the per-element loss tensor.
# (The older size_average / reduce flags are deprecated in favour of
# the single `reduction` argument.)
criterion = torch.nn.MSELoss(reduction='sum')

# Optimizer: plain stochastic gradient descent over every trainable
# parameter of the model (the Linear layer's weight and bias),
# with learning rate 0.01.
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
# Optimization loop: 1000 full-batch gradient-descent steps.
for epoch in range(1000):
    outputs = model(x_data)            # forward pass
    loss = criterion(outputs, y_data)  # summed squared error
    print(epoch, loss.item())
    optimizer.zero_grad()              # clear gradients from the previous step
    loss.backward()                    # backpropagate
    optimizer.step()                   # update w and b

# Learned parameters — should approach w = 2, b = 0 for the data y = 2x.
print('w = ', model.linear.weight.item())
print('b = ', model.linear.bias.item())

# Evaluate the trained model on one unseen input.
sample = torch.tensor([[4.0]])
prediction = model(sample)
print('y_test = ', prediction.data)
结果打印
0 34.59614181518555
1 15.420919418334961
2 6.884373188018799
3 3.0838661193847656
4 1.3917133808135986
···············
···············
···············
997 1.991135434309399e-08
998 1.9568119569157716e-08
999 1.9304664533592586e-08
w = 2.0000925064086914
b = -0.0002103645383613184
y_test = tensor([[8.0002]])