Implementing linear regression with the PyTorch framework
# FILE: 学习深度学习/pytorch_linear
# USER: mcfly
# IDE: PyCharm
# CREATE TIME: 2024/9/2 21:03
# DESCRIPTION: Linear Regression based on PyTorch
import torch
# Training data
x_train = torch.Tensor([[1.0], [2.0], [3.0], [4.0]])  # note the bracket nesting: shape (4, 1), one feature per sample
y_train = torch.Tensor([[4.0], [7.0], [10.0], [13.0]])
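# The data satisfies y = 3x + 1, so training should drive w toward 3 and b toward 1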
# Define the linear model
class LinearModel(torch.nn.Module):  # subclass nn.Module to reuse its machinery
    def __init__(self):
        super(LinearModel, self).__init__()
        self.linear = torch.nn.Linear(1, 1, bias=True)  # in_features, out_features, bias (defaults to True)

    def forward(self, x):  # subclasses of nn.Module must override forward(); nn.Module.__call__() dispatches to it, which makes the module callable
        y_pred = self.linear(x)  # the linear layer is itself callable; returns y under the current weight and bias
        return y_pred
model = LinearModel()
criterion = torch.nn.MSELoss(reduction='sum')  # callable, also an nn.Module; it uses y_pred and y, so it takes part in the computational graph. Squared error summed over samples; reduction='sum' replaces the deprecated size_average=False
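# For reference: with reduction='sum', MSELoss computes loss = Σ_i (y_pred_i − y_i)²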
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)  # model.parameters() yields w and b; lr is the learning rate
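# For reference, a minimal sketch (not part of the training script) of what one
# plain-SGD step amounts to, without momentum or weight decay, assuming the
# gradients have already been filled in by loss.backward():
#     with torch.no_grad():
#         for p in model.parameters():
#             p -= 0.01 * p.grad   # p ← p − lr · ∂loss/∂p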
for epoch in range(500):
    y_pred = model(x_train)
    loss = criterion(y_pred, y_train)  # note the argument order: (prediction, target)
    print(epoch, loss.item())  # .item() extracts the Python scalar, so no computational graph is kept around
    optimizer.zero_grad()  # clear the accumulated gradients before each backward pass
    loss.backward()  # backpropagation: compute gradients of the loss w.r.t. w and b
    optimizer.step()  # apply one parameter update
print('w = ', model.linear.weight.item())
print('b = ', model.linear.bias.item())
x_test = torch.Tensor([[4.0]])
y_test = model(x_test)
print('y_pred = ', y_test.data)
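# At inference time no gradients are needed; wrapping the call in torch.no_grad()
# skips building the computational graph (illustrative only for a model this small):
with torch.no_grad():
    print('y_pred(5.0) = ', model(torch.Tensor([[5.0]])).item())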