In this lesson, the instructor trains a linear model using PyTorch's built-in modules (torch.nn).
Video: 《PyTorch深度学习实践》完结合集_哔哩哔哩_bilibili (the "PyTorch Deep Learning Practice" playlist on Bilibili)
Part of the code is shown below:
import torch
import matplotlib.pyplot as plt
# Build the dataset (generating linear equation: y = 2.763*x - 1.121)
def getDataset():
    x = torch.Tensor([[1.0], [2.0], [3.0]])
    y = torch.Tensor([[1.642], [4.404], [7.168]])
    return x, y
# Build the linear model
class LinearModel(torch.nn.Module):
    def __init__(self):
        super(LinearModel, self).__init__()
        self.linear = torch.nn.Linear(1, 1)  # one input feature, one output feature

    def forward(self, x):
        y_pred = self.linear(x)
        return y_pred
def run():
    x_data, y_data = getDataset()
    model = LinearModel()
    # Record the loss value per epoch
    ls_SGD = []
    # ls_Adagrad = []
    # ls_Adam = []
    # ls_Adamax = []
    # Loss function: sum of squared errors
    # (size_average=False is deprecated; reduction='sum' is the equivalent)
    criterion = torch.nn.MSELoss(reduction='sum')
    # Build the optimizer
    optimizer_SGD = torch.optim.SGD(model.parameters(), lr=0.01)
    # optimizer_Adagrad = torch.optim.Adagrad(model.parameters(), lr=0.01)
    # optimizer_Adam = torch.optim.Adam(model.parameters(), lr=0.01)
    # optimizer_Adamax = torch.optim.Adamax(model.parameters(), lr=0.01)
    for epoch in range(200):
        y_pred = model(x_data)
        loss = criterion(y_pred, y_data)
        # .item() extracts the Python scalar, so the computation graph
        # is not kept alive by the list
        ls_SGD.append(loss.item())
        # ls_Adagrad.append(loss.item())
        # ls_Adam.append(loss.item())
        # ls_Adamax.append(loss.item())
        # Zero the parameter gradients
        optimizer_SGD.zero_grad()
        # optimizer_Adagrad.zero_grad()
        # optimizer_Adam.zero_grad()
        # optimizer_Adamax.zero_grad()
        # Backpropagate to compute the parameter gradients
        loss.backward()
        # Take one gradient-descent step to update the parameters
        optimizer_SGD.step()
        # optimizer_Adagrad.step()
        # optimizer_Adam.step()
        # optimizer_Adamax.step()
    plt.plot(range(1, 201), ls_SGD)
    # plt.show()
    plt.savefig('SGD.jpg', dpi=300)

if __name__ == "__main__":
    run()
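The commented-out lines above suggest each optimizer was tried by editing and rerunning the script. A minimal sketch of doing the same comparison in a single run is shown here; the helper name train_with and the output filename compare.jpg are my own, not from the course:

# Sketch (not from the course): train one fresh model per optimizer
# and overlay the four loss curves on one plot.
def train_with(optim_cls, epochs=200, lr=0.01):
    x_data, y_data = getDataset()
    model = LinearModel()  # a fresh model per optimizer keeps runs independent
    criterion = torch.nn.MSELoss(reduction='sum')
    optimizer = optim_cls(model.parameters(), lr=lr)
    losses = []
    for epoch in range(epochs):
        y_pred = model(x_data)
        loss = criterion(y_pred, y_data)
        losses.append(loss.item())
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    return losses

def compare():
    for optim_cls in (torch.optim.SGD, torch.optim.Adagrad,
                      torch.optim.Adam, torch.optim.Adamax):
        losses = train_with(optim_cls)
        plt.plot(range(1, len(losses) + 1), losses, label=optim_cls.__name__)
    plt.xlabel('epoch')
    plt.ylabel('loss')
    plt.legend()
    plt.savefig('compare.jpg', dpi=300)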
[Figures omitted: training-loss curves for SGD, Adagrad, Adam, and Adamax]
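To sanity-check the result, the fitted parameters can be compared against the generating equation y = 2.763*x - 1.121. A small sketch, assuming run() is modified to end with return model (the version above keeps the model local):

model = run()  # assumes run() was changed to return its trained model
print(f"w = {model.linear.weight.item():.3f}")  # should approach 2.763
print(f"b = {model.linear.bias.item():.3f}")    # should approach -1.121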