This post summarizes the different ways to implement gradient updates in PyTorch, working up from the underlying math done by hand to updates driven by an optimizer.
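All four variants below implement the same vanilla gradient-descent step; in symbols, every update applies

$$ w \leftarrow w - \eta \, \frac{\partial L}{\partial w} $$

where $L$ is the scalar loss and $\eta$ is the learning rate (learning_rate in the code).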
- Updating the parameters by hand, directly from the computed gradients.
import torch
from torch.autograd import Variable  # legacy wrapper; since PyTorch 0.4, Tensors carry autograd state themselves

# N: batch size; D_in: input dim; H: hidden dim; D_out: output dim
N, D_in, H, D_out = 64, 1000, 100, 10
x = Variable(torch.randn(N, D_in), requires_grad=False)
y = Variable(torch.randn(N, D_out), requires_grad=False)
w1 = Variable(torch.randn(D_in, H), requires_grad=True)
w2 = Variable(torch.randn(H, D_out), requires_grad=True)
learning_rate = 1e-6
for t in range(500):
    # Forward pass: linear -> ReLU (clamp at 0) -> linear
    y_pred = x.mm(w1).clamp(min=0).mm(w2)
    # Sum-of-squares loss
    loss = (y_pred - y).pow(2).sum()
    # Gradients accumulate across backward() calls, so clear them first
    if w1.grad is not None: w1.grad.data.zero_()
    if w2.grad is not None: w2.grad.data.zero_()
    loss.backward()
    # Gradient-descent step; operating on .data keeps the update out of the graph
    w1.data -= learning_rate * w1.grad.data
    w2.data -= learning_rate * w2.grad.data
    print('loss:', loss.item())
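As an aside, in PyTorch 0.4+ Variable was merged into Tensor, so the same loop can be written without the wrapper. A minimal sketch, assuming a 0.4+ install, with the in-place update wrapped in torch.no_grad() instead of going through .data:

import torch

N, D_in, H, D_out = 64, 1000, 100, 10
x = torch.randn(N, D_in)
y = torch.randn(N, D_out)
w1 = torch.randn(D_in, H, requires_grad=True)
w2 = torch.randn(H, D_out, requires_grad=True)
learning_rate = 1e-6
for t in range(500):
    y_pred = x.mm(w1).clamp(min=0).mm(w2)
    loss = (y_pred - y).pow(2).sum()
    loss.backward()
    with torch.no_grad():  # keep the update step out of the autograd graph
        w1 -= learning_rate * w1.grad
        w2 -= learning_rate * w2.grad
        w1.grad.zero_()    # reset accumulated gradients for the next step
        w2.grad.zero_()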
- Updating the parameters through the .grad attribute and printing the resulting weights.
import torch
from torch.autograd import Variable

N, D_in, H, D_out = 64, 1000, 100, 10
x = Variable(torch.randn(N, D_in), requires_grad=False)
y = Variable(torch.randn(N, D_out), requires_grad=False)
w1 = Variable(torch.randn(D_in, H), requires_grad=True)
w2 = Variable(torch.randn(H, D_out), requires_grad=True)
learning_rate = 1e-6
for t in range(10):
    y_pred = x.mm(w1).clamp(min=0).mm(w2)
    loss = (y_pred - y).pow(2).sum()
    # Clear stale gradients before backpropagating
    if w1.grad is not None: w1.grad.data.zero_()
    if w2.grad is not None: w2.grad.data.zero_()
    loss.backward()
    w1.data -= learning_rate * w1.grad.data
    w2.data -= learning_rate * w2.grad.data
# Inspect the weights after the ten update steps
print('w1:', w1, '\nw2:', w2)
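Since the updates rely entirely on w1.grad and w2.grad, it is worth confirming that autograd matches the hand-derived gradient. A minimal sketch, using the same two-layer setup as above; the manual formula follows from the chain rule for the sum-of-squares loss:

import torch
from torch.autograd import Variable

N, D_in, H, D_out = 64, 1000, 100, 10
x = Variable(torch.randn(N, D_in))
y = Variable(torch.randn(N, D_out))
w1 = Variable(torch.randn(D_in, H), requires_grad=True)
w2 = Variable(torch.randn(H, D_out), requires_grad=True)

h_relu = x.mm(w1).clamp(min=0)
y_pred = h_relu.mm(w2)
loss = (y_pred - y).pow(2).sum()
loss.backward()

# Hand-derived gradients via the chain rule:
# dloss/dy_pred = 2 * (y_pred - y), then dloss/dw2 = h_relu^T @ dloss/dy_pred
grad_y_pred = 2.0 * (y_pred - y)
grad_w2 = h_relu.t().mm(grad_y_pred)
print(torch.allclose(w2.grad.data, grad_w2.data))  # expected: True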
- Building the model with torch.nn.Sequential and updating the parameters returned by model.parameters().
import torch
from torch.autograd import Variable

N, D_in, H, D_out = 64, 1000, 100, 10
x = Variable(torch.randn(N, D_in))
y = Variable(torch.randn(N, D_out), requires_grad=False)
# nn.Sequential chains the layers and registers their parameters
model = torch.nn.Sequential(torch.nn.Linear(D_in, H),
                            torch.nn.ReLU(),
                            torch.nn.Linear(H, D_out))
loss_fn = torch.nn.MSELoss(reduction='sum')  # size_average=False in pre-0.4 PyTorch
learning_rate = 1e-4
for t in range(500):
    y_pred = model(x)
    loss = loss_fn(y_pred, y)
    model.zero_grad()  # zeroes .grad on every parameter the model owns
    loss.backward()
    # Manual gradient-descent step over all registered parameters
    for param in model.parameters():
        param.data -= learning_rate * param.grad.data
    print('loss:', loss.item())
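A quick way to see exactly which tensors that parameters() loop touches is named_parameters(); a small sketch with the same layer sizes as above:

import torch

model = torch.nn.Sequential(torch.nn.Linear(1000, 100),
                            torch.nn.ReLU(),
                            torch.nn.Linear(100, 10))
# The ReLU at index 1 owns no parameters, so only the Linear layers appear
for name, param in model.named_parameters():
    print(name, tuple(param.shape))
# 0.weight (100, 1000)
# 0.bias (100,)
# 2.weight (10, 100)
# 2.bias (10,)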
- Gradient updates with a custom model class and a torch.optim optimizer.
import torch
from torch.autograd import Variable

class TwoLayerNet(torch.nn.Module):
    def __init__(self, D_in, H, D_out):
        super(TwoLayerNet, self).__init__()
        self.linear1 = torch.nn.Linear(D_in, H)
        self.linear2 = torch.nn.Linear(H, D_out)

    def forward(self, x):
        # clamp(min=0) applies the ReLU between the two linear layers
        h_relu = self.linear1(x).clamp(min=0)
        y_pred = self.linear2(h_relu)
        return y_pred

N, D_in, H, D_out = 64, 1000, 100, 10
x = Variable(torch.randn(N, D_in))
y = Variable(torch.randn(N, D_out), requires_grad=False)
model = TwoLayerNet(D_in, H, D_out)
criterion = torch.nn.MSELoss(reduction='sum')  # size_average=False in pre-0.4 PyTorch
optimizer = torch.optim.SGD(model.parameters(), lr=1e-4)
for t in range(500):
    y_pred = model(x)
    loss = criterion(y_pred, y)
    optimizer.zero_grad()  # replaces the manual per-parameter .grad zeroing
    loss.backward()
    optimizer.step()       # applies the SGD update to every model parameter
    print(loss.detach().numpy())
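Once the update is delegated to torch.optim, swapping the update rule is a one-line change; for example Adam (the lr value here is only illustrative, carried over from the SGD example):

# Drop-in replacement for the SGD line above; the training loop stays identical
optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
# SGD with momentum is another common drop-in choice:
# optimizer = torch.optim.SGD(model.parameters(), lr=1e-4, momentum=0.9)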
This walkthrough should help readers understand both how gradient updates work and how PyTorch models are built. More detail can be found in the Stanford open course.