If my loss contains a first-order gradient term, how do I differentiate it with PyTorch?
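Concretely, the script below optimizes a total loss of the form L_total = L + ||∇_W L||_2: a plain MSE loss plus the L2 norm of its gradient with respect to the weights, which means backpropagating through the gradient itself (a second-order derivative).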
import torch
from torch import nn

class Classifier(nn.Module):
    def __init__(self, dim_in=5):
        super(Classifier, self).__init__()
        self.fc = nn.Linear(dim_in, 1)
        self.relu = nn.ReLU()

    def forward(self, x):
        x = self.fc(x)
        return self.relu(x)

    def loss(self, x, y):
        # Mean squared error between predictions x and targets y
        return torch.mean((x - y) ** 2)
if __name__ == '__main__':
    h_hat = torch.rand(13, 5).requires_grad_(True)
    y = torch.rand(13, 1)
    d = Classifier()
    opt = torch.optim.Adam(d.parameters())

    loss = d.loss(d(h_hat), y)
    opt.zero_grad()
    # First backward pass: compute the first-order gradient.
    # create_graph=True builds a graph of the backward pass itself,
    # which is what makes second-order derivatives possible.
    loss.backward(create_graph=True)
    grad = d.fc.weight.grad
    print("grad", grad)
    L_grad = torch.norm(grad, p=2)

    # New loss that contains the first-order gradient term
    loss = loss + L_grad
    # Do not call opt.zero_grad() here: it would zero the first-order
    # gradient in place, and the two gradients printed by this script
    # would come out the same.
    # Second backward pass: backprops through the gradient norm,
    # i.e. computes second-order derivatives.
    loss.backward()
    print("L_grad", d.fc.weight.grad)
    opt.step()
# Terminal output:
# grad tensor([[ 0.1285, -0.0961, 0.1789, 0.1829, 0.0649]], grad_fn=<CopyBackwards>)
# L_grad tensor([[1.2037, 0.4403, 1.4736, 1.4062, 1.1052]], grad_fn=<CopyBackwards>)
# You can see that the two gradients are different.
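Note that because the script never zeroes .grad between the two backward() calls, the second call accumulates the plain loss's gradient on top of the copy already stored, so by the time opt.step() runs the MSE gradient has been counted twice. A cleaner pattern for a gradient-penalty loss is torch.autograd.grad, which returns differentiable gradients without ever writing into .grad. Below is a minimal sketch of that approach, using a bare nn.Linear as a stand-in for the Classifier above; the names are illustrative, not part of the original code.

import torch
from torch import nn

model = nn.Linear(5, 1)  # stand-in for the Classifier above
opt = torch.optim.Adam(model.parameters())

h_hat = torch.rand(13, 5)
y = torch.rand(13, 1)

loss = torch.mean((model(h_hat) - y) ** 2)

# Differentiable first-order gradients w.r.t. the parameters.
# create_graph=True makes the returned tensors part of the graph;
# .grad is never populated, so nothing is accumulated twice.
grads = torch.autograd.grad(loss, model.parameters(), create_graph=True)
L_grad = sum(torch.norm(g, p=2) for g in grads)

total = loss + L_grad
opt.zero_grad()   # safe here: .grad was never touched
total.backward()  # differentiates through the gradient norm (second order)
opt.step()

This is the same pattern used for gradient penalties such as WGAN-GP, except that there the gradient is taken with respect to the inputs rather than the weights.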