# .requires_grad_(...) toggles an existing tensor's requires_grad flag in place;
# tensors created directly (e.g. via randn) default to requires_grad=False.
a = torch.randn(2, 2)
a = (a * 3) / (a - 1)
print(a.requires_grad)   # False — derived from tensors that don't track gradients
a.requires_grad_(True)   # in-place: subsequent ops on `a` are now recorded
print(a.requires_grad)   # True
b = (a * a).sum()        # scalar result; its grad_fn records the sum op
print(b.grad_fn)         # e.g. <SumBackward0 object at 0x0000000007BEBFC8>
# backward() on a NON-scalar output requires an explicit `gradient` argument:
# the vector v for the vector-Jacobian product v^T . J, which lands in x.grad.
x = torch.randn(3, requires_grad=True)
y = x * 2
while y.data.norm() < 1000:   # .data.norm() is the tensor's L2 norm
    y = y * 2                 # keep doubling until the norm reaches 1000
print("y =", y)               # e.g. y = tensor([1203.4269, 1136.8713, 854.0352], grad_fn=<MulBackward0>)
gradients = torch.tensor([0.1, 1.0, 0.0001], dtype=torch.float)
y.backward(gradients)         # x.grad = (dy/dx) * gradients = 2^k * gradients
print("grad =", x.grad)       # e.g. grad = tensor([2.0480e+02, 2.0480e+03, 2.0480e-01])