The difference between clone() and detach() in PyTorch
1. clone does not share memory with the original tensor; detach shares memory with the original tensor.
import torch

a = torch.zeros(3, 4, requires_grad=True)
a.data.uniform_()           # fill a in-place with random values
b = a.data.clone()          # clone: a copy in newly allocated memory
c = a.data.detach()         # detach: shares memory with a
print("original tensor:", a)
# clone does not share memory with the original tensor; detach does
a.data *= 10
print("a:", a)
print("b:", b)  # the cloned b keeps its old values when a changes
print("c:", c)  # the detached c changes along with a
2. clone supports gradient backpropagation; detach does not.
import torch

# clone supports gradient backpropagation; detach does not
a = torch.tensor([1.0], requires_grad=True)
b = a.data.clone()
c = a.data.detach()
y = a**2
y.backward()
print(a.grad)  # tensor([2.])
print(b.grad)  # None: b does not require grad and keeps no gradient of its own
print(c.grad)  # None
z = a**6 + 2*b + 3*c
z.backward()
print(a.grad)  # tensor([8.]): dz/da = 6*a**5 = 6, plus the 2 accumulated from y
print(b.grad)  # None: neither b nor c takes part in the gradient computation
print(c.grad)  # None
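Note that the example above clones a.data, which is already outside the autograd graph, so the gradient cannot flow through b either. To see gradients actually pass through clone(), clone the tensor itself: the clone stays in the graph and backpropagates to a, while detach() cuts the graph. A minimal sketch:

import torch

a = torch.tensor([1.0], requires_grad=True)
b = a.clone()    # stays in the graph: gradients flow back to a
c = a.detach()   # cut from the graph: no gradient flows through c
z = 2*b + 3*c
z.backward()
print(a.grad)  # tensor([2.]): only the clone path contributes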
If you also want non-leaf nodes to keep their gradients, use retain_grad(); by default only leaf tensors retain .grad after backward().
import torch

a = torch.tensor([1.0], requires_grad=True)
b = a.data.clone()
b.requires_grad = True  # b becomes a leaf tensor that requires grad
b.retain_grad()         # redundant here: leaf tensors retain .grad by default
c = a.data.detach()
z = a**6 + 2*b + 3*c
z.backward()
print(a.grad)  # tensor([6.]): dz/da = 6*a**5
print(b.grad)  # tensor([2.]): dz/db = 2, kept because b requires grad
print(c.grad)  # None: the detached c takes no part in the gradient computation
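The b above is actually a leaf (created by the user, requiring grad), so it would keep its .grad anyway. retain_grad() matters for genuine non-leaf nodes, i.e., tensors produced by operations; a minimal sketch:

import torch

a = torch.tensor([1.0], requires_grad=True)
b = a.clone()     # non-leaf: produced by an operation on a
b.retain_grad()   # without this, b.grad would be None after backward()
z = b**2
z.backward()
print(a.grad)  # tensor([2.]): the gradient flows through the clone to a
print(b.grad)  # tensor([2.]): retained thanks to retain_grad()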