import torch
from torch.autograd import Variable
# Autograd demo: gradients of a scalar mean through an elementwise product.
# NOTE: torch.autograd.Variable is deprecated since PyTorch 0.4 — a plain
# tensor created with requires_grad=True tracks gradients directly.
tensor = torch.FloatTensor([[1, 2], [3, 4]])        # no gradient tracking
variable = tensor.clone().requires_grad_(True)      # leaf tensor that records ops

t_out = torch.mean(tensor * tensor)        # '*' is elementwise, NOT matrix multiply
v_out = torch.mean(variable * variable)    # v_out = (1/4) * sum(x_i ** 2)

# With x1=1, x2=2, x3=3, x4=4:
#   v_out = (x1**2 + x2**2 + x3**2 + x4**2) / 4
# The gradient is the partial derivative w.r.t. each x_i:
#   d(v_out)/d(x_i) = x_i / 2  ->  [[0.5, 1.0], [1.5, 2.0]]
print(v_out)
print(variable.grad)           # None before backward() is called
v_out.backward()               # only grad-tracking tensors can backpropagate
print(variable.grad)           # now populated with x / 2
print(variable.data)           # raw values (detached from the graph)
print(variable.data.numpy())
# variable和tensor的区别 (the difference between Variable and Tensor)
# NOTE(review): the two lines above/below were pasted from a blog article
# ("最新推荐文章于 2023-01-31 10:46:22 发布" — publication-date footer);
# commented out so the file remains valid Python.