在 PyTorch 中,变量 (Variable/Tensor) 用于构建计算图;与 TensorFlow 和 Theano 的静态图不同,
PyTorch 的计算图是动态的:它没有 placeholder,数据以变量的形式直接传入计算图参与运算。
import numpy as np
import torch
from torch.autograd import Variable
# Demo: autograd on a 2x2 tensor — compare a plain tensor with one that
# participates in the computation graph.
tensor = torch.Tensor([[1, 2], [3, 4]])
# requires_grad=True enrolls the tensor in autograd so gradients are
# accumulated into .grad during backprop.
# NOTE: the Variable wrapper is deprecated since PyTorch 0.4 — a plain
# tensor with requires_grad=True is the modern equivalent and prints
# identically (tensor(..., requires_grad=True)).
variable = tensor.clone().requires_grad_(True)
print(tensor)
print(variable)
t_out = torch.mean(tensor * tensor)      # plain tensor: no graph, no gradient
v_out = torch.mean(variable * variable)  # v_out = 1/4 * sum(variable**2)
print(t_out)
print(v_out)
v_out.backward()  # backprop from v_out; fills variable.grad
# d(v_out)/d(variable) = 1/4 * 2 * variable = variable / 2
print(variable.grad)
print(variable)               # tensor still attached to the autograd graph
print(variable.data)          # raw data tensor (detached from the graph)
print(variable.data.numpy())  # numpy format
输出:
tensor([[1., 2.],
[3., 4.]])
tensor([[1., 2.],
[3., 4.]], requires_grad=True)
tensor(7.5000)
tensor(7.5000, grad_fn=<MeanBackward1>)
tensor([[0.5000, 1.0000],
[1.5000, 2.0000]])
tensor([[1., 2.],
[3., 4.]], requires_grad=True)
tensor([[1., 2.],
[3., 4.]])
[[1. 2.]
[3. 4.]]