文章目录
Tensor
import torch

# Build a 4x2 matrix (default dtype: float32).
a = torch.Tensor([[1, 2], [3, 4], [5, 6], [7, 8]])
print(a)
print('{}'.format(a))
print('{}'.format(a.size()))

# A 4x2 matrix of zeros.
b = torch.zeros((4, 2))
print(b)

# Matrices with different element types.
c = torch.IntTensor([[1, 2], [3, 4], [5, 6], [7, 8]])
print(c)
d = torch.LongTensor([[1, 2], [3, 4], [5, 6], [7, 8]])
print(d)
e = torch.DoubleTensor([[1, 2], [3, 4], [5, 6], [7, 8]])
print(e)  # BUG FIX: original printed d a second time instead of e

# Read and write individual elements.
print(e[1, 1])
e[1, 1] = 0
print(e[1, 1])

# Tensor <-> NumPy conversion (from_numpy shares the underlying buffer).
f = e.numpy()
print(f)
g = torch.from_numpy(f)
print(g)

# Move the tensor to the GPU only when one is available.
if torch.cuda.is_available():
    h = g.cuda()
    print(h)
Variable
Variable的创建和使用
1.我们首先创建一个空的Variable:
# A Variable built with no data behaves like an empty Tensor.
import torch

a = torch.autograd.Variable()
print(a)
可以看到默认的类型为Tensor
2.那么,我们如果需要给Variable变量赋值,那么就一定是Tensor类型
# A Variable must wrap a Tensor; here a 4x2 float matrix.
matrix = torch.Tensor([[1, 2], [3, 4], [5, 6], [7, 8]])
b = torch.autograd.Variable(matrix)
print(b)
3.第一章提到了Variable的三个属性,我们依次打印它们:
import torch

# An empty Variable, then one wrapping a 4x2 matrix.
a = torch.autograd.Variable()
print(a)
mat = torch.Tensor([[1, 2], [3, 4], [5, 6], [7, 8]])
b = torch.autograd.Variable(mat)
print(b)

# The three attributes introduced in chapter 1:
print(b.data)     # the wrapped tensor
print(b.grad)     # None -- no backward pass has run yet
print(b.grad_fn)  # None -- b is a leaf, not the output of an op
三、标量求导计算图
1.为了方便起见,我们可以将torch.autograd.Variable简写为Variable:
from torch.autograd import Variable
2.之后,我们先声明一个变量x,这里requires_grad=True的意义是是否对这个变量求梯度,默认为False:
# Leaf x = 2.0 that records gradients (requires_grad defaults to False).
two = torch.Tensor([2])
x = Variable(two, requires_grad=True)
print(x)
import torch
from torch.autograd import Variable

# An empty Variable, then one wrapping a 4x2 matrix.
a = Variable()
print(a)
b = Variable(torch.Tensor([[1, 2], [3, 4], [5, 6], [7, 8]]))
print(b)

# Its three attributes: wrapped tensor, gradient (unset), producing op (leaf: none).
print(b.data)
print(b.grad)
print(b.grad_fn)

# Start a computation graph from a leaf x = 2 that tracks gradients.
x = Variable(torch.Tensor([2]), requires_grad=True)
print(x)
3.我们再声明两个变量w和b:
# Two more gradient-tracking leaves: w = 3 and b = 4.
w, b = (Variable(torch.Tensor([v]), requires_grad=True) for v in (3, 4))
print(w)
print(b)
4.我们再写两个变量y1和y2:
# Two scalar graphs built from the leaves declared above.
y1 = (w * x) + b        # affine: w*x + b
print(y1)
y2 = (w * x) + (b * x)  # both terms scaled by x
print(y2)
5.我们来计算各个变量的梯度,首先是y1:
# Back-propagate y1 and report d(y1)/d(leaf) for each leaf.
y1.backward()
for leaf in (x, w, b):
    print(leaf.grad)
import torch
from torch.autograd import Variable

# --- Variables and their attributes ---------------------------------
a = Variable()                       # empty Variable
print(a)
b = Variable(torch.Tensor([[1, 2], [3, 4], [5, 6], [7, 8]]))
print(b)
print(b.data)     # wrapped tensor
print(b.grad)     # None until a backward pass runs
print(b.grad_fn)  # None: b is a leaf

# --- Build a scalar computation graph -------------------------------
x = Variable(torch.Tensor([2]), requires_grad=True)
print(x)
w = Variable(torch.Tensor([3]), requires_grad=True)
print(w)
b = Variable(torch.Tensor([4]), requires_grad=True)
print(b)

y1 = w * x + b        # 3*2 + 4 = 10
print(y1)
y2 = w * x + b * x    # 3*2 + 4*2 = 14
print(y2)

# --- Back-propagate y1; gradients land on the leaves ----------------
y1.backward()
for leaf in (x, w, b):
    print(leaf.grad)  # dy1/dx = w = 3, dy1/dw = x = 2, dy1/db = 1
其中:
y1 = 3 * 2 + 4 = 10,
y2 = 3 * 2 + 4 * 2 = 14,
x的梯度是3因为是3 * x,
w的梯度是2因为w * 2,
b的梯度是1因为b * 1(* 1被省略)
6.其次是y2,注释掉y1部分:
# Back-propagate y2; x is passed as the output-gradient weighting argument.
y2.backward(x)
for leaf in (x, w, b):
    print(leaf.grad)
# A Variable built with no data behaves like an empty Tensor.
import torch

a = torch.autograd.Variable()
print(a)
b = torch.autograd.Variable(torch.Tensor([[1, 2], [3, 4],[5, 6], [7, 8]]