import torch
# --- Basic tensor construction and dtype handling ---
a1 = torch.rand(2, 3)                        # uniform random 2x3 tensor
a2 = torch.randn(3, 4)                       # standard-normal 3x4 tensor
a3 = torch.zeros((1, 4))                     # all-zeros 1x4 tensor
a4 = torch.ones((1, 2), dtype=torch.int64)   # all-ones with an explicit int64 dtype

print(a2.size())                             # full shape: torch.Size([3, 4])
print(a2.shape)                              # .shape is an alias for .size()
print(a2.size(0))                            # extent along dimension 0

a5 = torch.tensor([6, 2], dtype=torch.float32)
a5.type()                                    # dtype as a string
a5.type(torch.int64)                         # returns an int64 copy (not in-place)
# --- tensor <-> ndarray conversion and basic arithmetic ---
import numpy as np

a6 = np.random.randn(2, 3)
x1 = torch.from_numpy(a6)   # numpy -> tensor (shares memory with a6)
t = x1.numpy()              # tensor -> numpy

a = [1, 2, 3]
b = np.array(a)             # list -> ndarray (fixed: numpy is imported as `np`, bare `numpy` was a NameError)
b.tolist()                  # ndarray -> list
x1.numpy()

x2 = torch.rand(2, 3)
x1 + x2                     # elementwise add (x2's float32 promotes to x1's float64)
x1 + 3                      # broadcast scalar add
x1.add(x2)                  # out-of-place add; x1 unchanged
x1.add_(x2)                 # in-place add; x1 is modified
x1.view(3, 2)               # reshaped view of the same data
x1.view(-1, 1)              # -1 infers the dim -> shape (n, 1)
x1.mean()                   # mean over all elements
x1.sum()                    # sum over all elements (0-dim tensor)
x = x1.sum().item()         # 0-dim tensor -> Python scalar
# --- Autograd basics: tracking, backward(), and opting out ---
a7 = torch.ones(2, 2, requires_grad=True)    # leaf tensor tracked by autograd
a7.grad                                      # None until backward() has run
a7.grad_fn                                   # None: leaf tensors have no grad_fn

a8 = a7 + 3
a8.grad_fn                                   # records the op that produced a8
a9 = a8.mean()
a9.backward()                                # populates a7.grad
a7.grad                                      # d(a9)/d(a7): 0.25 in every entry

with torch.no_grad():                        # suspend gradient recording
    print((a7*2).requires_grad)              # False inside the context
(a7*2).requires_grad                         # True again outside

y = a7.detach()                              # same data, detached from the graph
y.requires_grad                              # False

q1 = torch.randn(1, 2)
q2 = torch.tensor([2, 3])
q2 = q2.type(torch.float32)
q2.requires_grad_(True)                      # enable tracking in place
# tensor 计算 (section heading left over from the source article; not code)
# 最新推荐文章于 2023-01-16 12:14:22 发布 (stray blog-page footer from the original post)