张量的存放位置
import torch

# Place the tensor on the CPU and compute with the CPU.
dev = torch.device("cpu")
# BUGFIX: was `evice=dev` (typo), which raises
# TypeError: tensor() got an unexpected keyword argument 'evice'.
a = torch.tensor([2, 2], device=dev)
print(a)
输出:tensor([2, 2])
# Pick the CUDA device when one is available, otherwise fall back to the CPU
# so this example also runs on machines without a GPU. (The original assigned
# dev = cpu and then immediately overwrote it with cuda — dead code removed.)
dev = torch.device("cuda" if torch.cuda.is_available() else "cpu")
a = torch.tensor([2, 2],
                 dtype=torch.float32,  # store the values as 32-bit floats
                 device=dev)           # place the tensor on the chosen device
print(a)
输出:tensor([2., 2.], device='cuda:0')
稀疏的张量转为稠密张量
### 定义稀疏张量
# Build a 4x4 sparse COO tensor with three nonzero entries.
# `i` holds the coordinates (first row = row indices, second row = column
# indices), `v` holds the value stored at each coordinate, so the entries
# land at (0,0)=1, (1,1)=2 and (2,2)=3.
i = torch.tensor([[0, 1, 2],
                  [0, 1, 2]])
v = torch.tensor([1, 2, 3])
a = torch.sparse_coo_tensor(i, v, (4, 4))
print(a)
输出:
tensor(indices=tensor([[0, 1, 2],
[0, 1, 2]]),
values=tensor([1, 2, 3]),
size=(4, 4), nnz=3, layout=torch.sparse_coo)
转为稠密的张量
# Same sparse tensor as above, immediately converted to a dense 4x4 matrix
# with .to_dense(); unspecified positions become 0.
# NOTE(review): `dev` is the device chosen earlier in the file (cuda in the
# original notes) — this block intentionally reuses it.
i = torch.tensor([[0, 1, 2], [0, 1, 2]])
v = torch.tensor([1, 2, 3])
a = torch.sparse_coo_tensor(i, v, (4, 4),
                            dtype=torch.float32,
                            device=dev).to_dense()
# BUGFIX: the notes show the dense tensor as output, but the code never
# printed it — add the missing print so the shown output is reproducible.
print(a)
输出:
tensor([[1., 0., 0., 0.],
[0., 2., 0., 0.],
[0., 0., 3., 0.],
[0., 0., 0., 0.]], device='cuda:0')