import numpy as np
import torch
# --- Creating tensors ---

# Directly from a nested Python list.
data = [[1, 2], [3, 4]]
x_data = torch.tensor(data)
print(x_data)

# From a NumPy array (from_numpy shares the array's memory buffer).
np_array = np.array(data)
x_np = torch.from_numpy(np_array)
print(x_np)

# From another tensor: retains the shape of x_data.
x_ones = torch.ones_like(x_data)
print(f"Ones Tensor: \n {x_ones} \n")
x_rand = torch.rand_like(x_data, dtype=torch.float)  # override x_data's dtype
print(f"Random Tensor: \n {x_rand} \n")

# From a shape tuple.
shape = (2, 3)
rand_tensor = torch.rand(shape)
ones_tensor = torch.ones(shape)
zeros_tensor = torch.zeros(shape)
print(f"Random Tensor: \n {rand_tensor} \n")
print(f"Ones Tensor: \n {ones_tensor} \n")
print(f"Zeros Tensor: \n {zeros_tensor} \n")
#%%
# --- Tensor attributes: shape, dtype, and device ---
tensor = torch.rand(3, 4)
print(f"Shape of tensor:{tensor.shape}")
print(f"Datatype of tensor:{tensor.dtype}")
print(f"tensor Device GPU or cpu:{tensor.device}")
##%% Tensor operations
tensor = torch.rand(3, 4)
# Move the tensor to the GPU when one is available.
# Bug fix: the assignment below was not indented under the `if`,
# which made the whole file fail with an IndentationError.
if torch.cuda.is_available():
    tensor = tensor.to('cuda')
print(f"tensor Device GPU or cpu:{tensor.device}")
# 1) Indexing and slicing: zero every column except the first.
tensor = torch.ones(4, 4)
tensor[:,1:] = 0
print(tensor)
# 2) Concatenation along dim=1 (columns) -> shape (4, 12).
tensor2 = torch.ones_like(tensor)
t1 = torch.cat([tensor, tensor2, tensor], dim=1)
print(t1)
# 3) Element-wise product vs. matrix multiplication.
print(f"tensor.mul(tensor):\n{tensor.mul(tensor)}\n")
# The `*` operator is the same element-wise product:
print(f"tensor * tensor:\n {tensor * tensor}")
print(f"tensor.matmul(tensor.T): \n {tensor.matmul(tensor.T)} \n")
print(f"tensor @ tensor.T: \n {tensor @ tensor.T}")
# 4) In-place operations carry a trailing underscore, e.g. x.copy_(y), x.t_().
print(tensor, "\n")
tensor.add_(5)  # adds 5 to every element, mutating `tensor` in place
print(tensor)
## tensor -> NumPy: on the CPU the tensor and the array returned by
## .numpy() share one memory buffer, so mutating either changes both.
t = torch.ones(5)
print(f"t:{t}")
n = t.numpy()
print(f"n:{n}")
t.add_(1)  # in-place add on the tensor...
print(f"t:{t}")
print(f"n:{n}")  # ...shows up through the NumPy view as well
## NumPy array -> tensor: torch.from_numpy also shares memory, so an
## in-place update of the array changes the tensor's values too.
n = np.ones(5)
t = torch.from_numpy(n)
n += 1  # in-place add on the array (same buffer as `t`)
print(f"{t}\n")
print(f"{n}\n")
# 1. Tensors — PyTorch  (article title; residue from the blog page this script was copied from)
# Latest recommended article published 2022-10-11 17:28:18