Tensor Operations

1. Creating Tensors

import numpy as np
import torch

a=torch.tensor([1,2,3],dtype=torch.float32)
print(a)#tensor([1., 2., 3.])

# create an all-zeros tensor
z=torch.zeros(3,3)
print(z)
# tensor([[0., 0., 0.],
#         [0., 0., 0.],
#         [0., 0., 0.]])

# create a tensor with ones on the diagonal
k=torch.eye(3,3)
print(k)
# tensor([[1., 0., 0.],
#         [0., 1., 0.],
#         [0., 0., 1.]])

# build a diagonal matrix from a 1-D tensor
x2=torch.diag(torch.tensor([1,2,3]))
print(x2)
# tensor([[1, 0, 0],
#         [0, 2, 0],
#         [0, 0, 3]])

# evenly spaced values with linspace
print(torch.linspace(2,10,steps=4))# steps is the number of points in the sequence
# tensor([ 2.0000,  4.6667,  7.3333, 10.0000])

# uniform distribution, values in [0, 1)
print(torch.rand(10))

# standard normal distribution, mean 0 and variance 1
print(torch.randn(10))
# e.g. torch.randn(batch,channel,height,width) for image-shaped tensors

# normal distribution with element-wise mean and std
x1=torch.normal(mean=torch.zeros(4,4),std=torch.ones(4,4))

# random permutation
print(torch.randperm(10))# the integers 0-9 in random order
# tensor([3, 1, 2, 7, 6, 9, 0, 5, 4, 8])

# generate values over a range
print(torch.arange(10,30,5))# start, end (exclusive), step
# tensor([10, 15, 20, 25])

2. Checking Whether an Object Is a Tensor: torch.is_tensor(obj)

x=[2,9,8,5]
print(torch.is_tensor(x))# check whether x is a tensor: False
y=torch.rand(1,2)
print(torch.is_tensor(y))#True

3. Counting Elements

print(torch.numel(y))# total number of elements: 2

4. Converting Between NumPy Arrays and Tensors

# NumPy array to tensor
w=np.array([3,4,5,6])
v=torch.from_numpy(w)
print(v)
# tensor([3, 4, 5, 6], dtype=torch.int32)
# tensor to NumPy array
a=torch.zeros(4)
b=a.numpy()
print(a,b)
# tensor([0., 0., 0., 0.]) [0. 0. 0. 0.]
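Worth noting: torch.from_numpy shares memory with the source NumPy array, so in-place changes to one are visible in the other. A minimal sketch:

w=np.array([3,4,5,6])
v=torch.from_numpy(w)# v shares memory with w
w[0]=100# modify the array in place...
print(v)# ...and the change is visible in the tensor as well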

5. Slicing and Indexing

Most of this works almost exactly as in NumPy.

(1) Irregular slicing

torch.index_select selects entries along one dimension using an index tensor:

a=torch.linspace(1,12,steps=12).view(2,6)
print(a)
b=torch.index_select(a,0,torch.tensor([0,1]))# select rows 0 and 1 along dim 0
print(b)
c=torch.index_select(a,1,torch.tensor([0,2]))# select columns 0 and 2 along dim 1
print(c)

torch.masked_select returns the elements selected by a boolean mask as a 1-D tensor. Note that the mask does not need the same shape as the input, but the two must be broadcastable, as the sketch below shows.
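A minimal sketch with made-up values:

a=torch.tensor([[1.,2.,3.],[4.,5.,6.]])
mask=a>3# boolean mask, broadcastable with a
print(torch.masked_select(a,mask))# always a 1-D result: tensor([4., 5., 6.])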

Reference: "pytorch中mask_select()的用法" — 燕小硕的博客, CSDN.

torch.take treats the input tensor as if it were flattened into a 1-D tensor; the result tensor has the same shape as the index tensor.
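A minimal sketch with made-up values:

a=torch.tensor([[10,20,30],[40,50,60]])
idx=torch.tensor([[0,2],[3,5]])
print(torch.take(a,idx))# a is read as the flat sequence 10,20,30,40,50,60
# tensor([[10, 30],
#         [40, 60]])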

For more detail, see: "pytorch每日一学44(torch.take())根据索引返回指定索引上的数据集合" — Fluid_ray的博客, CSDN.

(2) Building a new tensor by modifying selected elements

torch.where

torch.masked_fill

torch.index_fill

a=torch.rand(10).view(2,5)
print(a)
# tensor([[0.9998, 0.5385, 0.3615, 0.0559, 0.9988],
#         [0.4596, 0.9342, 0.5472, 0.7440, 0.8707]])
b=torch.where(a>0.5,torch.tensor(1.0),torch.tensor(0.0))# 1.0 where a>0.5, else 0.0
print(b)
# tensor([[1., 1., 0., 0., 1.],
#         [0., 1., 1., 1., 1.]])
c=torch.index_fill(a,dim = 1,index = torch.tensor([0,2,4]),value = 100)# fill columns 0, 2 and 4 with 100
print(c)
# tensor([[1.0000e+02, 5.3855e-01, 1.0000e+02, 5.5887e-02, 1.0000e+02],
#         [1.0000e+02, 9.3425e-01, 1.0000e+02, 7.4401e-01, 1.0000e+02]])
d=torch.masked_fill(a,a>0.5,1)# set elements where a>0.5 to 1
print(d)
# tensor([[1.0000, 1.0000, 0.3615, 0.0559, 1.0000],
#         [0.4596, 1.0000, 1.0000, 1.0000, 1.0000]])

6. Dimension Transformations

tensor.reshape is similar to the tensor.view method; view requires contiguous memory, while reshape also handles non-contiguous tensors (possibly returning a copy).

tensor.squeeze removes size-1 dimensions, and tensor.unsqueeze inserts a size-1 dimension.

a=torch.rand([1,2,4,5])
print(a.shape)#torch.Size([1, 2, 4, 5])
print(a)
# tensor([[[[0.4281, 0.2015, 0.0614, 0.5061, 0.6283],
#           [0.7777, 0.0322, 0.4097, 0.2483, 0.2258],
#           [0.7085, 0.3550, 0.1153, 0.5201, 0.2608],
#           [0.8606, 0.4883, 0.1230, 0.2008, 0.4587]],
# 
#          [[0.1211, 0.3047, 0.4763, 0.5203, 0.1127],
#           [0.2784, 0.1144, 0.5456, 0.1764, 0.0288],
#           [0.6109, 0.2346, 0.8698, 0.4185, 0.2284],
#           [0.4811, 0.8747, 0.8924, 0.1246, 0.7283]]]])
b=a.view(1,8,5)
print(b.shape)#torch.Size([1, 8, 5])
print(b)
# tensor([[[0.4281, 0.2015, 0.0614, 0.5061, 0.6283],
#          [0.7777, 0.0322, 0.4097, 0.2483, 0.2258],
#          [0.7085, 0.3550, 0.1153, 0.5201, 0.2608],
#          [0.8606, 0.4883, 0.1230, 0.2008, 0.4587],
#          [0.1211, 0.3047, 0.4763, 0.5203, 0.1127],
#          [0.2784, 0.1144, 0.5456, 0.1764, 0.0288],
#          [0.6109, 0.2346, 0.8698, 0.4185, 0.2284],
#          [0.4811, 0.8747, 0.8924, 0.1246, 0.7283]]])
c=torch.reshape(b,[1,2,4,5])
print(c)
# tensor([[[[0.4281, 0.2015, 0.0614, 0.5061, 0.6283],
#           [0.7777, 0.0322, 0.4097, 0.2483, 0.2258],
#           [0.7085, 0.3550, 0.1153, 0.5201, 0.2608],
#           [0.8606, 0.4883, 0.1230, 0.2008, 0.4587]],
# 
#          [[0.1211, 0.3047, 0.4763, 0.5203, 0.1127],
#           [0.2784, 0.1144, 0.5456, 0.1764, 0.0288],
#           [0.6109, 0.2346, 0.8698, 0.4185, 0.2284],
#           [0.4811, 0.8747, 0.8924, 0.1246, 0.7283]]]])
d=torch.squeeze(a)
print(d.shape)#torch.Size([2, 4, 5])
print(d)
# tensor([[[0.4281, 0.2015, 0.0614, 0.5061, 0.6283],
#          [0.7777, 0.0322, 0.4097, 0.2483, 0.2258],
#          [0.7085, 0.3550, 0.1153, 0.5201, 0.2608],
#          [0.8606, 0.4883, 0.1230, 0.2008, 0.4587]],
# 
#         [[0.1211, 0.3047, 0.4763, 0.5203, 0.1127],
#          [0.2784, 0.1144, 0.5456, 0.1764, 0.0288],
#          [0.6109, 0.2346, 0.8698, 0.4185, 0.2284],
#          [0.4811, 0.8747, 0.8924, 0.1246, 0.7283]]])
e=torch.unsqueeze(d,dim=1)
print(e.shape)#torch.Size([2, 1, 4, 5])
print(e)
# tensor([[[[0.4281, 0.2015, 0.0614, 0.5061, 0.6283],
#           [0.7777, 0.0322, 0.4097, 0.2483, 0.2258],
#           [0.7085, 0.3550, 0.1153, 0.5201, 0.2608],
#           [0.8606, 0.4883, 0.1230, 0.2008, 0.4587]]],
# 
# 
#         [[[0.1211, 0.3047, 0.4763, 0.5203, 0.1127],
#           [0.2784, 0.1144, 0.5456, 0.1764, 0.0288],
#           [0.6109, 0.2346, 0.8698, 0.4185, 0.2284],
#           [0.4811, 0.8747, 0.8924, 0.1246, 0.7283]]]])

torch.transpose swaps two dimensions of a tensor; a common use is converting the storage format of image batches:

# batch, height, width, channel (NHWC)
data=torch.rand([100,256,256,3])
print(data.shape)
# torch.Size([100, 256, 256, 3])
data_t=torch.transpose(torch.transpose(data,1,2),1,3)# two swaps: NHWC -> NWHC -> NCHW
print(data_t.shape)
# torch.Size([100, 3, 256, 256])
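The same NHWC-to-NCHW reordering can be done in a single call with permute, which reorders all dimensions at once; a sketch using the data tensor above:

data_p=data.permute(0,3,1,2)# NHWC -> NCHW in one call
print(data_p.shape)
# torch.Size([100, 3, 256, 256])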

7. Concatenating and Splitting

torch.cat concatenates tensors along an existing dimension, without adding a new one

torch.stack stacks tensors along a new dimension

torch.split splits a tensor into pieces; it is the inverse of torch.cat (see the check after the code below).

a=torch.randn(2,6)
b=torch.zeros(2,6)
c=torch.cat([a,b],dim=0)
print(c)
# tensor([[-0.8208,  0.9579,  1.0119,  1.2694,  1.3407,  1.9926],
#         [ 0.6872, -0.3352,  1.3424, -1.3681,  0.7423, -0.0288],
#         [ 0.0000,  0.0000,  0.0000,  0.0000,  0.0000,  0.0000],
#         [ 0.0000,  0.0000,  0.0000,  0.0000,  0.0000,  0.0000]])
d=torch.cat([a,b],dim=1)
print(d)
# tensor([[-0.8208,  0.9579,  1.0119,  1.2694,  1.3407,  1.9926,  0.0000,  0.0000,
#           0.0000,  0.0000,  0.0000,  0.0000],
#         [ 0.6872, -0.3352,  1.3424, -1.3681,  0.7423, -0.0288,  0.0000,  0.0000,
#           0.0000,  0.0000,  0.0000,  0.0000]])
e=torch.stack([a,b],dim=0)
f=torch.stack([a,b],dim=1)
print(e)
# tensor([[[-0.8208,  0.9579,  1.0119,  1.2694,  1.3407,  1.9926],
#          [ 0.6872, -0.3352,  1.3424, -1.3681,  0.7423, -0.0288]],
# 
#         [[ 0.0000,  0.0000,  0.0000,  0.0000,  0.0000,  0.0000],
#          [ 0.0000,  0.0000,  0.0000,  0.0000,  0.0000,  0.0000]]])
print(f)
# tensor([[[-0.8208,  0.9579,  1.0119,  1.2694,  1.3407,  1.9926],
#          [ 0.0000,  0.0000,  0.0000,  0.0000,  0.0000,  0.0000]],
# 
#         [[ 0.6872, -0.3352,  1.3424, -1.3681,  0.7423, -0.0288],
#          [ 0.0000,  0.0000,  0.0000,  0.0000,  0.0000,  0.0000]]])
g,h=torch.split(c,split_size_or_sections=2,dim=0)# two pieces of 2 rows each along dim 0
i,j=torch.split(c,split_size_or_sections=[5,1],dim=1)# pieces of width 5 and 1 along dim 1
print(g)
# tensor([[-0.8208,  0.9579,  1.0119,  1.2694,  1.3407,  1.9926],
#         [ 0.6872, -0.3352,  1.3424, -1.3681,  0.7423, -0.0288]])
print(h)
# tensor([[0., 0., 0., 0., 0., 0.],
#         [0., 0., 0., 0., 0., 0.]])
print(i)
# tensor([[-0.8208,  0.9579,  1.0119,  1.2694,  1.3407],
#         [ 0.6872, -0.3352,  1.3424, -1.3681,  0.7423],
#         [ 0.0000,  0.0000,  0.0000,  0.0000,  0.0000],
#         [ 0.0000,  0.0000,  0.0000,  0.0000,  0.0000]])
print(j)
# tensor([[ 1.9926],
#         [-0.0288],
#         [ 0.0000],
#         [ 0.0000]])
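As a quick check of the inverse relationship mentioned above, concatenating the split pieces reproduces the original tensor:

print(torch.equal(torch.cat(torch.split(c,2,dim=0),dim=0),c))# True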
