PyTorch--Tensor拼接、切分、置换

目录

1、拼接

torch.cat()

torch.stack()

2、切分

torch.chunk()

torch.split()

 3、置换

1、拼接
torch.cat()

torch.cat(tensors,dim=0,out=None):将张量按照dim维度进行拼接

torch.stack()

torch.stack(tensors,dim=0,out=None):将张量在新创建的dim维度上进行拼接

(tensors:待拼接的张量序列,dim:要拼接的维度,out:输出的张量,如果指定,则返回的张量与out指向同一个地址)

import torch

# Demo: joining tensors with torch.cat (along an existing dim)
# and torch.stack (along a newly created dim).
a = torch.tensor([[1, 2, 3], [4, 5, 6]])
b = torch.tensor([[10, 10, 10], [10, 10, 10]])
print("a:", a.shape)
print("b:", b.shape)
# -------------------concatenation--------------------
# torch.cat joins the given tensors along dim; every dimension other
# than the one being concatenated must agree.
# a and b are both (2, 3), so cat along dim=0 yields (4, 3).
print("dim=0:\n", torch.cat((a, b), dim=0))
print(torch.cat((a, b), dim=0).shape)
# dim=0:
#  tensor([[ 1,  2,  3],
#         [ 4,  5,  6],
#         [10, 10, 10],
#         [10, 10, 10]])
# torch.Size([4, 3])

print("dim=1:\n", torch.cat((a, b), dim=1))
print(torch.cat((a, b), dim=1).shape)
# dim=1:
#  tensor([[ 1,  2,  3, 10, 10, 10],
#         [ 4,  5,  6, 10, 10, 10]])
# torch.Size([2, 6])

# torch.stack inserts a brand-new dimension at dim, so stacking two
# (2, 3) tensors always produces a rank-3 result.
print(torch.stack((a, b), dim=0))
print(torch.stack((a, b), dim=0).shape)
# tensor([[[ 1,  2,  3],
#          [ 4,  5,  6]],
#
#         [[10, 10, 10],
#          [10, 10, 10]]])
# torch.Size([2, 2, 3])

print(torch.stack((a, b), dim=1))
print(torch.stack((a, b), dim=1).shape)
# tensor([[[ 1,  2,  3],
#          [10, 10, 10]],
#
#         [[ 4,  5,  6],
#          [10, 10, 10]]])
# torch.Size([2, 2, 3])

print(torch.stack((a, b), dim=2))
print(torch.stack((a, b), dim=2).shape)
# tensor([[[ 1, 10],
#          [ 2, 10],
#          [ 3, 10]],
#
#         [[ 4, 10],
#          [ 5, 10],
#          [ 6, 10]]])
# torch.Size([2, 3, 2])
2、切分
torch.chunk()

torch.chunk(input,chunks,dim=0):将张量按照dim维度进行切分,若不能整除,则最后一份张量小于其他张量

(input:待切分的张量,chunks:切分的数量,dim:切分的维度)

# torch.chunk on a 2-D tensor: row i holds six copies of i, giving shape (4, 6).
t = torch.tensor([[i] * 6 for i in range(1, 5)])
row_halves = torch.chunk(t, 2, 0)
print(row_halves)
print(type(row_halves))  # chunk returns a tuple; here it holds two tensors
# (tensor([[1, 1, 1, 1, 1, 1],
#         [2, 2, 2, 2, 2, 2]]), tensor([[3, 3, 3, 3, 3, 3],
#         [4, 4, 4, 4, 4, 4]]))
# <class 'tuple'>
print(row_halves[0])
print(row_halves[1])
# tensor([[1, 1, 1, 1, 1, 1],
#         [2, 2, 2, 2, 2, 2]])
# tensor([[3, 3, 3, 3, 3, 3],
#         [4, 4, 4, 4, 4, 4]])

# Chunking along dim 1 cuts the six columns into three pieces of width 2.
col_thirds = torch.chunk(t, 3, 1)
print(col_thirds)
print(col_thirds[0])
print(col_thirds[1])
# (tensor([[1, 1],
#         [2, 2],
#         [3, 3],
#         [4, 4]]), tensor([[1, 1],
#         [2, 2],
#         [3, 3],
#         [4, 4]]), tensor([[1, 1],
#         [2, 2],
#         [3, 3],
#         [4, 4]]))
# tensor([[1, 1],
#         [2, 2],
#         [3, 3],
#         [4, 4]])
# tensor([[1, 1],
#         [2, 2],
#         [3, 3],
#         [4, 4]])

# torch.chunk on a 3-D tensor of shape (3, 4, 6): three identical (4, 6) slices.
t = torch.tensor([[[1, 1, 1, 1, 1, 1], [2, 2, 2, 2, 2, 2], [3, 3, 3, 3, 3, 3], [4, 4, 4, 4, 4, 4]],
                  [[1, 1, 1, 1, 1, 1], [2, 2, 2, 2, 2, 2], [3, 3, 3, 3, 3, 3], [4, 4, 4, 4, 4, 4]],
                  [[1, 1, 1, 1, 1, 1], [2, 2, 2, 2, 2, 2], [3, 3, 3, 3, 3, 3], [4, 4, 4, 4, 4, 4]]])
print(t.shape)
# torch.Size([3, 4, 6])

# Along dim 0: three pieces of shape (1, 4, 6).
print(torch.chunk(t, 3, 0))
print(torch.chunk(t, 3, 0)[0].shape)
# (tensor([[[1, 1, 1, 1, 1, 1],
#          [2, 2, 2, 2, 2, 2],
#          [3, 3, 3, 3, 3, 3],
#          [4, 4, 4, 4, 4, 4]]]), tensor([[[1, 1, 1, 1, 1, 1],
#          [2, 2, 2, 2, 2, 2],
#          [3, 3, 3, 3, 3, 3],
#          [4, 4, 4, 4, 4, 4]]]), tensor([[[1, 1, 1, 1, 1, 1],
#          [2, 2, 2, 2, 2, 2],
#          [3, 3, 3, 3, 3, 3],
#          [4, 4, 4, 4, 4, 4]]]))
# torch.Size([1, 4, 6])

# Along dim 1: the 4 rows split into two groups of 2, giving
# two pieces of shape (3, 2, 6).
print(torch.chunk(t, 2, 1))
print(torch.chunk(t, 2, 1)[0].shape)
# (tensor([[[1, 1, 1, 1, 1, 1],
#          [2, 2, 2, 2, 2, 2]],
#
#         [[1, 1, 1, 1, 1, 1],
#          [2, 2, 2, 2, 2, 2]],
#
#         [[1, 1, 1, 1, 1, 1],
#          [2, 2, 2, 2, 2, 2]]]), tensor([[[3, 3, 3, 3, 3, 3],
#          [4, 4, 4, 4, 4, 4]],
#
#         [[3, 3, 3, 3, 3, 3],
#          [4, 4, 4, 4, 4, 4]],
#
#         [[3, 3, 3, 3, 3, 3],
#          [4, 4, 4, 4, 4, 4]]]))
# torch.Size([3, 2, 6])

# Along dim 2: the 6 columns split into three groups of 2, giving
# three pieces of shape (3, 4, 2).
print(torch.chunk(t, 3, 2))
print(torch.chunk(t, 3, 2)[0].shape)
# (tensor([[[1, 1],
#          [2, 2],
#          [3, 3],
#          [4, 4]],
# 
#         [[1, 1],
#          [2, 2],
#          [3, 3],
#          [4, 4]],
# 
#         [[1, 1],
#          [2, 2],
#          [3, 3],
#          [4, 4]]]), tensor([[[1, 1],
#          [2, 2],
#          [3, 3],
#          [4, 4]],
# 
#         [[1, 1],
#          [2, 2],
#          [3, 3],
#          [4, 4]],
# 
#         [[1, 1],
#          [2, 2],
#          [3, 3],
#          [4, 4]]]), tensor([[[1, 1],
#          [2, 2],
#          [3, 3],
#          [4, 4]],
# 
#         [[1, 1],
#          [2, 2],
#          [3, 3],
#          [4, 4]],
# 
#         [[1, 1],
#          [2, 2],
#          [3, 3],
#          [4, 4]]]))
# torch.Size([3, 4, 2])
torch.split()

torch.split():将张量按照dim维度进行切分,可指定每一个分量的长度;若按整数长度切分且不能整除,最后一份小于其他分量

torch.split(input,split_size_or_sections,dim=0)

(input:待切分的张量,split_size_or_sections:若为int,表示每一个分量的长度;若为list,按照list元素给分量分配长度;dim:切分的维度)

# torch.split on a (4, 6) tensor: row i repeats the value i six times.
t = torch.tensor([[v] * 6 for v in (1, 2, 3, 4)])
# An int size cuts dim 0 into pieces of two rows apiece.
print(torch.split(t, 2, dim=0))
# (tensor([[1, 1, 1, 1, 1, 1],
#         [2, 2, 2, 2, 2, 2]]), tensor([[3, 3, 3, 3, 3, 3],
#         [4, 4, 4, 4, 4, 4]]))

# A list of sizes fixes the width of every piece along dim 1: 1 + 2 + 3 = 6.
t1 = torch.split(t, [1, 2, 3], dim=1)
print(t1)
for piece in t1:
    print(piece)
# (tensor([[1],
#         [2],
#         [3],
#         [4]]), tensor([[1, 1],
#         [2, 2],
#         [3, 3],
#         [4, 4]]), tensor([[1, 1, 1],
#         [2, 2, 2],
#         [3, 3, 3],
#         [4, 4, 4]]))
# 
# tensor([[1],
#         [2],
#         [3],
#         [4]])
# tensor([[1, 1],
#         [2, 2],
#         [3, 3],
#         [4, 4]])
# tensor([[1, 1, 1],
#         [2, 2, 2],
#         [3, 3, 3],
#         [4, 4, 4]])
 3、置换
# Transposing / permuting dimensions on a 3x3 tensor holding 1..9.
a = torch.arange(1, 10).reshape(3, 3)
print(a)
# tensor([[1, 2, 3],
#         [4, 5, 6],
#         [7, 8, 9]])
# Plain transpose — torch.t / .T only apply to tensors of at most 2 dims.
print(a.T)
print(torch.t(a))
print(torch.transpose(a, 1, 0))  # transpose(a, 1, 0) == transpose(a, 0, 1): swaps dim 0 and dim 1
# tensor([[1, 4, 7],
#         [2, 5, 8],
#         [3, 6, 9]])
print(a.permute(1, 0))  # permute generalises to any number of dims
# tensor([[1, 4, 7],
#         [2, 5, 8],
#         [3, 6, 9]])

c = torch.unsqueeze(a, 0)  # add a leading axis, making a rank-3 tensor
print(c)
print(c.shape)
# tensor([[[1, 2, 3],
#          [4, 5, 6],
#          [7, 8, 9]]])
# torch.Size([1, 3, 3])

swapped = c.permute(1, 0, 2)  # exchange dims 0 and 1
print(swapped)
print(swapped.shape)
# tensor([[[1, 2, 3]],
#
#         [[4, 5, 6]],
#
#         [[7, 8, 9]]])
# torch.Size([3, 1, 3])

reversed_dims = c.permute(2, 1, 0)  # fully reverse the dimension order
print(reversed_dims)
print(reversed_dims.shape)
# tensor([[[1],
#          [4],
#          [7]],
#
#         [[2],
#          [5],
#          [8]],
#
#         [[3],
#          [6],
#          [9]]])
# torch.Size([3, 3, 1])
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包

打赏作者

ww'

你的鼓励将是我创作的最大动力

¥1 ¥2 ¥4 ¥6 ¥10 ¥20
扫码支付:¥1
获取中
扫码支付

您的余额不足,请更换扫码支付或充值

打赏作者

实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值