def test_transpose():
    """Demonstrate the difference between ``transpose`` and ``view``.

    Both produce a tensor of shape (1, 3, 2, 4) from an input of shape
    (1, 2, 3, 4), but ``transpose(1, 2)`` swaps two dimensions (changing
    the traversal order of the underlying data) while ``view`` merely
    reinterprets the same contiguous memory with a new shape — so the
    two results contain the same values in different positions and
    ``torch.equal(b, c)`` is False in general.
    """
    a = torch.randn(1, 2, 3, 4)
    print(f"a: {a}")
    print(f"a.shape: {a.shape}")
    # Sample output:
    # a: tensor([[[[-0.0539, -0.8428, 0.3784, 0.0427],
    #              [ 1.0160, 0.5905, -0.1011, -1.0597],
    #              [-0.9667, -0.3643, 0.9854, 1.6471]],
    #             [[-0.7310, 0.9642, -0.3973, -0.6779],
    #              [ 0.9616, 0.6982, -1.5842, -0.0111],
    #              [ 1.6410, 0.2154, -0.9575, -0.1388]]]])
    # a.shape: torch.Size([1, 2, 3, 4])
    # Explanation: `a` is a 4-D tensor — dim 0 holds 1 three-dim block;
    # each 3-D block holds 2 two-dim blocks; each 2-D block holds 3
    # one-dim rows; each row holds 4 scalars.
    b = a.transpose(1, 2)  # swap dims 1 and 2 -> shape [1, 3, 2, 4]
    print(f"b: {b}")
    print(f"b.shape: {b.shape}")
    c = a.view(1, 3, 2, 4)  # same target shape, but keeps memory order
    print(f"c: {c}")
    print(f"c.shape: {c.shape}")
"""
Printed output:
a: tensor([[[[-0.5109, 0.5126, 1.1355, 0.4597],
[ 0.3988, 1.0356, -0.4229, 0.4401],
[-0.9824, -0.5800, 0.6165, 0.2332]],
[[-0.3949, 0.0813, 0.3128, 1.8720],
[-0.3286, 2.7728, 0.6836, -0.5554],
[-0.5357, 0.4901, 1.2495, 1.8860]]]])
a.shape: torch.Size([1, 2, 3, 4])
b: tensor([[[[-0.5109, 0.5126, 1.1355, 0.4597],
[-0.3949, 0.0813, 0.3128, 1.8720]],
[[ 0.3988, 1.0356, -0.4229, 0.4401],
[-0.3286, 2.7728, 0.6836, -0.5554]],
[[-0.9824, -0.5800, 0.6165, 0.2332],
[-0.5357, 0.4901, 1.2495, 1.8860]]]])
b.shape: torch.Size([1, 3, 2, 4])
c: tensor([[[[-0.5109, 0.5126, 1.1355, 0.4597],
[ 0.3988, 1.0356, -0.4229, 0.4401]],
[[-0.9824, -0.5800, 0.6165, 0.2332],
[-0.3949, 0.0813, 0.3128, 1.8720]],
[[-0.3286, 2.7728, 0.6836, -0.5554],
[-0.5357, 0.4901, 1.2495, 1.8860]]]])
c.shape: torch.Size([1, 3, 2, 4])
"""
"""
REPL example (transpose vs. view, cf. the PyTorch docs for Tensor.view):
>>> x = torch.randn(4, 4)
>>> x.size()
torch.Size([4, 4])
>>> y = x.view(16)
>>> y.size()
torch.Size([16])
>>> z = x.view(-1, 8)  # the size -1 is inferred from other dimensions
>>> z.size()
torch.Size([2, 8])
>>> a = torch.randn(1, 2, 3, 4)
>>> a.size()
torch.Size([1, 2, 3, 4])
>>> b = a.transpose(1, 2)  # Swaps 2nd and 3rd dimension
>>> b.size()
torch.Size([1, 3, 2, 4])
>>> c = a.view(1, 3, 2, 4)  # Does not change tensor layout in memory
>>> c.size()
torch.Size([1, 3, 2, 4])
>>> torch.equal(b, c)
False
"""