PyTorch



torch.meshgrid()

import torch

a=torch.tensor([1,2,3])
b=torch.tensor([4,5])
c,d=torch.meshgrid(a,b)

Both c and d have shape [len(a), len(b)] = [3, 2]: c repeats each element of a across its row, and d repeats b in every row.

c: tensor([[1, 1],
           [2, 2],
           [3, 3]])
c.shape = [3, 2]
d: tensor([[4, 5],
           [4, 5],
           [4, 5]])
d.shape = [3, 2]
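
A common use of meshgrid (for example when generating anchor points, as in later sections) is to enumerate every (x, y) position of a grid. A minimal sketch; the indexing='ij' argument exists on PyTorch >= 1.10 and matches the default behaviour shown above:

ys, xs = torch.meshgrid(torch.arange(2), torch.arange(3), indexing='ij')
points = torch.stack([xs.reshape(-1), ys.reshape(-1)], dim=-1)
print(points)
# tensor([[0, 0], [1, 0], [2, 0],
#         [0, 1], [1, 1], [2, 1]])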

torch.stack()

T1 = torch.tensor([[1, 2, 3],
                   [4, 5, 6],
                   [7, 8, 9]])
T2 = torch.tensor([[10, 20, 30],
                   [40, 50, 60],
                   [70, 80, 90]])
print(torch.stack((T1,T2),dim=1).shape)
print(torch.stack((T1,T2),dim=1))

Concatenates a sequence of tensors along a new dimension; all input tensors must have the same shape.

torch.Size([3, 2, 3])
tensor([[[ 1,  2,  3],
         [10, 20, 30]],

        [[ 4,  5,  6],
         [40, 50, 60]],

        [[ 7,  8,  9],
         [70, 80, 90]]])
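
For comparison, stacking the same T1 and T2 along dim=0 puts the new dimension in front instead:

print(torch.stack((T1,T2),dim=0).shape)
# torch.Size([2, 3, 3])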

Tensor.view()

T1 = torch.tensor([[1, 2, 3],
                   [4, 5, 6],
                   [7, 8, 9]])
T2 = torch.tensor([[10, 20, 30],
                   [40, 50, 60],
                   [70, 80, 90]])
T3=torch.stack((T1,T2),dim=1).view(1, 1, 3, 3, 2)
print(T3.shape)

Roughly the counterpart of reshape in NumPy: it returns a tensor with the same data but a different shape. view() requires contiguous memory (see the sketch after Tensor.permute() below).

torch.Size([1, 1, 3, 3, 2])

Tensor.permute()

a=torch.tensor([[[1,2,3],[4,5,6]]])
print(a.shape)
b=a.permute(2,0,1)
print(b.shape)

Reorders (swaps) the dimensions of a tensor.

torch.Size([1, 2, 3])
torch.Size([3, 1, 2])
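
permute returns a view with rearranged strides rather than a copy, so the result is usually non-contiguous and view() on it fails; contiguous() or reshape() resolves this. A minimal sketch:

a=torch.tensor([[[1,2,3],[4,5,6]]])
b=a.permute(2,0,1)              # non-contiguous view of a
try:
    b.view(-1)                  # raises: view needs contiguous memory
except RuntimeError as e:
    print("view failed:", e)
print(b.contiguous().view(-1))  # tensor([1, 4, 2, 5, 3, 6])
print(b.reshape(-1))            # reshape copies when needed; same result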

torch.reshape(-1)

a=torch.arange(12).reshape(1,2,3,2)
print(a.shape)
b=a.reshape(1,2,1,-1)
print(b.shape)

-1: the size of that dimension is inferred automatically from the total number of elements.

torch.Size([1, 2, 3, 2])
torch.Size([1, 2, 1, 6])

a[..., :3]

a=torch.arange(12).reshape(1,2,6)
print(a)
print(a.shape)
print(a[...,:3])
print(a[...,:3].shape)
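
... (Ellipsis) stands for all the leading dimensions, so here a[..., :3] is equivalent to a[:, :, :3]: keep everything, but slice the last dimension to its first three elements.
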
tensor([[[ 0,  1,  2,  3,  4,  5],
         [ 6,  7,  8,  9, 10, 11]]])
torch.Size([1, 2, 6])
tensor([[[0, 1, 2],
         [6, 7, 8]]])
torch.Size([1, 2, 3])

(a+b)

a=torch.arange(6).reshape(2,1,3)
b=torch.arange(24).reshape(2,4,3)
print((a+b).shape)

Broadcasting: comparing the shapes from the right, each pair of sizes must be equal or one of them must be 1; here a's middle dimension (size 1) is stretched to match b's (size 4).

torch.Size([2, 4, 3])
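
A short sketch of the full rule (standard PyTorch broadcasting, nothing specific to this example): sizes are compared from the trailing dimension, a missing leading dimension counts as 1, and a mismatch where neither size is 1 raises an error.

a=torch.arange(6).reshape(2,1,3)
b=torch.arange(24).reshape(2,4,3)
print((a+b).shape)            # torch.Size([2, 4, 3]): size 1 stretches to 4

c=torch.arange(12).reshape(4,3)
print((a+c).shape)            # torch.Size([2, 4, 3]): missing leading dim counts as 1

try:
    b+torch.zeros(2,3,3)      # 4 vs 3: neither is 1, so this fails
except RuntimeError as e:
    print("not broadcastable:", e)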

_pair(stride) for stride in strides

import torch
from torch.nn.modules.utils import _pair
strides=[8,16,32]
strides = [_pair(stride) for stride in strides]
print(strides)
print(strides[0])
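
_pair (from torch.nn.modules.utils) turns a scalar into a 2-tuple and leaves a 2-tuple unchanged; conv and pooling modules use it internally so that stride=8 and stride=(8, 8) mean the same thing.
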
[(8, 8), (16, 16), (32, 32)]
(8, 8)

torch.arange(0,4)

a=torch.arange(4)
b=torch.arange(0,4)
print(a)
print(b)
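
The start argument defaults to 0 and the end point is exclusive, so torch.arange(4) and torch.arange(0,4) produce the same result.
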
tensor([0, 1, 2, 3])
tensor([0, 1, 2, 3])

reshape(-1)

a=torch.arange(12).reshape(3,4)
print(a)
print(a.reshape(-1))
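
With a single -1, reshape flattens the tensor into a 1-D vector.
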
tensor([[ 0,  1,  2,  3],
        [ 4,  5,  6,  7],
        [ 8,  9, 10, 11]])
tensor([ 0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11])

new_full

a=torch.arange(12).reshape(-1)
print(a)
print(a.shape)
b= a.new_full((a.shape[0], ), 2)
print(b)
print(b.shape)

Constructs a tensor of the given size filled with the value 2, inheriting a's dtype and device.

tensor([ 0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11])
torch.Size([12])
tensor([2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2])
torch.Size([12])

torch.stack([shift_xx, shift_yy, stride_w, stride_h], dim=-1)

shift_xx=torch.arange(12).reshape(-1)
shift_yy=torch.arange(12).reshape(-1)
stride_w= shift_xx.new_full((shift_xx.shape[0], ), 8)
stride_h=shift_xx.new_full((shift_xx.shape[0], ), 8)
shifts = torch.stack([shift_xx, shift_yy, stride_w, stride_h], dim=-1)
print(shifts)
print(shifts.shape)
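
Each row of shifts is (shift_x, shift_y, stride_w, stride_h): this is the pattern detection codebases use to attach a per-location stride to every grid position when generating priors.
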
tensor([[ 0,  0,  8,  8],
        [ 1,  1,  8,  8],
        [ 2,  2,  8,  8],
        [ 3,  3,  8,  8],
        [ 4,  4,  8,  8],
        [ 5,  5,  8,  8],
        [ 6,  6,  8,  8],
        [ 7,  7,  8,  8],
        [ 8,  8,  8,  8],
        [ 9,  9,  8,  8],
        [10, 10,  8,  8],
        [11, 11,  8,  8]])
torch.Size([12, 4])

torch.cat(A,dim=1)

a=torch.arange(6).reshape(1,2,3)
b=torch.arange(12).reshape(1,4,3)
c=torch.arange(18).reshape(1,6,3)
A=[a,b,c]
d=torch.cat(A,dim=1)
print(d.shape)
print(d)
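
Unlike stack, torch.cat joins tensors along an existing dimension (here dim=1), so no new dimension is created; the inputs only need to agree in the remaining dimensions.
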
torch.Size([1, 12, 3])
tensor([[[ 0,  1,  2],
         [ 3,  4,  5],
         [ 0,  1,  2],
         [ 3,  4,  5],
         [ 6,  7,  8],
         [ 9, 10, 11],
         [ 0,  1,  2],
         [ 3,  4,  5],
         [ 6,  7,  8],
         [ 9, 10, 11],
         [12, 13, 14],
         [15, 16, 17]]])

d=torch.cat(A)

a=torch.arange(12).reshape(3,4)
b=torch.arange(8).reshape(2,4)
c=torch.arange(8).reshape(2,4)
A=[a,b,c]
d=torch.cat(A)
print(d.shape)
print(d)
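
When dim is omitted, cat defaults to dim=0: the tensors are appended vertically as extra rows.
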
torch.Size([7, 4])
tensor([[ 0,  1,  2,  3],
        [ 4,  5,  6,  7],
        [ 8,  9, 10, 11],
        [ 0,  1,  2,  3],
        [ 4,  5,  6,  7],
        [ 0,  1,  2,  3],
        [ 4,  5,  6,  7]])

Tensor.detach()

Returns a new tensor that is detached from the current computation graph but still points at the same storage as the original. The only difference is that requires_grad is False: the detached tensor never needs its gradient computed and carries no grad.

Even if you later set its requires_grad back to True, gradients still will not flow through the detach point into the original graph.

You can keep computing with the new tensor; when backpropagation later reaches the point where detach() was called, it stops there and does not propagate any further.

Note:

The tensor returned by detach shares memory with the original tensor, so an in-place modification of one is visible in the other.
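
A minimal sketch of both properties (shared storage, and the gradient stopping at the detach point):

import torch

x = torch.ones(3, requires_grad=True)
y = x * 2
z = y.detach()            # cut from the graph, but shares storage with y
print(z.requires_grad)    # False

z[0] = 100.0              # in-place change is visible through y too
print(y)                  # tensor([100., 2., 2.], grad_fn=<MulBackward0>)

w = torch.ones(2, requires_grad=True)
out = ((w * 3).detach() * w).sum()
out.backward()            # backprop stops at detach(): only the right-hand w gets grad
print(w.grad)             # tensor([3., 3.])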

torch.squeeze() / torch.unsqueeze()

torch.unsqueeze() expands a tensor's dimensions: it inserts a dimension of size one at the given position.

a=torch.arange(24).reshape(6,4)
print(a.shape)
print(a.unsqueeze(0))
print(a.unsqueeze(0).shape)
torch.Size([6, 4])
tensor([[[ 0,  1,  2,  3],
         [ 4,  5,  6,  7],
         [ 8,  9, 10, 11],
         [12, 13, 14, 15],
         [16, 17, 18, 19],
         [20, 21, 22, 23]]])
torch.Size([1, 6, 4])
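
torch.squeeze() is the inverse: it removes dimensions of size one. A minimal sketch:

a=torch.zeros(1,6,1,4)
print(a.squeeze().shape)   # torch.Size([6, 4]): all size-1 dims removed
print(a.squeeze(0).shape)  # torch.Size([6, 1, 4]): only dim 0 removed
print(a.squeeze(1).shape)  # torch.Size([1, 6, 1, 4]): dim 1 is not size 1, unchanged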

a[..., :2] * b[:, :2] + b[:, :2]

a=torch.arange(24).reshape(2,3,4)  # e.g. box predictions: (batch, priors, 4)
b=torch.arange(9).reshape(3,3)     # e.g. per-prior grid info, broadcast against a
print(a[...,:2])                   # first two channels (center offsets)
print(a[...,2:])                   # last two channels (log-sizes)
print(b[:,:2])
print(a[...,:2]*b[:,:2])
print((a[...,:2]*b[:,:2]).shape)
print(a[...,:2]*b[:,:2]+b[:,:2])
c = a[..., 2:].float().exp() * b[:, 2:]   # .float(): exp is not defined for integer tensors
xys=(a[...,:2]*b[:,:2]+b[:,:2])           # decoded centers: offset * scale + grid position
whs=a[...,2:].float().exp() * b[:, 2:]    # decoded sizes: exp(pred) * scale
print(c.shape)
print(xys[...,0].shape)
print(xys[...,0])
# convert (center, size) into (top-left, bottom-right) corner format
tl_x = (xys[..., 0] - whs[..., 0] / 2)
tl_y = (xys[..., 1] - whs[..., 1] / 2)
br_x = (xys[..., 0] + whs[..., 0] / 2)
br_y = (xys[..., 1] + whs[..., 1] / 2)
decoded_bboxes = torch.stack([tl_x, tl_y, br_x, br_y], -1)
print(decoded_bboxes.shape)
tensor([[[ 0,  1],
         [ 4,  5],
         [ 8,  9]],

        [[12, 13],
         [16, 17],
         [20, 21]]])
tensor([[[ 2,  3],
         [ 6,  7],
         [10, 11]],

        [[14, 15],
         [18, 19],
         [22, 23]]])
tensor([[0, 1],
        [3, 4],
        [6, 7]])
tensor([[[  0,   1],
         [ 12,  20],
         [ 48,  63]],

        [[  0,  13],
         [ 48,  68],
         [120, 147]]])
torch.Size([2, 3, 2])
tensor([[[  0,   2],
         [ 15,  24],
         [ 54,  70]],

        [[  0,  14],
         [ 51,  72],
         [126, 154]]])
torch.Size([2, 3, 2])
torch.Size([2, 3])
tensor([[  0,  15,  54],
        [  0,  51, 126]])
torch.Size([2, 3, 4])

Tensor.repeat()

repeat() in PyTorch tiles a tensor: each argument gives how many copies to make along that dimension.

a=torch.arange(24).reshape(1,6,4)
print(a.repeat(2, 1, 1))
print(a.repeat(2, 1, 1).shape)
tensor([[[ 0,  1,  2,  3],
         [ 4,  5,  6,  7],
         [ 8,  9, 10, 11],
         [12, 13, 14, 15],
         [16, 17, 18, 19],
         [20, 21, 22, 23]],

        [[ 0,  1,  2,  3],
         [ 4,  5,  6,  7],
         [ 8,  9, 10, 11],
         [12, 13, 14, 15],
         [16, 17, 18, 19],
         [20, 21, 22, 23]]])
torch.Size([2, 6, 4])

a[:,:2] and a[:,2:]

a=torch.arange(12).reshape(3,4)
b=a[:,:2]
c=a[:,2:]
print(b)
print(c)
tensor([[0, 1],
        [4, 5],
        [8, 9]])
tensor([[ 2,  3],
        [ 6,  7],
        [10, 11]])

c=a[b]

a=torch.arange(12).reshape(3,4)
b=torch.tensor([True,False,True])
print(a)
c=a[b]
print(c)
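
Indexing with a boolean tensor keeps only the rows where the mask is True.
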
tensor([[ 0,  1,  2,  3],
        [ 4,  5,  6,  7],
        [ 8,  9, 10, 11]])
tensor([[ 0,  1,  2,  3],
        [ 8,  9, 10, 11]])

SimOTA

a=torch.tensor([1,0,0,1,5])
b=torch.tensor([True,False,False,True,True])
print(a[b])

Only the elements at True positions are selected; SimOTA-style label assignment (as used in YOLOX) relies on this kind of boolean masking to pick out candidate predictions.

tensor([1, 1, 5])

**kwargs

The regular parameter list contains only the single variable a, but through **kwargs any number of extra keyword arguments can be passed. kwargs collects the keyword arguments that are not matched by the regular parameter list into a dict; here kwargs is a dict.

def fun(a, **kwargs):
    print(a, kwargs)

fun(1, b=4, c=5)
fun(45, b=6, c=7, d=8)
1 {'b': 4, 'c': 5}
45 {'b': 6, 'c': 7, 'd': 8}