PyTorch Commonly Used APIs

Tensor Creation

import torch
torch.__version__
'1.13.1+cu116'
torch.cuda.is_available()
True
device=torch.device("cuda" if torch.cuda.is_available() else "cpu")
a=torch.rand(5,3,dtype=torch.float32)   # uniform samples in [0, 1)
b=torch.ones(5,3,requires_grad=True)
b.to(device)   # returns a copy on the device; b itself stays where it was
tensor([[1., 1., 1.],
        [1., 1., 1.],
        [1., 1., 1.],
        [1., 1., 1.],
        [1., 1., 1.]], device='cuda:0', grad_fn=<ToCopyBackward0>)
torch.from_numpy((b+a).detach().numpy()).to(device)   # detach() first: .numpy() fails on a tensor that requires grad
b=(b+2)*b*3   # rebinds b to a non-leaf result: 3*b**2 + 6*b
out=b.mean()
out.backward()   # gradients flow back to the original leaf tensors
b
tensor([[9., 9., 9.],
        [9., 9., 9.],
        [9., 9., 9.],
        [9., 9., 9.],
        [9., 9., 9.]], grad_fn=<MulBackward0>)
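Note that backward() deposits the gradient on the original leaf tensor, not on the name b, which was rebound to a non-leaf result above. A minimal sketch of inspecting the gradient, using a hypothetical variable w so the leaf stays reachable:

import torch

w = torch.ones(5, 3, requires_grad=True)   # leaf tensor
y = (w + 2) * w * 3                        # y = 3*w**2 + 6*w
out = y.mean()
out.backward()
print(w.grad)                              # d(out)/dw = (6*w + 6) / 15 = 0.8 everywhere
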
b.type()
'torch.FloatTensor'
torch.normal(mean=0.0,std=torch.rand(5))   # one sample per entry of std
torch.Tensor(2,3).uniform_(-1,5)           # fill an uninitialized 2x3 tensor in place with U(-1, 5)
tensor([[-0.4962,  3.7283,  0.0954],
        [ 3.5756, -0.8013,  4.5442]])
# shuffle order
index=torch.randperm(15)
index
tensor([14,  7,  6,  3, 11, 10, 13,  8,  5,  4,  9, 12,  2,  0,  1])
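In practice randperm is mostly used to shuffle the rows of another tensor via fancy indexing; a minimal sketch:

import torch

data = torch.arange(15).reshape(5, 3)   # 5 samples, 3 features
index = torch.randperm(data.size(0))    # random permutation of the row indices
shuffled = data[index]                  # all rows reordered in one shot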

Tensor Arithmetic

c=a+b              # out-of-place: a is unchanged
print(a)
c=torch.add(a,b)   # function form, same as a+b
print(a)
c=a.add_(b)        # trailing underscore: in-place, a is overwritten
a
tensor([[18.0381, 18.1101, 18.4486],
        [18.2338, 18.6109, 18.8925],
        [18.9273, 18.0632, 18.6523],
        [18.6284, 18.3229, 18.9771],
        [18.3819, 18.5554, 18.8100]], grad_fn=<AddBackward0>)
tensor([[18.0381, 18.1101, 18.4486],
        [18.2338, 18.6109, 18.8925],
        [18.9273, 18.0632, 18.6523],
        [18.6284, 18.3229, 18.9771],
        [18.3819, 18.5554, 18.8100]], grad_fn=<AddBackward0>)
tensor([[27.0381, 27.1101, 27.4486],
        [27.2338, 27.6109, 27.8925],
        [27.9273, 27.0632, 27.6523],
        [27.6284, 27.3229, 27.9771],
        [27.3819, 27.5554, 27.8100]], grad_fn=<AddBackward0>)
c=a-b              # out-of-place: a is unchanged
print(a)
c=torch.sub(a,b)   # function form, same as a-b
print(a)
c=a.sub_(b)        # in-place: a is overwritten
a,c
tensor([[0., 0., 0.],
        [0., 0., 0.],
        [0., 0., 0.],
        [0., 0., 0.],
        [0., 0., 0.]], grad_fn=<SubBackward0>)
tensor([[0., 0., 0.],
        [0., 0., 0.],
        [0., 0., 0.],
        [0., 0., 0.],
        [0., 0., 0.]], grad_fn=<SubBackward0>)
(tensor([[0., 0., 0.],
         [0., 0., 0.],
         [0., 0., 0.],
         [0., 0., 0.],
         [0., 0., 0.]], grad_fn=<SubBackward0>),
 tensor([[0., 0., 0.],
         [0., 0., 0.],
         [0., 0., 0.],
         [0., 0., 0.],
         [0., 0., 0.]], grad_fn=<SubBackward0>))
torch.mm(b.T,a),torch.matmul(a.T,b)   # mm: strictly 2-D; matmul also broadcasts over batch dims
(tensor([[0., 0., 0.],
         [0., 0., 0.],
         [0., 0., 0.]], grad_fn=<MmBackward0>),
 tensor([[0., 0., 0.],
         [0., 0., 0.],
         [0., 0., 0.]], grad_fn=<MmBackward0>))
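The pattern above (operator form, function form, and trailing-underscore in-place form) carries over to the other element-wise operations; a minimal sketch:

import torch

a = torch.full((2, 3), 2.0)
b = torch.full((2, 3), 4.0)

c = torch.mul(a, b)   # element-wise product, same as a * b
d = torch.div(b, a)   # element-wise division, same as b / a
e = torch.pow(a, 3)   # element-wise power, same as a ** 3
a.mul_(b)             # in-place: a is overwritten with a * b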

Tensor Indexing and Filtering

x=torch.rand(4,4)
y=torch.rand(4,4)
out=torch.where(x>0.4,x,y)   # pick from x where the condition holds, else from y
out,x,y
(tensor([[0.9865, 0.6054, 0.6724, 0.2938],
         [0.0653, 0.9244, 0.9957, 0.9819],
         [0.9005, 0.9081, 0.7058, 0.8843],
         [0.7415, 0.5732, 0.6546, 0.5844]]),
 tensor([[9.8650e-01, 6.0536e-01, 6.7243e-01, 2.7408e-01],
         [3.8264e-01, 9.2444e-01, 3.0863e-01, 9.8186e-01],
         [9.0049e-01, 9.0812e-01, 7.0580e-01, 2.1439e-02],
         [5.2613e-04, 1.6204e-01, 6.5463e-01, 5.8436e-01]]),
 tensor([[0.9536, 0.8117, 0.0754, 0.2938],
         [0.0653, 0.0234, 0.9957, 0.0589],
         [0.0222, 0.1991, 0.4073, 0.8843],
         [0.7415, 0.5732, 0.1178, 0.9670]]))
x,torch.index_select(x,dim=0,index=torch.tensor([0,3,2]))   # pick rows 0, 3, 2 in that order
(tensor([[9.8650e-01, 6.0536e-01, 6.7243e-01, 2.7408e-01],
         [3.8264e-01, 9.2444e-01, 3.0863e-01, 9.8186e-01],
         [9.0049e-01, 9.0812e-01, 7.0580e-01, 2.1439e-02],
         [5.2613e-04, 1.6204e-01, 6.5463e-01, 5.8436e-01]]),
 tensor([[9.8650e-01, 6.0536e-01, 6.7243e-01, 2.7408e-01],
         [5.2613e-04, 1.6204e-01, 6.5463e-01, 5.8436e-01],
         [9.0049e-01, 9.0812e-01, 7.0580e-01, 2.1439e-02]]))
z=torch.linspace(1,16,16)
z=z.view(4,4)
mask=torch.gt(z,8)             # boolean mask: True where z > 8
torch.masked_select(z,mask)    # always returns a flattened 1-D tensor
tensor([ 9., 10., 11., 12., 13., 14., 15., 16.])
z,torch.take(z,index=torch.tensor([1,15,13,11]))   # take indexes the tensor as if flattened
(tensor([[ 1.,  2.,  3.,  4.],
         [ 5.,  6.,  7.,  8.],
         [ 9., 10., 11., 12.],
         [13., 14., 15., 16.]]),
 tensor([ 2., 16., 14., 12.]))
torch.nonzero(z)   # (row, col) indices of every non-zero element
tensor([[0, 0],
        [0, 1],
        [0, 2],
        [0, 3],
        [1, 0],
        [1, 1],
        [1, 2],
        [1, 3],
        [2, 0],
        [2, 1],
        [2, 2],
        [2, 3],
        [3, 0],
        [3, 1],
        [3, 2],
        [3, 3]])
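Closely related, though not shown above: torch.gather picks one element per output position from an index tensor. A minimal sketch on the same 4x4 z:

import torch

z = torch.arange(1., 17.).view(4, 4)
idx = torch.tensor([[0, 3], [1, 2], [2, 1], [3, 0]])
torch.gather(z, dim=1, index=idx)   # out[i][j] = z[i][idx[i][j]]
# tensor([[ 1.,  4.],
#         [ 6.,  7.],
#         [11., 10.],
#         [16., 13.]])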

Tensor Combining and Concatenation

a=torch.zeros_like(a)
b=torch.ones(b.shape)
torch.cat((a,b),dim=1).shape   # cat joins along an existing dimension
torch.Size([5, 6])
res=torch.stack((a,b),dim=1)   # stack inserts a new dimension
res,res.shape,a.shape,b.shape
(tensor([[[0., 0., 0.],
          [1., 1., 1.]],
 
         [[0., 0., 0.],
          [1., 1., 1.]],
 
         [[0., 0., 0.],
          [1., 1., 1.]],
 
         [[0., 0., 0.],
          [1., 1., 1.]],
 
         [[0., 0., 0.],
          [1., 1., 1.]]]),
 torch.Size([5, 2, 3]),
 torch.Size([5, 3]),
 torch.Size([5, 3]))
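The shapes make the difference clear: cat grows an existing dimension, while stack inserts a new one. A minimal sketch:

import torch

a = torch.zeros(5, 3)
b = torch.ones(5, 3)

torch.cat((a, b), dim=0).shape    # torch.Size([10, 3]): dim 0 grows
torch.stack((a, b), dim=0).shape  # torch.Size([2, 5, 3]): a new dim 0 appears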

Tensor Slicing

torch.chunk(x,1,dim=1)   # second argument = number of chunks; 1 chunk returns a 1-tuple
(tensor([[9.8650e-01, 6.0536e-01, 6.7243e-01, 2.7408e-01],
         [3.8264e-01, 9.2444e-01, 3.0863e-01, 9.8186e-01],
         [9.0049e-01, 9.0812e-01, 7.0580e-01, 2.1439e-02],
         [5.2613e-04, 1.6204e-01, 6.5463e-01, 5.8436e-01]]),)
torch.split(x,1,dim=0)   # second argument = size of each chunk
(tensor([[0.9865, 0.6054, 0.6724, 0.2741]]),
 tensor([[0.3826, 0.9244, 0.3086, 0.9819]]),
 tensor([[0.9005, 0.9081, 0.7058, 0.0214]]),
 tensor([[5.2613e-04, 1.6204e-01, 6.5463e-01, 5.8436e-01]]))
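The easy-to-confuse point: chunk's second argument is the number of chunks, while split's is the size of each chunk (split also accepts a list of explicit sizes). A minimal sketch:

import torch

x = torch.rand(4, 4)

torch.chunk(x, 2, dim=0)        # 2 = number of chunks -> two (2, 4) tensors
torch.split(x, 2, dim=0)        # 2 = size of each chunk -> two (2, 4) tensors
torch.split(x, [1, 3], dim=0)   # explicit sizes -> a (1, 4) and a (3, 4) tensor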

Tensor Reshaping

z.shape,torch.t(z),torch.transpose(z,0,1)   # t() is shorthand for transpose(0, 1) on 2-D tensors
(torch.Size([4, 4]),
 tensor([[ 1.,  5.,  9., 13.],
         [ 2.,  6., 10., 14.],
         [ 3.,  7., 11., 15.],
         [ 4.,  8., 12., 16.]]),
 tensor([[ 1.,  5.,  9., 13.],
         [ 2.,  6., 10., 14.],
         [ 3.,  7., 11., 15.],
         [ 4.,  8., 12., 16.]]))
data=torch.rand(1,2,3)
data,torch.squeeze(data)   # squeeze drops all size-1 dimensions
(tensor([[[0.2425, 0.9062, 0.6482],
          [0.6508, 0.1638, 0.6338]]]),
 tensor([[0.2425, 0.9062, 0.6482],
         [0.6508, 0.1638, 0.6338]]))
data.shape,torch.unsqueeze(data,-1).shape   # unsqueeze inserts a size-1 dim at the given position
(torch.Size([1, 2, 3]), torch.Size([1, 2, 3, 1]))
torch.unbind(data,dim=2)   # removes dim 2 and returns a tuple of slices
(tensor([[0.2425, 0.6508]]),
 tensor([[0.9062, 0.1638]]),
 tensor([[0.6482, 0.6338]]))
data,torch.flip(data,dims=[0])   # dim 0 has size 1 here, so flipping it is a no-op
(tensor([[[0.2425, 0.9062, 0.6482],
          [0.6508, 0.1638, 0.6338]]]),
 tensor([[[0.2425, 0.9062, 0.6482],
          [0.6508, 0.1638, 0.6338]]]))
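t() and transpose() swap exactly two dimensions; to reorder several dimensions at once, permute is the usual tool. A minimal sketch, also contrasting view and reshape:

import torch

data = torch.rand(1, 2, 3)

data.view(3, 2).shape          # reinterprets the contiguous memory, no copy
data.reshape(6).shape          # like view, but copies when the layout requires it
data.permute(2, 0, 1).shape    # torch.Size([3, 1, 2]): all dims reordered at once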

Tensor Filling

torch.full((4,5),10)   # an integer fill value gives an integer tensor
tensor([[10, 10, 10, 10, 10],
        [10, 10, 10, 10, 10],
        [10, 10, 10, 10, 10],
        [10, 10, 10, 10, 10]])
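Two close relatives, as a minimal sketch: torch.full_like copies shape and dtype from an existing tensor, and fill_ overwrites in place:

import torch

t = torch.full((4, 5), 10.0)   # a float fill value gives a float tensor
torch.full_like(t, 3.0)        # same shape and dtype, different fill value
t.fill_(0.0)                   # in-place: every element overwritten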