3-8 Programming Examples of Tensor Arithmetic Operations

Addition

import torch

# add: element-wise addition; add_ is the in-place version that modifies a
a = torch.rand(2, 3)
b = torch.rand(2, 3)
print(a)
print(b)

print(a + b)
print(a.add(b))
print(torch.add(a, b))
print(a.add_(b))  # in-place: a now holds a + b
print(a)
tensor([[0.7618, 0.5073, 0.2798],
        [0.5100, 0.6869, 0.7509]])
tensor([[0.6028, 0.2192, 0.9197],
        [0.1769, 0.8318, 0.5133]])
tensor([[1.3646, 0.7266, 1.1995],
        [0.6870, 1.5187, 1.2642]])
tensor([[1.3646, 0.7266, 1.1995],
        [0.6870, 1.5187, 1.2642]])
tensor([[1.3646, 0.7266, 1.1995],
        [0.6870, 1.5187, 1.2642]])
tensor([[1.3646, 0.7266, 1.1995],
        [0.6870, 1.5187, 1.2642]])
tensor([[1.3646, 0.7266, 1.1995],
        [0.6870, 1.5187, 1.2642]])

Subtraction

# sub: element-wise subtraction; sub_ modifies a in place
print(a - b)
print(torch.sub(a, b))
print(a.sub(b))
print(a.sub_(b))
print(a)
tensor([[ 0.1590,  0.2881, -0.6399],
        [ 0.3331, -0.1449,  0.2377]])
tensor([[ 0.1590,  0.2881, -0.6399],
        [ 0.3331, -0.1449,  0.2377]])
tensor([[ 0.1590,  0.2881, -0.6399],
        [ 0.3331, -0.1449,  0.2377]])
tensor([[ 0.1590,  0.2881, -0.6399],
        [ 0.3331, -0.1449,  0.2377]])
tensor([[ 0.1590,  0.2881, -0.6399],
        [ 0.3331, -0.1449,  0.2377]])

Hadamard Product

# mul: Hadamard (element-wise) product; mul_ modifies a in place
print(a)
print(a * b)
print(torch.mul(a, b))
print(a.mul(b))
print(a.mul_(b))
print(a)
tensor([[ 0.0578,  0.0138, -0.5412],
        [ 0.0104, -0.1003,  0.0626]])
tensor([[ 0.0348,  0.0030, -0.4977],
        [ 0.0018, -0.0834,  0.0321]])
tensor([[ 0.0348,  0.0030, -0.4977],
        [ 0.0018, -0.0834,  0.0321]])
tensor([[ 0.0348,  0.0030, -0.4977],
        [ 0.0018, -0.0834,  0.0321]])
tensor([[ 0.0348,  0.0030, -0.4977],
        [ 0.0018, -0.0834,  0.0321]])
tensor([[ 0.0348,  0.0030, -0.4977],
        [ 0.0018, -0.0834,  0.0321]])

Division

# div: element-wise division; div_ modifies a in place
print(a / b)
print(torch.div(a, b))
print(a.div(b))
print(a.div_(b))
print(a)
tensor([[ 0.0959,  0.0632, -0.5884],
        [ 0.0589, -0.1206,  0.1220]])
tensor([[ 0.0959,  0.0632, -0.5884],
        [ 0.0589, -0.1206,  0.1220]])
tensor([[ 0.0959,  0.0632, -0.5884],
        [ 0.0589, -0.1206,  0.1220]])
tensor([[ 0.0959,  0.0632, -0.5884],
        [ 0.0589, -0.1206,  0.1220]])
tensor([[ 0.0959,  0.0632, -0.5884],
        [ 0.0589, -0.1206,  0.1220]])

Matrix Operations

# matrix multiplication: @, matmul, and mm are equivalent for 2-D tensors
a = torch.ones(2, 1)
b = torch.ones(1, 2)
print(a @ b)
print(a.matmul(b))
print(torch.matmul(a, b))
print(torch.mm(a, b))
print(a.mm(b))
tensor([[1., 1.],
        [1., 1.]])
tensor([[1., 1.],
        [1., 1.]])
tensor([[1., 1.],
        [1., 1.]])
tensor([[1., 1.],
        [1., 1.]])
tensor([[1., 1.],
        [1., 1.]])

Higher-Dimensional Tensors

# higher-dimensional tensors
# the last two dimensions must be compatible for matrix multiplication
a = torch.ones(1, 2, 3, 4)
b = torch.ones(1, 2, 4, 3)
print(a.matmul(b))
print(a.matmul(b).shape)
tensor([[[[4., 4., 4.],
          [4., 4., 4.],
          [4., 4., 4.]],

         [[4., 4., 4.],
          [4., 4., 4.],
          [4., 4., 4.]]]])
torch.Size([1, 2, 3, 3])
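matmul treats the leading dimensions as batch dimensions and broadcasts them when needed; only the last two dimensions take part in the matrix product. A sketch with assumed shapes:

import torch

a = torch.ones(2, 3, 4)            # batch of 2 matrices, each 3 x 4
b = torch.ones(4, 5)               # one 4 x 5 matrix, reused across the batch
print(torch.matmul(a, b).shape)    # torch.Size([2, 3, 5])

c = torch.ones(1, 2, 3, 4)
d = torch.ones(5, 2, 4, 3)         # batch dims (1, 2) and (5, 2) broadcast to (5, 2)
print(torch.matmul(c, d).shape)    # torch.Size([5, 2, 3, 3])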

Power Operations

# pow: element-wise power; pow_ modifies a in place
a = torch.tensor([1, 2])
print(a)
print(torch.pow(a, 3))
print(a ** 3)
print(a.pow_(3))
print(a)
tensor([1, 2])
tensor([1, 8])
tensor([1, 8])
tensor([1, 8])
tensor([1, 8])
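The exponent can also be a tensor, applied element-wise; fractional exponents need a floating-point base. A small sketch:

import torch

base = torch.tensor([2, 3])
exponent = torch.tensor([3, 2])
print(torch.pow(base, exponent))   # tensor([8, 9])
print(base.float().pow(0.5))       # tensor([1.4142, 1.7321])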

Exponential (exp)

# exp: element-wise e^x
a = torch.tensor([1, 2], dtype=float)   # Python's float maps to torch.float64
print(a.type())                         # torch.DoubleTensor
print(torch.exp(a))
print(torch.exp_(a))   # in-place: a now holds exp(a)
print(a.exp_())        # each further in-place call compounds: exp(exp(a)) ...
print(a.exp_())        # ... and quickly overflows to inf
torch.DoubleTensor
tensor([2.7183, 7.3891], dtype=torch.float64)
tensor([2.7183, 7.3891], dtype=torch.float64)
tensor([  15.1543, 1618.1780], dtype=torch.float64)
tensor([3814279.1048,          inf], dtype=torch.float64)

Logarithm (log)

# log: element-wise natural logarithm; log_ modifies a in place
a = torch.tensor([10, 2], dtype=torch.float32)
print(torch.log(a))
print(torch.log_(a))   # in-place: a now holds log(a)
print(a.log())         # log of the already-logged values
print(a.log_())
tensor([2.3026, 0.6931])
tensor([2.3026, 0.6931])
tensor([ 0.8340, -0.3665])
tensor([ 0.8340, -0.3665])
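torch.log is the natural logarithm; log2 and log10 are also available. A small sketch:

import torch

x = torch.tensor([8.0, 100.0])
print(torch.log(x))    # tensor([2.0794, 4.6052])
print(torch.log2(x))   # tensor([3.0000, 6.6439])
print(torch.log10(x))  # tensor([0.9031, 2.0000])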

Square Root (sqrt)

# sqrt: element-wise square root; sqrt_ modifies a in place
a = torch.tensor([10, 2], dtype=torch.float32)
print(torch.sqrt(a))
print(torch.sqrt_(a))   # in-place: a now holds sqrt(a)
print(a.sqrt())         # sqrt of the already-rooted values
print(a.sqrt_())
tensor([3.1623, 1.4142])
tensor([3.1623, 1.4142])
tensor([1.7783, 1.1892])
tensor([1.7783, 1.1892])
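A related element-wise op is torch.rsqrt, which computes 1 / sqrt(x); a small sketch:

import torch

x = torch.tensor([4.0, 16.0])
print(torch.sqrt(x))    # tensor([2., 4.])
print(torch.rsqrt(x))   # tensor([0.5000, 0.2500])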