PyTorch Tensor Statistics

. norm
. mean sum
. prod
. max, min, argmin, argmax
. kthvalue, topk
norm(p): the p-norm (sum of |x|**p over all elements, raised to the power 1/p)
import torch
a = torch.full([8], 1.)  # float fill value: norm() requires a floating-point tensor
b = a.view(2, 4)
c = a.view(2, 2, 2)
# L1 norm over all elements
print("a.norm(1):\t", a.norm(1))
print("b.norm(1):\t", b.norm(1))
print("c.norm(1):\t", c.norm(1))
# L2 norm over all elements
print("a.norm(2):\t", a.norm(2))
print("b.norm(2):\t", b.norm(2))
print("c.norm(2):\t", c.norm(2))

# p-norm along a given dim: the reduced dim disappears from the result's shape
d = b.norm(1, dim=1)
print("d:\t", d)
e = b.norm(2, dim=1)
print("e:\t", e)

f = c.norm(1, dim=0)
print("f:\t", f)
g = c.norm(2, dim=0)
print("g:\t", g)
a.norm(1):	 tensor(8.)
b.norm(1):	 tensor(8.)
c.norm(1):	 tensor(8.)
a.norm(2):	 tensor(2.8284)
b.norm(2):	 tensor(2.8284)
c.norm(2):	 tensor(2.8284)
d:	 tensor([4., 4.])
e:	 tensor([2., 2.])
f:	 tensor([[2., 2.],
        [2., 2.]])
g:	 tensor([[1.4142, 1.4142],
        [1.4142, 1.4142]])
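Note: Tensor.norm still works, but newer PyTorch documentation points to the torch.linalg module for norms. A minimal equivalent sketch, assuming PyTorch >= 1.10 where torch.linalg.vector_norm is available:
import torch
b = torch.full([2, 4], 1.)
# ord=1 sums absolute values; ord=2 is the Euclidean norm
print(torch.linalg.vector_norm(b, ord=1, dim=1))  # tensor([4., 4.])
print(torch.linalg.vector_norm(b, ord=2, dim=1))  # tensor([2., 2.])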
mean, sum, min, max, prod:
import torch
a = torch.arange(8).view(2, 4).float()
print("a:\t", a)

print("a.min():\t", a.min())
print("a.max():\t", a.max())
print("a.mean():\t", a.mean())
print("a.prod():\t", a.prod())

print("a.sum()", a.sum())
# a.argmax(): 表示a中最大元素的索引 前提是将整个维度都打平
print("a.argmax():\t", a.argmax())
# a.argmin(): 表示a中最小元素的索引
print("a.argmin():\t", a.argmin())
a:	 tensor([[0., 1., 2., 3.],
        [4., 5., 6., 7.]])
a.min():	 tensor(0.)
a.max():	 tensor(7.)
a.mean():	 tensor(3.5000)
a.prod():	 tensor(0.)
a.sum():	 tensor(28.)
a.argmax():	 tensor(7)
a.argmin():	 tensor(0)
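All of these reductions also accept a dim argument, reducing along that dimension instead of over the whole tensor. A quick sketch with the same a as above:
import torch
a = torch.arange(8).view(2, 4).float()
print(a.sum(dim=0))   # column sums: tensor([ 4.,  6.,  8., 10.])
print(a.mean(dim=1))  # row means: tensor([1.5000, 5.5000])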
argmin, argmax:
import torch
a = torch.arange(8).view(2, 4).float()
b = a.view(1, 2, 4)
print("b:\t", b)
# argmax() without a dim flattens the tensor first, then returns the index of the max
c = a.argmax()
print("c:\t", c)

d = a.argmin()
print("d:\t", d)

e = torch.rand(2, 3, 4)
print("e:\t", e)
print("a.argmax():\t", e.argmax())

f = torch.randn(4, 10)
print("f:\t", f)
print("a[0]:\t", a[0])

# flattened argmax over all 40 elements of f
g = f.argmax()
print("g:\t", g)

# argmax along dim=1: one index per row
h = f.argmax(dim=1)
print("h:\t", h)
b:	 tensor([[[0., 1., 2., 3.],
         [4., 5., 6., 7.]]])
c:	 tensor(7)
d:	 tensor(0)
e:	 tensor([[[0.4047, 0.2962, 0.8146, 0.6037],
         [0.9354, 0.7488, 0.3172, 0.9637],
         [0.6002, 0.4101, 0.9090, 0.4140]],

        [[0.9468, 0.6359, 0.2461, 0.8754],
         [0.4427, 0.9468, 0.2440, 0.2915],
         [0.1552, 0.5877, 0.8878, 0.1947]]])
e.argmax():	 tensor(7)
f:	 tensor([[-0.1211, -0.1426,  0.6958,  1.0245,  1.2680,  0.4812,  0.7119, -0.8774,
          2.0185, -0.1699],
        [-1.9961, -0.7775,  0.7811, -0.4823,  0.7833,  0.6199, -0.3771,  1.2033,
         -0.7897, -1.7518],
        [-0.2140,  1.3854, -0.2574,  0.7923, -0.4817,  0.0912, -0.6918, -0.6742,
          0.7635, -1.2962],
        [ 0.8340, -0.1587, -0.5136,  0.4224,  0.3274, -1.4480, -0.2997, -0.8525,
          0.4084,  1.0327]])
a[0]:	 tensor([0., 1., 2., 3.])
g:	 tensor(8)
h:	 tensor([8, 7, 1, 9])
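Because argmax() without a dim indexes into the flattened tensor, the flat index can be mapped back to (row, column) coordinates using the row length. A small sketch (the divmod by size(1) assumes a 2-D tensor like the 4x10 f above):
import torch
f = torch.randn(4, 10)
flat = f.argmax()                          # index into the flattened tensor
row, col = divmod(flat.item(), f.size(1))  # 10 columns per row
assert f[row, col] == f.flatten()[flat]    # same element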
dim, keepdim:
import torch
a = torch.randn(4, 10)
print("a:\t", a)

# max along a dim returns both the max values and their indices
b = a.max(dim=1)
print("b:\t", b)

c = a.argmax(dim=1)
print("c:\t", c)

# keepdim=True keeps the reduced dim with size 1, so shapes stay [4, 1] instead of [4]
d = a.max(dim=1, keepdim=True)
print("d:\t", d)

e = a.argmax(dim=1, keepdim=True)
print("e:\t", e)
a:	 tensor([[ 1.3618,  0.5846,  1.1938, -1.4396, -0.3029, -0.1647, -1.8627,  0.8877,
          0.2870, -1.0845],
        [ 0.9849,  0.1193,  0.1774, -0.4168, -1.6215,  0.7627,  0.4937,  1.0408,
          0.3700, -1.0749],
        [ 1.6469, -1.0133, -0.1912,  0.5724,  0.5074,  0.0776, -1.9356, -2.0871,
          0.3495,  1.7442],
        [-0.4279,  1.2909, -0.7320, -0.2877, -0.1629,  0.2456, -0.0746,  0.4677,
          0.1829, -0.8055]])
b:	 torch.return_types.max(
values=tensor([1.3618, 1.0408, 1.7442, 1.2909]),
indices=tensor([0, 7, 9, 1]))
c:	 tensor([0, 7, 9, 1])
d:	 torch.return_types.max(
values=tensor([[1.3618],
        [1.0408],
        [1.7442],
        [1.2909]]),
indices=tensor([[0],
        [7],
        [9],
        [1]]))
e:	 tensor([[0],
        [7],
        [9],
        [1]])
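Since max(dim=...) returns a (values, indices) named tuple, it can be unpacked directly, and the indices field matches what argmax(dim=...) returns. A minimal sketch:
import torch
a = torch.randn(4, 10)
values, indices = a.max(dim=1)  # unpack the named tuple
assert torch.equal(indices, a.argmax(dim=1))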
topk, kthvalue:
import torch
a = torch.randn(4, 10)
print("a:\t", a)

# top-3 largest values per row, with their indices
b = a.topk(3, dim=1)
print("b:\t", b)

# largest=False returns the 3 smallest values instead
c = a.topk(3, dim=1, largest=False)
print("c:\t", c)

# kthvalue(k) returns the k-th *smallest* value along the dim
d = a.kthvalue(8, dim=1)
print("d:\t", d)

e = a.kthvalue(3)
print("e:\t", e)

f = a.kthvalue(3, dim=1)
print("f:\t", f)
a:	 tensor([[ 1.0492,  0.0685,  0.9684, -1.6225, -1.5455, -0.9173, -1.1044,  1.1907,
         -0.3815,  0.4626],
        [ 0.5621,  0.3048,  1.0324, -1.9408, -2.4720, -0.7893,  0.4810, -1.2472,
          1.0247,  0.4880],
        [-0.8404,  0.3970,  2.2206, -0.1243, -1.9581,  2.1375,  1.2285,  0.3735,
          0.7697,  0.0881],
        [-0.9393, -2.1563, -0.3426, -1.2738,  1.2554, -0.1445, -0.9060, -1.5654,
          0.3997, -0.3591]])
b:	 torch.return_types.topk(
values=tensor([[ 1.1907,  1.0492,  0.9684],
        [ 1.0324,  1.0247,  0.5621],
        [ 2.2206,  2.1375,  1.2285],
        [ 1.2554,  0.3997, -0.1445]]),
indices=tensor([[7, 0, 2],
        [2, 8, 0],
        [2, 5, 6],
        [4, 8, 5]]))
c:	 torch.return_types.topk(
values=tensor([[-1.6225, -1.5455, -1.1044],
        [-2.4720, -1.9408, -1.2472],
        [-1.9581, -0.8404, -0.1243],
        [-2.1563, -1.5654, -1.2738]]),
indices=tensor([[3, 4, 6],
        [4, 3, 7],
        [4, 0, 3],
        [1, 7, 3]]))
d:	 torch.return_types.kthvalue(
values=tensor([ 0.9684,  0.5621,  1.2285, -0.1445]),
indices=tensor([2, 0, 6, 5]))
e:	 torch.return_types.kthvalue(
values=tensor([-1.1044, -1.2472, -0.1243, -1.2738]),
indices=tensor([6, 7, 3, 3]))
f:	 torch.return_types.kthvalue(
values=tensor([-1.1044, -1.2472, -0.1243, -1.2738]),
indices=tensor([6, 7, 3, 3]))
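kthvalue(k) returns the k-th smallest element along the dim, so with 10 columns the 8th smallest is the same as the 3rd largest, i.e. the last of the topk(3) values. A small sketch of that relationship:
import torch
a = torch.randn(4, 10)
kth = a.kthvalue(8, dim=1).values
third_largest = a.topk(3, dim=1).values[:, -1]  # topk sorts descending
assert torch.equal(kth, third_largest)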
compare:
. >, >=, <, <=, !=, ==
. torch.eq(a, b)  # element-wise: returns a bool tensor of the same shape
. torch.equal(a, b)  # whole-tensor: returns a single bool, True only if shapes and all elements match
import torch
a = torch.randn(4, 10)
print("a:\t", a)

print("a>0:\t", a>0)
b = torch.gt(a, 0)
print("b:\t", b)

print("a!=0:\t", a!=0)

c = torch.ones(2, 3)
d = torch.randn(2, 3)
e = torch.eq(c, d)
print("e:\t", e)

f = torch.eq(c, c)
print("f:\t", f)

g = torch.equal(c, c)
print("g:\t", g)
a:	 tensor([[ 0.0593,  1.6043, -0.6258, -0.3781,  2.5395,  0.9588, -0.7478,  0.8301,
          0.7250,  1.2348],
        [ 0.2881, -0.7385,  1.1070, -2.0498,  1.1637,  0.1691, -0.0675, -0.0773,
         -0.8256, -0.5518],
        [-1.0803,  0.3671,  0.2700,  1.2518,  0.9975,  0.2951, -0.8154,  0.4540,
         -0.4531, -0.8583],
        [-0.0555, -0.6077,  0.1655,  0.1870,  0.1755, -0.0050,  1.3320,  1.2199,
          1.5145, -0.1658]])
a>0:	 tensor([[ True,  True, False, False,  True,  True, False,  True,  True,  True],
        [ True, False,  True, False,  True,  True, False, False, False, False],
        [False,  True,  True,  True,  True,  True, False,  True, False, False],
        [False, False,  True,  True,  True, False,  True,  True,  True, False]])
b:	 tensor([[ True,  True, False, False,  True,  True, False,  True,  True,  True],
        [ True, False,  True, False,  True,  True, False, False, False, False],
        [False,  True,  True,  True,  True,  True, False,  True, False, False],
        [False, False,  True,  True,  True, False,  True,  True,  True, False]])
a!=0:	 tensor([[True, True, True, True, True, True, True, True, True, True],
        [True, True, True, True, True, True, True, True, True, True],
        [True, True, True, True, True, True, True, True, True, True],
        [True, True, True, True, True, True, True, True, True, True]])
e:	 tensor([[False, False, False],
        [False, False, False]])
f:	 tensor([[True, True, True],
        [True, True, True]])
g:	 True
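The bool tensors these comparisons return compose with other operations, which is the usual way to count or select elements. A short sketch:
import torch
a = torch.randn(4, 10)
mask = a > 0
print(mask.sum())     # number of positive entries
print(a[mask].shape)  # 1-D tensor holding just the positive values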