A Brief Look at the dim Argument of torch.max and F.softmax in PyTorch

This post walks through how to use torch.max and F.softmax in PyTorch, and in particular how to set the dim argument. Using 2-D and 3-D tensor examples, it explains what each function does along a given dimension, to build intuition for producing probability distributions and for taking maxima.

When using torch.max and F.softmax, I always get a bit confused about which dim to pass, so here is a summary:

First, an example with a 2-D tensor:

import torch
import torch.nn.functional as F

input = torch.randn(3, 4)
print(input)
tensor([[-0.5526, -0.0194,  2.1469, -0.2567],
        [-0.3337, -0.9229,  0.0376, -0.0801],
        [ 1.4721,  0.1181, -2.6214,  1.7721]])

b = F.softmax(input, dim=0)  # softmax over dim 0, i.e. down each column: every column sums to 1
print(b)
tensor([[0.1018, 0.3918, 0.8851, 0.1021],
        [0.1268, 0.1587, 0.1074, 0.1218],
        [0.7714, 0.4495, 0.0075, 0.7762]])

c = F.softmax(input, dim=1)  # softmax over dim 1, i.e. along each row: every row sums to 1
print(c)
tensor([[0.0529, 0.0901, 0.7860, 0.0710],
        [0.2329, 0.1292, 0.3377, 0.3002],
        [0.3810, 0.0984, 0.0064, 0.5143]])
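
As a quick sanity check (a small sketch reusing b and c from above), summing over the dimension you applied softmax to should give all ones, up to floating-point rounding:

print(b.sum(dim=0))  # column sums: tensor([1.0000, 1.0000, 1.0000, 1.0000])
print(c.sum(dim=1))  # row sums:    tensor([1.0000, 1.0000, 1.0000])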

d = torch.max(input, dim=0)  # max over dim 0: the maximum of each column
print(d)
torch.return_types.max(
values=tensor([1.4721, 0.1181, 2.1469, 1.7721]),
indices=tensor([2, 2, 0, 2]))

e = torch.max(input, dim=1)  # max over dim 1: the maximum of each row
print(e)
torch.return_types.max(
values=tensor([2.1469, 0.0376, 1.7721]),
indices=tensor([2, 2, 3]))
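
Note that when torch.max is given a dim argument it returns a named tuple (values, indices), and the indices agree with torch.argmax over the same dimension. A small sketch of the usual way to unpack it:

values, indices = torch.max(input, dim=1)  # unpack the named tuple
print(values)                              # the max of each row
print(indices)                             # which column each row's max came from
print(torch.argmax(input, dim=1))          # same tensor as indices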

Now an example with a 3-D tensor:

The softmax function outputs a probability distribution over the given tensor along the chosen dimension. In the code below, b holds the distribution along dim=0, so for any fixed pair of remaining indices the entries sum to 1, e.g. b[0][5][6] + b[1][5][6] + b[2][5][6] = 1.

a = torch.rand(3, 16, 20)
b = F.softmax(a, dim=0)
c = F.softmax(a, dim=1)
d = F.softmax(a, dim=2)
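
The same sanity check works in 3-D: whichever dim you pick, slices taken along that dim sum to 1, and the shape of the sum is the original shape with that dim removed. A minimal sketch using a, b, c, d from above:

print(torch.allclose(b.sum(dim=0), torch.ones(16, 20)))  # True: dim 0 collapsed
print(torch.allclose(c.sum(dim=1), torch.ones(3, 20)))   # True: dim 1 collapsed
print(torch.allclose(d.sum(dim=2), torch.ones(3, 16)))   # True: dim 2 collapsed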

In [1]: import torch as t
In [2]: import torch.nn.functional as F
In [4]: a = t.Tensor(3, 4, 5)
In [5]: b = F.softmax(a, dim=0)
In [6]: c = F.softmax(a, dim=1)
In [7]: d = F.softmax(a, dim=2)
In [8]: a
Out[8]:
tensor([[[-0.1581,  0.0000,  0.0000,  0.0000, -0.0344],
         [ 0.0000, -0.0344,  0.0000, -0.0344,  0.0000],
         [-0.0344,  0.0000, -0.0344,  0.0000, -0.0344],
         [ 0.0000, -0.0344,  0.0000, -0.0344,  0.0000]],

        [[-0.0344,  0.0000, ...
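
In practice the two functions are often chained, e.g. in classification: softmax over the class dimension turns logits into probabilities, and torch.max over the same dimension reads off the predicted class. A minimal sketch with hypothetical shapes (a batch of 5 samples, 10 classes):

import torch
import torch.nn.functional as F

logits = torch.randn(5, 10)                      # hypothetical: 5 samples, 10 classes
probs = F.softmax(logits, dim=1)                 # each row now sums to 1
confidence, predicted = torch.max(probs, dim=1)  # per-sample top probability and its class index
print(predicted)    # predicted class for each sample
print(confidence)   # probability assigned to that class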
