def normalize(input, p=2, dim=1, eps=1e-12, out=None):
    # type: (Tensor, float, int, float, Optional[Tensor]) -> Tensor
    r"""Performs :math:`L_p` normalization of inputs over specified dimension.

    For a tensor :attr:`input` of sizes :math:`(n_0, ..., n_{dim}, ..., n_k)`,
    each :math:`n_{dim}`-element vector :math:`v` along dimension :attr:`dim`
    is transformed as

    .. math::
        v = \frac{v}{\max(\lVert v \rVert_p, \epsilon)}.

    With the default arguments it uses the Euclidean norm over vectors along
    dimension :math:`1` for normalization.

    Args:
        input: input tensor of any shape
        p (float): the exponent value in the norm formulation. Default: 2
        dim (int): the dimension to reduce. Default: 1
        eps (float): small value to avoid division by zero. Default: 1e-12
        out (Tensor, optional): the output tensor. If :attr:`out` is used, this
            operation won't be differentiable.
    """
    # Per-vector L_p norm along `dim` (keepdim=True so the result broadcasts
    # back over `input` via expand_as); clamp_min(eps) guards against division
    # by zero for all-zero vectors. Hoisted here so it is computed once for
    # both branches.
    denom = input.norm(p, dim, True).clamp_min(eps).expand_as(input)
    if out is None:
        return input / denom
    # torch.div with out= writes into the caller-supplied buffer; this path
    # is not differentiable (see docstring).
    return torch.div(input, denom, out=out)
# Source note: the `normalize` / `norm` function above was adapted from a blog
# post (latest recommended revision published 2022-03-31 11:06:12); it mirrors
# torch.nn.functional.normalize.