Normalize methods in TensorFlow and PyTorch

1. tf.linalg.normalize

def normalize(tensor, ord="euclidean", axis=None, name=None):
  """Normalizes `tensor` along dimension `axis` using specified norm.

  This uses `tf.linalg.norm` to compute the norm along `axis`.

  This function can compute several different vector norms (the 1-norm, the
  Euclidean or 2-norm, the inf-norm, and in general the p-norm for p > 0) and
  matrix norms (Frobenius, 1-norm, 2-norm and inf-norm).

  Args:
    tensor: `Tensor` of types `float32`, `float64`, `complex64`, `complex128`
    ord: Order of the norm. Supported values are `'fro'`, `'euclidean'`, `1`,
      `2`, `np.inf` and any positive real number yielding the corresponding
      p-norm. Default is `'euclidean'` which is equivalent to Frobenius norm if
      `tensor` is a matrix and equivalent to 2-norm for vectors.
      Some restrictions apply: a) The Frobenius norm `'fro'` is not defined for
        vectors, b) If axis is a 2-tuple (matrix norm), only `'euclidean'`,
        '`fro'`, `1`, `2`, `np.inf` are supported. See the description of `axis`
        on how to compute norms for a batch of vectors or matrices stored in a
        tensor.
    axis: If `axis` is `None` (the default), the input is considered a vector
      and a single vector norm is computed over the entire set of values in the
      tensor, i.e. `norm(tensor, ord=ord)` is equivalent to
      `norm(reshape(tensor, [-1]), ord=ord)`. If `axis` is a Python integer, the
      input is considered a batch of vectors, and `axis` determines the axis in
      `tensor` over which to compute vector norms. If `axis` is a 2-tuple of
      Python integers it is considered a batch of matrices and `axis` determines
      the axes in `tensor` over which to compute a matrix norm.
      Negative indices are supported. Example: If you are passing a tensor that
        can be either a matrix or a batch of matrices at runtime, pass
        `axis=[-2,-1]` instead of `axis=None` to make sure that matrix norms are
        computed.

args:

tensor    the input tensor
ord       which norm to normalize by; the common choices are the 1-norm and the 2-norm. The default 'euclidean' is the 2-norm when a vector norm is computed (and the Frobenius norm for matrices), so for vectors it behaves the same as ord=2 (see the small sketch after this list)
axis      the axis/dimension to normalize along; unlike PyTorch, this argument also accepts a 2-tuple of axes for matrix norms
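A minimal sketch of the ord argument (the values here are illustrative, not from the original post): ord=1 divides by the sum of absolute values, while the default divides by the square root of the sum of squares.

import tensorflow as tf

v = tf.constant([1., -1., 2.])
l2, l2_norm = tf.linalg.normalize(v)         # default ord='euclidean' = 2-norm
l1, l1_norm = tf.linalg.normalize(v, ord=1)  # 1-norm: |1| + |-1| + |2| = 4
print(float(tf.squeeze(l2_norm)))  # ~2.449, i.e. sqrt(6)
print(float(tf.squeeze(l1_norm)))  # 4.0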

Example:

import numpy as np
import tensorflow as tf

data = np.array([[[ 1., -1.,  2.],
                  [ 2.,  0.,  0.],
                  [ 0.,  1., -1.]],
                 [[ 1.,  1.,  1.],
                  [ 2.,  2.,  2.],
                  [ 1.,  1., -1.]],
                 [[ 1.,  0.,  0.],
                  [ 2.,  0.,  0.],
                  [ 0.,  0., -1.]]])

data_tensor = tf.convert_to_tensor(data, dtype=tf.float32)

With the defaults (ord='euclidean', axis=None) the tensor is flattened and normalized as one long vector: the squares of all 27 elements sum to 36, so the norm is 6 and every element is divided by 6.

result, _ = tf.linalg.normalize(data_tensor)

print(result)


tf.Tensor(
[[[ 0.16666667 -0.16666667  0.33333334]
  [ 0.33333334  0.          0.        ]
  [ 0.          0.16666667 -0.16666667]]

 [[ 0.16666667  0.16666667  0.16666667]
  [ 0.33333334  0.33333334  0.33333334]
  [ 0.16666667  0.16666667 -0.16666667]]

 [[ 0.16666667  0.          0.        ]
  [ 0.33333334  0.          0.        ]
  [ 0.          0.         -0.16666667]]], shape=(3, 3, 3), dtype=float32)
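As a sanity check, tf.linalg.normalize also returns the norm it divided by (the second return value, discarded above), and it matches a manual NumPy computation. A small sketch reusing data from above:

_, norm = tf.linalg.normalize(data_tensor)
print(float(tf.squeeze(norm)))     # 6.0 (norm keeps singleton dims so it broadcasts)
print(np.sqrt((data ** 2).sum()))  # 6.0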

axis = 0: normalize along the batch dimension, i.e. each length-3 vector taken across the three matrices at a fixed (row, column) position.

result, _ = tf.linalg.normalize(data_tensor, axis=0)

print(result)

tf.Tensor(
[[[ 0.57735026 -0.70710677  0.8944272 ]
  [ 0.57735026  0.          0.        ]
  [ 0.          0.70710677 -0.57735026]]

 [[ 0.57735026  0.70710677  0.4472136 ]
  [ 0.57735026  1.          1.        ]
  [ 1.          0.70710677 -0.57735026]]

 [[ 0.57735026  0.          0.        ]
  [ 0.57735026  0.          0.        ]
  [ 0.          0.         -0.57735026]]], shape=(3, 3, 3), dtype=float32)

axis = 1: normalize along each column of each 3×3 matrix.

result, _ = tf.linalg.normalize(data_tensor, axis=1)

print(result)

tf.Tensor(
[[[ 0.4472136  -0.70710677  0.8944272 ]
  [ 0.8944272   0.          0.        ]
  [ 0.          0.70710677 -0.4472136 ]]

 [[ 0.40824828  0.40824828  0.40824828]
  [ 0.81649655  0.81649655  0.81649655]
  [ 0.40824828  0.40824828 -0.40824828]]

 [[ 0.4472136          nan  0.        ]
  [ 0.8944272          nan  0.        ]
  [ 0.                 nan -1.        ]]], shape=(3, 3, 3), dtype=float32)
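The nan column is worth noting: the middle column of the third matrix is all zeros, so its norm is 0 and the division produces nan. Unlike torch.nn.functional.normalize below, tf.linalg.normalize has no eps safeguard. One possible workaround (a sketch, not from the original post) is tf.math.divide_no_nan, which returns 0 wherever the divisor is 0:

norm = tf.norm(data_tensor, axis=1, keepdims=True)
safe = tf.math.divide_no_nan(data_tensor, norm)  # zero vectors stay zero instead of nan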

axis = 2: normalize along each row of each 3×3 matrix.

result, _ = tf.linalg.normalize(data_tensor, axis=2)

print(result)

tf.Tensor(
[[[ 0.40824828 -0.40824828  0.81649655]
  [ 1.          0.          0.        ]
  [ 0.          0.70710677 -0.70710677]]

 [[ 0.57735026  0.57735026  0.57735026]
  [ 0.57735026  0.57735026  0.57735026]
  [ 0.57735026  0.57735026 -0.57735026]]

 [[ 1.          0.          0.        ]
  [ 1.          0.          0.        ]
  [ 0.          0.         -1.        ]]], shape=(3, 3, 3), dtype=float32)

axis = (1, 2): a matrix norm over dims 1 and 2, i.e. each 3×3 matrix in the batch (indexed by dim 0) is normalized as a whole by its Frobenius norm.

result, _ = tf.linalg.normalize(data_tensor, axis=(1, 2))

print(result)

tf.Tensor(
[[[ 0.28867513 -0.28867513  0.57735026]
  [ 0.57735026  0.          0.        ]
  [ 0.          0.28867513 -0.28867513]]

 [[ 0.23570228  0.23570228  0.23570228]
  [ 0.47140455  0.47140455  0.47140455]
  [ 0.23570228  0.23570228 -0.23570228]]

 [[ 0.40824828  0.          0.        ]
  [ 0.81649655  0.          0.        ]
  [ 0.          0.         -0.40824828]]], shape=(3, 3, 3), dtype=float32)
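The per-matrix Frobenius norms can again be checked by hand (a sketch reusing the arrays from above): the sums of squares of the three matrices are 12, 18 and 6.

_, norm = tf.linalg.normalize(data_tensor, axis=(1, 2))
print(tf.squeeze(norm).numpy())               # ≈ [3.4641 4.2426 2.4495]
print(np.sqrt((data ** 2).sum(axis=(1, 2))))  # [sqrt(12) sqrt(18) sqrt(6)]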

2. torch.nn.functional.normalize

def normalize(input: Tensor, p: float = 2, dim: int = 1, eps: float = 1e-12, out: Optional[Tensor] = None) -> Tensor:
    r"""Performs :math:`L_p` normalization of inputs over specified dimension.

    For a tensor :attr:`input` of sizes :math:`(n_0, ..., n_{dim}, ..., n_k)`, each
    :math:`n_{dim}` -element vector :math:`v` along dimension :attr:`dim` is transformed as

    .. math::
        v = \frac{v}{\max(\lVert v \rVert_p, \epsilon)}.

    With the default arguments it uses the Euclidean norm over vectors along dimension :math:`1` for normalization.

    Args:
        input: input tensor of any shape
        p (float): the exponent value in the norm formulation. Default: 2
        dim (int): the dimension to reduce. Default: 1
        eps (float): small value to avoid division by zero. Default: 1e-12
        out (Tensor, optional): the output tensor. If :attr:`out` is used, this
                                operation won't be differentiable.
    """

args:

input    the input tensor
p        the norm exponent: p=1 for L1 normalization, p=2 (the default) for L2
dim      the dimension to normalize along; default 1
eps      lower bound on the norm, avoiding division by zero (default 1e-12); see the sketch below
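A small sketch of p and eps (illustrative values, not from the original post): p=1 divides by the sum of absolute values, and a zero vector is divided by eps rather than by 0, so it comes back as zeros instead of nan:

import torch
import torch.nn.functional as F

v = torch.tensor([[1., -1., 2.]])
print(F.normalize(v, p=2, dim=1))             # divided by sqrt(6)
print(F.normalize(v, p=1, dim=1))             # divided by 4
print(F.normalize(torch.zeros(1, 3), dim=1))  # tensor([[0., 0., 0.]]) -- no nan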

import numpy as np
import torch
import torch.nn.functional as F

data = np.array([[[ 1., -1.,  2.],
                  [ 2.,  0.,  0.],
                  [ 0.,  1., -1.]],
                 [[ 1.,  1.,  1.],
                  [ 2.,  2.,  2.],
                  [ 1.,  1., -1.]],
                 [[ 1.,  0.,  0.],
                  [ 2.,  0.,  0.],
                  [ 0.,  0., -1.]]])

data_tensor = torch.tensor(data, dtype=torch.float32)

dim=0: the result is identical to TensorFlow's axis=0 above.

result = F.normalize(data_tensor, dim=0)

print(result)

tensor([[[ 0.5774, -0.7071,  0.8944],
         [ 0.5774,  0.0000,  0.0000],
         [ 0.0000,  0.7071, -0.5774]],

        [[ 0.5774,  0.7071,  0.4472],
         [ 0.5774,  1.0000,  1.0000],
         [ 1.0000,  0.7071, -0.5774]],

        [[ 0.5774,  0.0000,  0.0000],
         [ 0.5774,  0.0000,  0.0000],
         [ 0.0000,  0.0000, -0.5774]]])
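The docstring formula v / max(||v||_p, eps) can also be reproduced by hand (a sketch, assuming the default eps=1e-12):

norm = data_tensor.norm(p=2, dim=0, keepdim=True).clamp_min(1e-12)
manual = data_tensor / norm
print(torch.allclose(manual, F.normalize(data_tensor, dim=0)))  # True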

 
