How to use BatchNorm, LayerNorm, and InstanceNorm in torch

1. A comparison of torch's standard normalization functions against hand-written equivalents, to show how the standard library functions are used.

The code also compares the 4-D, 3-D, and 2-D input cases.

Note that for 2-D input, nn.InstanceNorm1d cannot be computed; an instance norm over a single per-sample vector does not exist.
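
All three normalizations apply the same elementwise transformation and differ only in the axes over which the mean and variance are computed; this is exactly what the MyNorm_* helpers below implement:

    y = gamma * (x - mean(x, axis)) / sqrt(var(x, axis) + eps) + beta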

#-*- coding:utf-8 -*-
#Author LJB Create on 2021/8/27
import torch
import torch.nn as nn
import numpy as np


#For 4-D data [batchsize,channel,H,W], where [H,W] is the 2-D feature map of one instance
#LayerNorm: axis=(1,2,3)
#InstanceNorm: axis=(2,3)
#BatchNorm: axis=(0,2,3)
def MyNorm_4d(x, axis, gamma=1.0, beta=0.0):
    # x_shape:[B, C, H, W]
    eps = 1e-5
    x_mean = np.mean(x, axis=axis, keepdims=True)
    x_var = np.var(x, axis=axis, keepdims=True)
    x_normalized = (x - x_mean) / np.sqrt(x_var + eps)
    results = gamma * x_normalized + beta
    return results

#For 3-D data [batchsize,channel,d_model], where d_model is the 1-D feature dimension of one instance
#LayerNorm: axis=(1,2)
#InstanceNorm: axis=(2,)
#BatchNorm: axis=(0,2)
def MyNorm_3d(x, axis, gamma=1.0, beta=0.0):
    # x_shape:[B, C, D], where D is d_model
    eps = 1e-5
    x_mean = np.mean(x, axis=axis, keepdims=True)
    x_var = np.var(x, axis=axis, keepdims=True)
    x_normalized = (x - x_mean) / np.sqrt(x_var + eps)
    results = gamma * x_normalized + beta
    return results

#For 2-D data [batchsize,d_model], where d_model is the 1-D feature dimension of one instance
#LayerNorm: axis=1
#InstanceNorm: axis=1
#BatchNorm: axis=0
def MyNorm_2d(x, axis, gamma=1.0, beta=0.0):
    # x_shape:[B, D], where D is d_model
    eps = 1e-5
    x_mean = np.mean(x, axis=axis, keepdims=True)
    x_var = np.var(x, axis=axis, keepdims=True)
    x_normalized = (x - x_mean) / np.sqrt(x_var + eps)
    results = gamma * x_normalized + beta
    # print('x_mean x_std:',x_mean,np.sqrt(x_var + eps))
    return results
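
# The three helpers above are identical apart from their comments, so a single
# generic version would cover all cases (a sketch; the name my_norm is introduced here):
def my_norm(x, axis, gamma=1.0, beta=0.0, eps=1e-5):
    x_mean = np.mean(x, axis=axis, keepdims=True)
    x_var = np.var(x, axis=axis, keepdims=True)
    return gamma * (x - x_mean) / np.sqrt(x_var + eps) + beta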


if __name__=='__main__':
    #Toy data
    batchsize = 3
    channel = 2
    H = 5
    W = 7
    data = torch.randint(0,9,(batchsize,channel,H,W)).float()
    #Standard LayerNorm
    n1 = nn.LayerNorm(normalized_shape=[channel,H,W])(data)  # the argument is a shape
    n2 = MyNorm_4d(data.numpy(),axis=(1,2,3))  # the argument is a tuple of axes
    # print('+++ Normal Layernorm:',n1)
    # print('+++ My Layernorm:',n2)


    #For 4-D data one instance is an [H,W] matrix, so nn.LayerNorm can also be used as an InstanceNorm
    n3 = nn.LayerNorm(normalized_shape=[H,W])(data)  # the argument is a shape
    n4 = MyNorm_4d(data.numpy(),axis=(2,3))  # the argument is a tuple of axes
    #Standard InstanceNorm
    n5 = nn.InstanceNorm2d(channel)(data)  # the argument is the number of channels
    # print('+++ Normal Instance(Layernorm):')
    # print(n3)
    # print('+++ My Instance norm:')
    # print(n4)
    # print('+++ Normal InstanceNorm:')
    # print(n5)

    n6 = nn.BatchNorm2d(channel)(data)
    n7 = MyNorm_4d(data.numpy(),axis=(0,2,3))  # the argument is a tuple of axes
    # print('+++ Normal BatchNorm2d:')
    # print(n6)
    # print('+++ My batch norm:')
    # print(n7)
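
    # Sanity-check sketch (added): each library result should match its hand-rolled
    # counterpart up to float tolerance; detach() drops the affine layers' grads.
    print('LayerNorm == MyNorm_4d(1,2,3):', np.allclose(n1.detach().numpy(), n2, atol=1e-4))
    print('LayerNorm([H,W]) == MyNorm_4d(2,3):', np.allclose(n3.detach().numpy(), n4, atol=1e-4))
    print('InstanceNorm2d == MyNorm_4d(2,3):', np.allclose(n5.detach().numpy(), n4, atol=1e-4))
    print('BatchNorm2d == MyNorm_4d(0,2,3):', np.allclose(n6.detach().numpy(), n7, atol=1e-4))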

    ############################################################
    #For 3-D data with shape [batchsize,seqlen,d_model]
    batchsize_3d = 5
    seqlen = 4
    d_model = 3
    data_3d = torch.randint(0,9,(batchsize_3d,seqlen,d_model)).float()
    # print('+++ Normal LayerNorm:')
    # print(nn.LayerNorm([seqlen,d_model])(data_3d))  # here seqlen plays the role of the channel dim
    # print('+++ My LayerNorm:')
    # print(MyNorm_3d(data_3d.numpy(),axis=(1,2)))
    #
    # print('+++ Normal InstanceNorm1d:')
    # print(nn.InstanceNorm1d(seqlen)(data_3d))
    # print('+++ Normal Instance(LayerNorm):')
    # print(nn.LayerNorm(d_model)(data_3d))  # here seqlen is treated as the channel dim
    # print('+++ My Instance Norm:')
    # print(MyNorm_3d(data_3d.numpy(),axis=(2,)))
    #
    # print('+++ Normal BatchNorm1d:')
    # print(nn.BatchNorm1d(seqlen)(data_3d))
    # print('+++ My Batch Norm:')
    # print(MyNorm_3d(data_3d.numpy(),axis=(0,2)))
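
    # Sketch (added): numeric check of the 3-D claims; InstanceNorm1d has no
    # affine params by default, so no detach() is needed there.
    print('InstanceNorm1d == MyNorm_3d(2,):',
          np.allclose(nn.InstanceNorm1d(seqlen)(data_3d).numpy(),
                      MyNorm_3d(data_3d.numpy(),axis=(2,)), atol=1e-4))
    print('BatchNorm1d == MyNorm_3d(0,2):',
          np.allclose(nn.BatchNorm1d(seqlen)(data_3d).detach().numpy(),
                      MyNorm_3d(data_3d.numpy(),axis=(0,2)), atol=1e-4))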
    print('#'*80)
    batchsize_2d = 5
    d_model = 3
    data_2d = torch.randint(0,9,(batchsize_2d,d_model)).float()
    print('+++ Normal LayerNorm:')
    print(nn.LayerNorm([d_model])(data_2d))
    print('+++ My LayerNorm:')
    print(MyNorm_2d(data_2d.numpy(),axis=1))

    print('+++ Normal InstanceNorm1d: Error')
    # print(nn.InstanceNorm1d(1)(data_2d))  # raises an error for 2-D input
    print('+++ Normal Instance(LayerNorm):')
    print(nn.LayerNorm(d_model)(data_2d))
    print('+++ My Instance Norm:')
    print(MyNorm_2d(data_2d.numpy(),axis=1))

    print('+++ Normal BatchNorm1d:')
    print(nn.BatchNorm1d(d_model)(data_2d))  # for 2-D input the argument is d_model; see the class definition
    print('+++ My Batch Norm:')
    print(MyNorm_2d(data_2d.numpy(),axis=0))
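
    # Sketch (added): the corresponding 2-D checks.
    print('LayerNorm == MyNorm_2d(1):',
          np.allclose(nn.LayerNorm([d_model])(data_2d).detach().numpy(),
                      MyNorm_2d(data_2d.numpy(),axis=1), atol=1e-4))
    print('BatchNorm1d == MyNorm_2d(0):',
          np.allclose(nn.BatchNorm1d(d_model)(data_2d).detach().numpy(),
                      MyNorm_2d(data_2d.numpy(),axis=0), atol=1e-4))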

2. Demonstrates that in eval mode only BatchNorm changes behavior (it switches from batch statistics to its stored running statistics); the other norm layers are unaffected.

import torch
import torch.nn as nn
import numpy as np

class NormModel(nn.Module):
    def __init__(self,size):
        super(NormModel,self).__init__()
        self.layer_norm = nn.LayerNorm(size)  # not affected by eval()
        self.instance_norm = nn.InstanceNorm2d(3)  # not affected by eval()
        self.batch_norm = nn.BatchNorm2d(3)  # in eval mode, uses running statistics instead of batch statistics


norm = NormModel((5,7))
data = torch.randint(0,9,(2,3,5,7)).float()
# print(list(norm.named_parameters()))
norm.eval()
print(data)
print('*'*50)
print(norm.layer_norm(data))
print('*'*50)
print(norm.instance_norm(data))
print('*'*50)
print(norm.batch_norm(data))
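
As a quick check of this claim, here is a minimal sketch reusing the norm and data defined above: run each layer in train mode and again in eval mode; only batch_norm should change (its train-mode forward pass also updates the running statistics).

norm.train()
train_out = [norm.layer_norm(data), norm.instance_norm(data), norm.batch_norm(data)]
norm.eval()
eval_out = [norm.layer_norm(data), norm.instance_norm(data), norm.batch_norm(data)]
for name, a, b in zip(['layer_norm','instance_norm','batch_norm'], train_out, eval_out):
    print(name, 'unchanged by eval:', torch.allclose(a, b, atol=1e-5))  # expect True, True, False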
