深度学习中五种归一化原理

调用库API实现以及手写实现

import torch 
import torch.nn as nn

# Demo setup: one random NLP-style batch of shape [N, L, C].
batch_size = 2     # N: number of samples (fixed typo: was `batch_szie`)
time_steps = 3     # L: sequence length
embedding_dim = 4  # C: channels / feature dimension
num_group = 2      # group count for GroupNorm; must divide embedding_dim

inputx = torch.randn(batch_size, time_steps, embedding_dim)  # N*L*C
  •  批归一化  BatchNorm1d

## 1. Batch normalization: hand-written batch_norm verified against the API ---- per channel
# NLP: [N,L,C] -> statistics reduced over N and L, one (mean, var) per channel C
# CV:  [N,C,H,W] -> one (mean, var) per channel C

# BatchNorm1d expects [N, C, L], so transpose in and transpose back out.
batch_norm_op = torch.nn.BatchNorm1d(embedding_dim, affine=False)
bn_y = batch_norm_op(inputx.transpose(-1, -2)).transpose(-1, -2)

# Hand-written batch_norm: per-channel statistics over the (N, L) dims.
bn_mean = inputx.mean(dim=(0, 1), keepdim=True)
bn_var = inputx.var(dim=(0, 1), unbiased=False, keepdim=True)
# BUGFIX: the API computes (x - mean) / sqrt(var + eps); the original code
# divided by (std + eps), which only approximates the API's result.
verify_bn_y = (inputx - bn_mean) / torch.sqrt(bn_var + 1e-5)
print("bn_y:", bn_y)
print("verify_bn_y:", verify_bn_y)

  • 层归一化 LayerNorm

## 2. Layer normalization: hand-written layer_norm verified against the API ---- per sample, per position
# NLP: [N,L,C] -> statistics reduced over C, one (mean, var) per (N, L)
# CV:  [N,C,H,W] -> one (mean, var) per (N, H, W)

layer_norm_op = torch.nn.LayerNorm(embedding_dim, elementwise_affine=False)
ln_y = layer_norm_op(inputx)

# Hand-written layer_norm: statistics over the last (feature) dim only.
ln_mean = inputx.mean(dim=-1, keepdim=True)
ln_var = inputx.var(dim=-1, unbiased=False, keepdim=True)
# BUGFIX: LayerNorm uses (x - mean) / sqrt(var + eps), not (x - mean) / (std + eps).
verify_ln_y = (inputx - ln_mean) / torch.sqrt(ln_var + 1e-5)
print("ln_y:", ln_y)
print("verify_ln_y:", verify_ln_y)

 

  • 实例归一化 InstanceNorm1d

## 3. Instance normalization: hand-written instance_norm verified against the API ---- per sample, per channel
# (removes instance-specific contrast/statistics; commonly used in style transfer)
# NLP: [N,L,C] -> statistics reduced over L, one (mean, var) per (N, C)
# CV:  [N,C,H,W] -> one (mean, var) per (N, C)

# InstanceNorm1d expects [N, C, L], so transpose in and transpose back out.
ins_norm_op = torch.nn.InstanceNorm1d(embedding_dim)
in_y = ins_norm_op(inputx.transpose(-1, -2)).transpose(-1, -2)

# Hand-written instance_norm: statistics over the time dim, per sample and channel.
in_mean = inputx.mean(dim=1, keepdim=True)
in_var = inputx.var(dim=1, unbiased=False, keepdim=True)
# BUGFIX: the API computes (x - mean) / sqrt(var + eps), not (x - mean) / (std + eps).
verify_in_y = (inputx - in_mean) / torch.sqrt(in_var + 1e-5)
print("in_y:", in_y)
print("verify_in_y:", verify_in_y)

 

  • 群归一化 GroupNorm

## 4. Group normalization: hand-written group_norm verified against the API ------- per sample, per group
# NLP: view as [N,G,L,C//G] -> statistics reduced over (L, C//G), one (mean, var) per (N, G)
# CV:  view as [N,G,C//G,H,W] -> one (mean, var) per (N, G)

# GroupNorm expects [N, C, L], so transpose in and transpose back out.
group_norm_op = torch.nn.GroupNorm(num_group, embedding_dim, affine=False)
gn_y = group_norm_op(inputx.transpose(-1, -2)).transpose(-1, -2)

# Hand-written group_norm: split channels into groups, normalize each group
# over its (time, channel) dims, then stitch the groups back together.
group_inputxs = torch.split(inputx, split_size_or_sections=embedding_dim // num_group, dim=-1)
results = []
for g_inputx in group_inputxs:
    gn_mean = g_inputx.mean(dim=(1, 2), keepdim=True)
    gn_var = g_inputx.var(dim=(1, 2), unbiased=False, keepdim=True)
    # BUGFIX: the API computes (x - mean) / sqrt(var + eps), not (x - mean) / (std + eps).
    results.append((g_inputx - gn_mean) / torch.sqrt(gn_var + 1e-5))
verify_gn_y = torch.cat(results, dim=-1)
print("gn_y:", gn_y)
print("verify_gn_y:", verify_gn_y)

 

  • 权重归一化  weight_norm

### 5. Weight normalization: hand-written weight_norm verified against the API
# weight_norm re-parameterizes w as g * v / ||v||, decoupling the weight's
# magnitude (g) from its direction (v / ||v||).
linear = nn.Linear(embedding_dim, 3, bias=False)
wn_linear = torch.nn.utils.weight_norm(linear)
wn_linear_output = wn_linear(inputx)
print(wn_linear_output.shape)

# Hand-written weight_norm: direction is each weight row scaled to unit norm;
# magnitude is the learned per-output-row scale stored by the API as weight_g.
row_norms = linear.weight.norm(dim=1, keepdim=True)
weight_direction = linear.weight / row_norms
weight_magnitude = wn_linear.weight_g
print(weight_direction.shape)
print(weight_magnitude.shape)
# x @ direction^T gives the unit-direction projection; scaling each output
# column by its magnitude reproduces the wrapped layer's output.
projected = inputx @ weight_direction.transpose(-1, -2)
verify_wn_linear_output = projected * weight_magnitude.transpose(-1, -2)
print("wn_linear_output:", wn_linear_output)
print("verify_wn_linear_output:", verify_wn_linear_output)

 

 

  • 3
    点赞
  • 5
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值