pytorch中BatchNorm2d的方法测试
代码测试
BatchNorm2d 是对一个 batch 中的每个 channel 进行归一化：对该 channel 在 (N, H, W) 三个维度上的全部元素求均值和（有偏）方差，再做标准化
# Build a BatchNorm2d over 3 channels and run a random batch through it.
from torch import nn
import torch
# One (running_mean, running_var, weight, bias) per channel; defaults: eps=1e-5, affine=True.
m = nn.BatchNorm2d(3)
# Random input shaped (N=4, C=3, H=2, W=2).  NOTE(review): `input` shadows the builtin — kept as-is
# because later lines in this transcript reference it.
input = torch.randn(4,3,2,2)
# Forward pass: each channel is normalized over its (N, H, W) elements.
output = m(input)
# Echo the raw input (the tensor printout follows in the transcript).
input
tensor([[[[ 0.4306, -0.7709],
[ 0.6851, 0.1494]],
[[ 1.7471, -1.7052],
[-0.5291, 1.1020]],
[[-0.0628, 0.3957],
[-1.2972, 0.2755]]],
[[[-1.4248, -0.9931],
[ 0.2116, -0.1276]],
[[ 1.1484, -1.7007],
[ 0.6152, -0.5849]],
[[ 1.6353, -0.4531],
[-0.1967, 0.3131]]],
[[[ 0.7961, -1.0402],
[-0.7531, -0.3165]],
[[ 0.3842, 0.1521],
[-0.4894, -1.6995]],
[[ 1.5982, 1.1514],
[ 2.3785, -2.1560]]],
[[[-1.5085, -0.7050],
[ 2.7356, -0.0884]],
[[ 0.7462, -0.0832],
[-0.8313, -0.8382]],
[[-0.1408, 1.9773],
[-0.0097, 0.0597]]]])
output
tensor([[[[ 0.5908, -0.5911],
[ 0.8412, 0.3142]],
[[ 1.8381, -1.4886],
[-0.3553, 1.2165]],
[[-0.3516, 0.0468],
[-1.4244, -0.0576]]],
[[[-1.2344, -0.8097],
[ 0.3754, 0.0417]],
[[ 1.2612, -1.4843],
[ 0.7474, -0.4091]],
[[ 1.1242, -0.6908],
[-0.4680, -0.0249]]],
[[[ 0.9504, -0.8560],
[-0.5736, -0.1442]],
[[ 0.5248, 0.3012],
[-0.3170, -1.4831]],
[[ 1.0919, 0.7036],
[ 1.7700, -2.1707]]],
[[[-1.3168, -0.5263],
[ 2.8583, 0.0803]],
[[ 0.8736, 0.0744],
[-0.6465, -0.6531]],
[[-0.4193, 1.4213],
[-0.3055, -0.2451]]]], grad_fn=<NativeBatchNormBackward>)
m.weight
Parameter containing:
tensor([1., 1., 1.], requires_grad=True)
m.bias
Parameter containing:
tensor([0., 0., 0.], requires_grad=True)
m.eps
1e-05
# Manually reproduce BatchNorm for channel 0 and compare against the layer's output.
# BN's formula is (x - mean) / sqrt(var + eps), using the *biased* variance.
firstChannelMean = torch.Tensor.mean(input[:,0])
firstChannelMean
# tensor(-0.1700)
# unbiased=False: biased (population) variance, matching what BatchNorm uses internally.
firstChannelVar = torch.Tensor.var(input[:,0],False)
firstChannelVar
# tensor(1.0333)
# BUG FIX: eps belongs *inside* the square root — (x - mean) / sqrt(var + eps),
# not (x - mean) / (sqrt(var) + eps).  The original only matched `output` because
# var happened to be ~1, where the two forms are numerically close; for small
# variances they diverge significantly.
batchnormal1=(input[0][0][0][0]-firstChannelMean)/torch.pow(firstChannelVar+m.eps,0.5)
batchnormal1
# tensor(0.5908)
# Matches the layer's own result for the same element:
output[0][0][0][0]
# tensor(0.5908, grad_fn=<SelectBackward>)