Java implementation
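Batch normalization rescales every element of a channel using that channel's mean and (population) variance. The code below implements the standard formula documented for nn.BatchNorm2d:

$$ y = \frac{x - \mathrm{E}[x]}{\sqrt{\mathrm{Var}[x] + \epsilon}} \cdot \gamma + \beta $$

where $\gamma$ is the learnable weight, $\beta$ is the learnable bias, and $\epsilon$ (1e-5 by default) guards against division by zero.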
/**
 * Batch normalization applied to a single image (one channel).
 *
 * @author SXC 2020-09-11 14:00:15
 */
public class BatchNorm2d {
    // Hard-coded to reproduce the PyTorch run below; the actual defaults are weight = 1, bias = 0.
    double weight = 0.5653, bias = 0;

    public BatchNorm2d(int num_features) {
        // num_features is the number of input channels (feature maps); unused for now, see the note below.
    }

    public double mean(double[] x) {
        double meanValue = 0;
        for (int i = 0; i < x.length; i++) {
            meanValue += x[i];
        }
        return meanValue / x.length;
    }

    // Population variance (divides by N, i.e. no Bessel's correction), as batch norm uses.
    public double var(double[] x) {
        double meanValue = mean(x);
        double varValue = 0;
        for (int i = 0; i < x.length; i++) {
            varValue += (x[i] - meanValue) * (x[i] - meanValue);
        }
        return varValue / x.length;
    }

    public void setBias(double bias) {
        this.bias = bias;
    }

    public void setWeight(double weight) {
        this.weight = weight;
    }

    public double getBias() {
        return bias;
    }

    public double getWeight() {
        return weight;
    }

    public double[] out(double[] x) {
        // Compute the statistics once, rather than on every loop iteration.
        double meanValue = mean(x);
        double varValue = var(x);
        double[] out = new double[x.length];
        for (int i = 0; i < x.length; i++) {
            out[i] = (x[i] - meanValue) / Math.sqrt(varValue + 1e-5) * weight + bias;
        }
        return out;
    }
}
Test code
public class t {
    public static void main(String[] args) {
        BatchNorm2d bn = new BatchNorm2d(2);
        double[] x = {-2.4308, -1.0281, -1.1322, 0.9819,
                      -0.4069,  0.7973, 1.6296,  1.6797,
                       0.2802, -0.8285, 2.0101,  0.1286};
        double[] k = bn.out(x);
        for (int i = 0; i < k.length; i++) {
            System.out.println(k[i]);
        }
    }
}
Output
-1.1235822212153521
-0.5105423878128046
-0.5560385357112898
0.36791349380059857
-0.2390514456942742
0.28723540636486006
0.6509860681891563
0.6728819088204158
0.06124061214481498
-0.4233086594695034
0.8172808258776638
-0.005015065294285479
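Up to the four-decimal rounding of the tensor printouts, these twelve values agree with the first-channel output of the PyTorch runs below.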
PyTorch implementation 1
import torch
import torch.nn as nn

m = nn.BatchNorm2d(2, affine=True)  # affine=True: the learnable weight and bias are used
input = torch.randn(1, 2, 3, 4)
output = m(input)
print("Input image:")
print(input)
print("Normalization weight:")
print(m.weight)
print("Normalization bias:")
print(m.bias)
print("Normalized output:")
print(output)
print("Output size:")
print(output.size())
print("First channel of the input:")
print(input[0][0])
firstDimenMean = torch.Tensor.mean(input[0][0])
firstDimenVar = torch.Tensor.var(input[0][0], False)  # unbiased=False: Bessel's correction is not used
print(m.eps)
print("Mean of the first channel:")
print(firstDimenMean)
print("Variance of the first channel:")
print(firstDimenVar)
batchnorm_one = ((input[0][0][0][0] - firstDimenMean)
                 / torch.pow(firstDimenVar + m.eps, 0.5)
                 * m.weight[0] + m.bias[0])
print(batchnorm_one)
Input image:
tensor([[[[-2.4308, -1.0281, -1.1322,  0.9819],
          [-0.4069,  0.7973,  1.6296,  1.6797],
          [ 0.2802, -0.8285,  2.0101,  0.1286]],

         [[-0.5740,  0.1970, -0.7209, -0.7231],
          [-0.1489,  0.4993,  0.4159,  1.4238],
          [ 0.0334, -0.6333,  0.1308, -0.2180]]]])
Normalization weight:
Parameter containing:
tensor([ 0.5653,  0.0322])
Normalization bias:
Parameter containing:
tensor([ 0.,  0.])
Normalized output:
tensor([[[[-1.1237, -0.5106, -0.5561,  0.3679],
          [-0.2391,  0.2873,  0.6510,  0.6729],
          [ 0.0612, -0.4233,  0.8173, -0.0050]],

         [[-0.0293,  0.0120, -0.0372, -0.0373],
          [-0.0066,  0.0282,  0.0237,  0.0777],
          [ 0.0032, -0.0325,  0.0084, -0.0103]]]])
Output size:
torch.Size([1, 2, 3, 4])
First channel of the input:
tensor([[-2.4308, -1.0281, -1.1322,  0.9819],
        [-0.4069,  0.7973,  1.6296,  1.6797],
        [ 0.2802, -0.8285,  2.0101,  0.1286]])
1e-05
Mean of the first channel:
tensor(0.1401)
Variance of the first channel:
tensor(1.6730)
tensor(-1.1237)
The outputs are the same. However, it is still unclear here how the weight and bias values are obtained, and effectively only BatchNorm2d(1) is implemented: num_features is never used, which needs to be fixed later.
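As one possible direction for that fix, here is a minimal per-channel sketch (an assumption about the intended design, not part of the original code): each of the num_features channels keeps its own statistics, weight, and bias, mirroring the per-channel affine parameters of nn.BatchNorm2d.

import java.util.Arrays;

/**
 * Hypothetical per-channel extension of the class above; x[c] holds the
 * flattened pixels of channel c.
 */
public class BatchNorm2dMulti {
    private final int numFeatures;
    private final double[] weight; // one gamma per channel, default 1 (PyTorch default)
    private final double[] bias;   // one beta per channel, default 0

    public BatchNorm2dMulti(int numFeatures) {
        this.numFeatures = numFeatures;
        this.weight = new double[numFeatures];
        this.bias = new double[numFeatures]; // already all zero
        Arrays.fill(this.weight, 1.0);
    }

    public void setWeight(int channel, double value) { weight[channel] = value; }
    public void setBias(int channel, double value)   { bias[channel] = value; }

    public double[][] out(double[][] x) {
        double[][] result = new double[numFeatures][];
        for (int c = 0; c < numFeatures; c++) {
            // Per-channel mean and population variance, as in the single-channel class.
            double mean = 0;
            for (double v : x[c]) mean += v;
            mean /= x[c].length;
            double var = 0;
            for (double v : x[c]) var += (v - mean) * (v - mean);
            var /= x[c].length;
            result[c] = new double[x[c].length];
            for (int i = 0; i < x[c].length; i++) {
                result[c][i] = (x[c][i] - mean) / Math.sqrt(var + 1e-5) * weight[c] + bias[c];
            }
        }
        return result;
    }
}

With new BatchNorm2dMulti(2), setting setWeight(0, 0.5653) and setWeight(1, 0.0322) and passing the two channels as a 2 × 12 array should reproduce both channels of the PyTorch output above.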
PyTorch implementation 2
import torch
import torch.nn as nn

n = nn.BatchNorm2d(num_features=1)
w = torch.tensor([0.5653], dtype=torch.float32)
print(n.weight)                    # default weight
n.weight = torch.nn.Parameter(w)   # overwrite it with the value used in the Java code
k = torch.tensor([[[[-2.4308, -1.0281, -1.1322,  0.9819],
                    [-0.4069,  0.7973,  1.6296,  1.6797],
                    [ 0.2802, -0.8285,  2.0101,  0.1286]]]])
print(n.weight)
y = n(k)
print(y)
Output
Parameter containing:
tensor([1.], requires_grad=True)
Parameter containing:
tensor([0.5653], requires_grad=True)
tensor([[[[-1.1236, -0.5105, -0.5560,  0.3679],
          [-0.2391,  0.2872,  0.6510,  0.6729],
          [ 0.0612, -0.4233,  0.8173, -0.0050]]]],
       grad_fn=<NativeBatchNormBackward>)
The default weight of the original BatchNorm2d is 1, and the default bias is 0.
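This also explains the value hard-coded in the Java class: the weight 0.5653 is the first-channel weight printed in PyTorch implementation 1, which is why the Java and PyTorch outputs line up.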