Batch Normalization Explained with a PyTorch Experiment
Batch Normalization addresses the problem of how to reduce the impact on deeper layers of a network (on their parameters) when the distribution of the input data shifts. By computing the per-batch mean and variance and applying the learnable parameters γ and β, inputs drawn from different distributions are kept in a numerically stable range while the characteristics of the data are preserved.
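Concretely, for a mini-batch $\mathcal{B}$, batch normalization uses the standard formulas (stated here for reference):

$$\mu_\mathcal{B} = \frac{1}{|\mathcal{B}|}\sum_{x\in\mathcal{B}} x,\qquad \sigma_\mathcal{B}^2 = \frac{1}{|\mathcal{B}|}\sum_{x\in\mathcal{B}} (x-\mu_\mathcal{B})^2$$

$$\hat{x} = \frac{x-\mu_\mathcal{B}}{\sqrt{\sigma_\mathcal{B}^2+\epsilon}},\qquad y = \gamma\,\hat{x} + \beta$$

where $\epsilon$ is a small constant added for numerical stability, and $\gamma$, $\beta$ are learned scale and shift parameters. At inference time the per-batch statistics are replaced by running (moving-average) estimates, which is exactly what the code below maintains.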
## Batch Normalization
```python
import torch
from torch import nn
from d2l import torch as d2l

def batch_norm(x, gamma, beta, quanju_mean, quanju_fangcha, xiaopianyishu, moment):
    # quanju_mean / quanju_fangcha: running (global) mean and variance;
    # xiaopianyishu: small epsilon for numerical stability; moment: momentum
    if not torch.is_grad_enabled():  # prediction mode: use the running statistics
        x_hat = (x - quanju_mean) / torch.sqrt(quanju_fangcha + xiaopianyishu)
    else:
        # 2 corresponds to the two dimensions of a fully connected layer
        # (batch, features); 4 to the four dimensions of a convolutional
        # layer (batch, channels, height, width)
        assert len(x.shape) in (2, 4)
        if len(x.shape) == 2:
            # Fully connected case: mean and variance along the feature dimension
            mean = x.mean(dim=0)
            fangcha = ((x - mean) ** 2).mean(dim=0)
        else:
            # 2D convolution case: mean and variance per channel (axis=1).
            # Keep the shape of x so that broadcasting works later
            mean = x.mean(dim=(0, 2, 3), keepdim=True)
            fangcha = ((x - mean) ** 2).mean(dim=(0, 2, 3), keepdim=True)
        # In training mode, standardize with the current batch statistics
        x_hat = (x - mean) / torch.sqrt(fangcha + xiaopianyishu)
        # Update the moving averages of the mean and variance
        quanju_mean = moment * quanju_mean + (1 - moment) * mean
        quanju_fangcha = moment * quanju_fangcha + (1 - moment) * fangcha
    y = gamma * x_hat + beta  # scale and shift
    return y, quanju_mean.data, quanju_fangcha.data
```
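A quick sanity check, not from the original post: with gamma = 1 and beta = 0, each channel of the training-mode output should come out with roughly zero mean and unit variance. A minimal sketch:

```python
# Sanity check for batch_norm (illustrative)
x = torch.randn(8, 3, 4, 4)
gamma, beta = torch.ones((1, 3, 1, 1)), torch.zeros((1, 3, 1, 1))
mean0, var0 = torch.zeros((1, 3, 1, 1)), torch.ones((1, 3, 1, 1))
y, m, v = batch_norm(x, gamma, beta, mean0, var0, xiaopianyishu=1e-5, moment=0.9)
print(y.mean(dim=(0, 2, 3)))                 # per-channel means, close to 0
print(y.var(dim=(0, 2, 3), unbiased=False))  # per-channel variances, close to 1
```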
```python
class batchnormal(nn.Module):
    # tezheng (num_features): number of outputs of a fully connected layer,
    # or number of output channels of a convolutional layer.
    # num_dim: 2 for a fully connected layer, 4 for a convolutional layer
    def __init__(self, tezheng, num_dim):
        super().__init__()
        if num_dim == 2:
            shape = (1, tezheng)
        else:
            shape = (1, tezheng, 1, 1)
        # Learnable scale and shift, initialized to 1 and 0 respectively
        self.gamma = nn.Parameter(torch.ones(shape))
        self.beta = nn.Parameter(torch.zeros(shape))
        # Running statistics are not model parameters; they are updated
        # by a moving average during training
        self.quanju_mean = torch.zeros(shape)
        self.quanju_fangcha = torch.ones(shape)

    def forward(self, x):
        # Copy the running statistics to the device of x (e.g., the GPU)
        if self.quanju_mean.device != x.device:
            self.quanju_mean = self.quanju_mean.to(x.device)
            self.quanju_fangcha = self.quanju_fangcha.to(x.device)
        y, self.quanju_mean, self.quanju_fangcha = batch_norm(
            x, self.gamma, self.beta, self.quanju_mean, self.quanju_fangcha,
            xiaopianyishu=1e-5, moment=0.9)
        return y
```
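To convince yourself that the layer behaves as intended, a small smoke test (my addition, not in the original post) can show the running statistics being updated in training mode and then used when gradients are disabled:

```python
# Quick check of the custom layer (illustrative)
bn = batchnormal(3, num_dim=4)
x = torch.randn(8, 3, 4, 4)
_ = bn(x)                        # training mode: updates quanju_mean / quanju_fangcha
print(bn.quanju_mean.flatten())  # has moved away from the initial zeros
with torch.no_grad():
    y = bn(x)                    # inference: standardizes with the running stats
```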
## LeNet with Batch Normalization Layers
```python
net = nn.Sequential(
    nn.Conv2d(1, 6, kernel_size=5), batchnormal(6, num_dim=4), nn.Sigmoid(),
    nn.AvgPool2d(kernel_size=2, stride=2),
    nn.Conv2d(6, 16, kernel_size=5), batchnormal(16, num_dim=4), nn.Sigmoid(),
    nn.AvgPool2d(kernel_size=2, stride=2), nn.Flatten(),
    nn.Linear(16 * 4 * 4, 120), batchnormal(120, num_dim=2), nn.Sigmoid(),
    nn.Linear(120, 84), batchnormal(84, num_dim=2), nn.Sigmoid(),
    nn.Linear(84, 10)
)
```
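To see why the first fully connected layer takes 16 * 4 * 4 inputs, you can trace a dummy Fashion-MNIST-sized input through the network, a common d2l-style check (the printed shapes follow from the layers above):

```python
# Trace shapes through the network with a dummy 28x28 input (illustrative)
x = torch.randn(1, 1, 28, 28)
for layer in net:
    x = layer(x)
    print(layer.__class__.__name__, 'output shape:', x.shape)
# Conv2d -> (1, 6, 24, 24), AvgPool2d -> (1, 6, 12, 12),
# Conv2d -> (1, 16, 8, 8),  AvgPool2d -> (1, 16, 4, 4), Flatten -> (1, 256)
```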
```python
# xuexilv: learning rate; batch: batch size; xunliancishu: number of epochs
xuexilv, batch, xunliancishu = 1.0, 256, 10
train_iter, test_iter = d2l.load_data_fashion_mnist(batch)
d2l.train_ch6(net, train_iter, test_iter, xunliancishu, xuexilv, d2l.try_gpu())
d2l.plt.show()
```
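For comparison, PyTorch's built-in `nn.BatchNorm2d` and `nn.BatchNorm1d` implement the same computation, handling the running statistics internally. A concise version of the same LeNet would look like the sketch below (`net_concise` is just an illustrative name):

```python
# Concise version using PyTorch's built-in batch normalization layers
net_concise = nn.Sequential(
    nn.Conv2d(1, 6, kernel_size=5), nn.BatchNorm2d(6), nn.Sigmoid(),
    nn.AvgPool2d(kernel_size=2, stride=2),
    nn.Conv2d(6, 16, kernel_size=5), nn.BatchNorm2d(16), nn.Sigmoid(),
    nn.AvgPool2d(kernel_size=2, stride=2), nn.Flatten(),
    nn.Linear(16 * 4 * 4, 120), nn.BatchNorm1d(120), nn.Sigmoid(),
    nn.Linear(120, 84), nn.BatchNorm1d(84), nn.Sigmoid(),
    nn.Linear(84, 10)
)
```

The built-in layers are usually preferred in practice: they fuse the normalization into optimized kernels and switch between batch and running statistics automatically via `net.train()` / `net.eval()`.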