Keras in Practice 5: Implementing Batch Normalization, Activation Functions, and Dropout

This post walks through using batch normalization, the SELU activation function, and dropout in Keras. Batch normalization speeds up training and makes the model more stable. SELU is of interest because it is self-normalizing. When the number of training samples is limited, however, applying dropout can degrade model performance.

Batch normalization

model = keras.models.Sequential()
model.add(keras.layers.Flatten(input_shape=[28, 28]))
for _ in range(20):
    model.add(keras.layers.Dense(100, activation="relu"))
    # Normalization turns elliptically distributed activations (which make gradient
    # descent zig-zag instead of heading straight for the optimum) into a rounder distribution.
    # Here BN is added after the activation of each hidden layer.
    model.add(keras.layers.BatchNormalization())

model.add(keras.layers.Dense(10, activation="softmax"))
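
The summary also mentions SELU and dropout. Below is a minimal sketch of the same stack using SELU, which is self-normalizing and therefore usually paired with AlphaDropout rather than plain Dropout; the rate of 0.5 is only an illustration, and with few samples dropout may hurt accuracy:

model_selu = keras.models.Sequential()
model_selu.add(keras.layers.Flatten(input_shape=[28, 28]))
for _ in range(20):
    model_selu.add(keras.layers.Dense(100, activation="selu"))  # self-normalizing activation, no BN layer needed
model_selu.add(keras.layers.AlphaDropout(rate=0.5))  # AlphaDropout preserves SELU's mean/variance; placed only before the output layer
model_selu.add(keras.layers.Dense(10, activation="softmax"))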
##################################################################################################
import matplotlib as mpl  # Matplotlib is Python's plotting library; it works together with NumPy

import matplotlib.pyplot as plt  # pyplot is matplotlib's plotting interface

#%matplotlib inline  # in Jupyter Notebook / QtConsole, this makes plot() output render inline below the cell

import numpy as np  # numerical computing; stores and manipulates large arrays and matrices

import sklearn  # common machine-learning toolkit wrapping regression, dimensionality reduction, classification, clustering, etc.

import pandas as pd  # data-analysis package
import os  # operating-system interface for files and directories
import sys  # functions related to the Python interpreter and its environment
import time
import tensorflow as tf

from tensorflow import keras
##################################################################################################
# choose which GPU is visible to TensorFlow
os.environ["CUDA_VISIBLE_DEVICES"] = "0"

##################################################################################################

print(tf.__version__)
print(sys.version_info)
for module in mpl, np, pd, sklearn, tf, keras:
    print(module.__name__, module.__version__)

##################################################################################################


fashion_mnist = keras.datasets.fashion_mnist  # Fashion-MNIST from keras.datasets

(x_train_all, y_train_all), (x_test, y_test) = fashion_mnist.load_data()  # load_data returns the train and test sets; x are images, y are labels

x_valid, x_train = x_train_all[:5000], x_train_all[5000:]  # first 5000 samples as validation, the rest as training
y_valid, y_train = y_train_all[:5000], y_train_all[5000:]  # matching split for the labels

print(x_valid.shape, y_valid.shape)  # validation set
print(x_train.shape, y_train.shape)  # training set
print(x_test.shape, y_test.shape)    # test set
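# With the standard Fashion-MNIST split, the three prints above should show:
# (5000, 28, 28) (5000,)
# (55000, 28, 28) (55000,)
# (10000, 28, 28) (10000,)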
##################################################################################################

print(np.max(x_train), np.min(x_train))

##################################################################################################
# x = (x - u) / std, where u is the mean and std is the standard deviation;
# after scaling, the data has zero mean and unit variance (standard-normal style)

from sklearn.preprocessing import StandardScaler

scaler = StandardScaler()

x_train_scaled = scaler.fit_transform(
    x_train.astype(np.float32).reshape(-1, 1)).reshape(-1, 28, 28)
x_valid_scaled = scaler.transform(
    x_valid.astype(np.float32).reshape(-1, 1)).reshape(-1, 28, 28)
x_test_scaled = scaler.transform(
    x_test.astype(np.float32).reshape(-1, 1)).reshape(-1, 28, 28)

# before scaling the accuracy hovers around 0.1; after scaling it reaches about 0.9
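
# The reshape(-1, 1) makes StandardScaler treat every pixel as a sample of one
# scalar feature, so a single global mean/std is fit on the training set and
# reused for validation and test. An equivalent manual sketch (same arrays as above):
mu = x_train.astype(np.float32).mean()
sigma = x_train.astype(np.float32).std()
x_train_manual = (x_train.astype(np.float32) - mu) / sigma  # matches x_train_scaled
x_valid_manual = (x_valid.astype(np.float32) - mu) / sigma  # note: training statistics, not the validation set's own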

##################################################################################################

model = keras.models.Sequential()
model.add(keras.layers.Flatten(input_shape=[28, 28]))
for _ in range(20):
    model.add(keras.layers.Dense(100, activation="relu"))
    # Normalization turns elliptically distributed activations (which make gradient
    # descent zig-zag instead of heading straight for the optimum) into a rounder distribution.
    # Here BN is added after the activation of each hidden layer.
    model.add(keras.layers.BatchNormalization())

model.add(keras.layers.Dense(10, activation="softmax"))
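
# Whether BN should come before or after the activation is debated; the original
# BN paper applies it to the pre-activation. A sketch of that variant with the
# same layer sizes (model_pre is an illustrative name, not used below):
model_pre = keras.models.Sequential()
model_pre.add(keras.layers.Flatten(input_shape=[28, 28]))
for _ in range(20):
    model_pre.add(keras.layers.Dense(100))             # linear layer, no activation yet
    model_pre.add(keras.layers.BatchNormalization())   # normalize the pre-activations
    model_pre.add(keras.layers.Activation("relu"))     # activate after BN
model_pre.add(keras.layers.Dense(10, activation="softmax"))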

##################################################################################################


# configure the training setup
model.compile(loss="sparse_categorical_crossentropy",  # loss function
              optimizer="sgd",                         # optimizer name
              metrics=["accuracy"])
# reason for "sparse": y is a vector of integer class indices (one value per
# sample); the sparse loss turns each index into a one-hot vector internally,
# so no manual one-hot encoding of y is needed
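
# If the labels were one-hot encoded instead, the non-sparse loss would apply.
# A small sketch of the difference (y_train_onehot is illustrative only):
y_train_onehot = keras.utils.to_categorical(y_train, num_classes=10)
print(y_train[0], y_train_onehot[0])  # e.g. 4 -> [0. 0. 0. 0. 1. 0. 0. 0. 0. 0.]
# with y_train_onehot you would compile with loss="categorical_crossentropy"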
##################################################################################################

print(model.layers)
model.summary()  # prints the layer table itself and returns None; output shown at the end of this post

##################################################################################################
# callbacks: TensorBoard, EarlyStopping, ModelCheckpoint
#logdir = './keras实战/callbacks'  # this form errored when run, likely a Windows path-separator issue
logdir = "keras实战"
logdir = os.path.join(logdir, "dnn-bn-callbacks")  # let os.path.join assemble the path instead of hard-coding slashes

if not os.path.exists(logdir):
    os.makedirs(logdir)  # makedirs also creates the parent "keras实战" directory if it is missing
output_model_file = os.path.join(logdir,"fashion_mnist_model.h5")

callbacks = [
    keras.callbacks.TensorBoard(logdir),  # write TensorBoard logs into logdir
    keras.callbacks.ModelCheckpoint(output_model_file,    # path to save the model to
                                    save_best_only = True),  # keep only the best model so far
    #keras.callbacks.EarlyStopping(patience=5, min_delta=1e-3),  # stop once the loss improves by less than 1e-3 for 5 consecutive epochs
    ]
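
# To enable early stopping, the commented-out callback above can be filled in;
# a sketch with explicit arguments (restore_best_weights is an optional tf.keras flag):
early_stop = keras.callbacks.EarlyStopping(
    monitor="val_loss",          # watch the validation loss
    min_delta=1e-3,              # improvements smaller than this do not count
    patience=5,                  # stop after 5 epochs without improvement
    restore_best_weights=True)   # roll back to the best epoch's weights
# callbacks.append(early_stop)   # uncomment to actually use it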


##################################################################################################
# start training
history = model.fit(x_train_scaled, y_train, epochs=10,
                    validation_data=(x_valid_scaled, y_valid),
                    callbacks = callbacks)


##################################################################################################
# plot the training curves
def plot_learning_curves(history):
    pd.DataFrame(history.history).plot(figsize=(8,5))
    plt.grid(True)
    plt.gca().set_ylim(0,3)
    plt.show()


plot_learning_curves(history)
# batch normalization helps mitigate vanishing gradients

# as noted above: normalization turns elliptically distributed data (hard for gradient descent to traverse) into a rounder distribution

##################################################################################################

# evaluate the trained model on the test set
model.evaluate(x_test_scaled,y_test)
Output of model.summary():

Model: "sequential"
_________________________________________________________________
Layer (type)                 Output Shape              Param #
=================================================================
flatten (Flatten)            (None, 784)               0
_________________________________________________________________
dense (Dense)                (None, 100)               78500
_________________________________________________________________
batch_normalization (BatchNo (None, 100)               400
_________________________________________________________________
dense_1 (Dense)              (None, 100)               10100
_________________________________________________________________
batch_normalization_1 (Batch (None, 100)               400
_________________________________________________________________
dense_2 (Dense)              (None, 100)               10100
_________________________________________________________________
batch_normalization_2 (Batch (None, 100)               400
_________________________________________________________________
dense_3 (Dense)              (None, 100)               10100
_________________________________________________________________
batch_normalization_3 (Batch (None, 100)               400
_________________________________________________________________
dense_4 (Dense)              (None, 100)               10100
_________________________________________________________________
batch_normalization_4 (Batch (None, 100)               400
_________________________________________________________________
dense_5 (Dense)              (None, 100)               10100
_________________________________________________________________
batch_normalization_5 (Batch (None, 100)               400
_________________________________________________________________
dense_6 (Dense)              (None, 100)               10100
_________________________________________________________________
batch_normalization_6 (Batch (None, 100)               400
_________________________________________________________________
dense_7 (Dense)              (None, 100)               10100
_________________________________________________________________
batch_normalization_7 (Batch (None, 100)               400
_________________________________________________________________
dense_8 (Dense)              (None, 100)               10100
_________________________________________________________________
batch_normalization_8 (Batch (None, 100)               400
_________________________________________________________________
dense_9 (Dense)              (None, 100)               10100
_________________________________________________________________
batch_normalization_9 (Batch (None, 100)               400
_________________________________________________________________
dense_10 (Dense)             (None, 100)               10100
_________________________________________________________________
batch_normalization_10 (Batc (None, 100)               400
_________________________________________________________________
dense_11 (Dense)             (None, 100)               10100
_________________________________________________________________
batch_normalization_11 (Batc (None, 100)               400
_________________________________________________________________
dense_12 (Dense)             (None, 100)               10100
_________________________________________________________________
batch_normalization_12 (Batc (None, 100)               400
_________________________________________________________________
dense_13 (Dense)             (None, 100)               10100
_________________________________________________________________
batch_normalization_13 (Batc (None, 100)               400
_________________________________________________________________
dense_14 (Dense)             (None, 100)               10100
_________________________________________________________________
batch_normalization_14 (Batc (None, 100)               400
_________________________________________________________________
dense_15 (Dense)             (None, 100)               10100
_________________________________________________________________
batch_normalization_15 (Batc (None, 100)               400
_________________________________________________________________
... (output truncated; the Dense/BatchNormalization pattern continues through the 20th hidden layer, followed by the final softmax Dense)
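
##################################################################################################
# Since ModelCheckpoint(save_best_only=True) saved the best epoch to
# fashion_mnist_model.h5, the checkpoint can be restored after training.
# A minimal sketch, reusing output_model_file from above:
best_model = keras.models.load_model(output_model_file)
best_model.evaluate(x_test_scaled, y_test)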