""
一: 定义学习率和epoch的关系
""
# Learning rate schedule: start at 1e-3, then decay in fixed steps
# once training passes epochs 80, 120, 160, and 180.
def lr_schedule(epoch):
    lr = 1e-3
    if epoch > 180:
        lr *= 0.5e-3
    elif epoch > 160:
        lr *= 1e-3
    elif epoch > 120:
        lr *= 1e-2
    elif epoch > 80:
        lr *= 1e-1
    print('Learning rate: ', lr)
    return lr
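A quick sanity check of the schedule; the probe epochs below are sample points just past each decay milestone, not part of the original code:

# Expected values: 1e-3, 1e-4, 1e-5, 1e-6, 5e-7;
# lr_schedule prints each one via its own print statement.
for epoch in [0, 81, 121, 161, 181]:
    lr_schedule(epoch)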
""
二: 在优化器中引用
""
from tensorflow.keras.optimizers import Adam

optimizer = Adam(learning_rate=lr_schedule(0))  # epoch-0 rate, i.e. 1e-3
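The optimizer is then used in model.compile as usual; a minimal sketch, assuming a model named model has already been built and with a placeholder loss choice:

# `model` is assumed to exist; loss and metrics here are placeholders.
model.compile(optimizer=optimizer,
              loss='categorical_crossentropy',
              metrics=['accuracy'])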
""
三: 封装底层LearningRateScheduler方法
""
from tensorflow.keras.callbacks import LearningRateScheduler

lr_scheduler = LearningRateScheduler(lr_schedule)
""
四: 将LearningRateScheduler方法放入callbacks中,在程序执行过程会调用
""
callbacks = [lr_scheduler]
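The list is then handed to model.fit, which is what actually triggers the callback; a sketch, assuming model, x_train, and y_train are defined elsewhere:

# `model`, `x_train`, `y_train` are assumed to be defined elsewhere.
model.fit(x_train, y_train,
          epochs=200,
          validation_split=0.2,
          callbacks=callbacks)  # lr_scheduler adjusts the rate at each epoch start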
ReduceLROnPlateau offers an alternative to a fixed timetable: it reduces the learning rate automatically once a monitored metric stops improving.

from tensorflow.keras.callbacks import ReduceLROnPlateau

reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=2, verbose=1)
The arguments:
monitor: the quantity to watch, e.g. 'accuracy', 'val_loss', or 'val_accuracy'
factor: multiplier applied when reducing, i.e. new_lr = lr * factor
patience: number of epochs with no improvement before the reduction is triggered
mode: one of 'auto', 'min', 'max'; the default 'auto' is usually fine
epsilon: threshold for deciding the metric has hit a plateau (renamed min_delta in newer Keras versions)
cooldown: number of epochs to wait after a reduction before normal monitoring resumes
min_lr: lower bound the learning rate can be reduced to
verbose: verbosity mode, 0 or 1
A fuller call with every argument spelled out:

reduce_lr = ReduceLROnPlateau(monitor='val_accuracy',
                              factor=0.1,
                              patience=2,
                              verbose=1,
                              mode='auto',
                              min_delta=0.0001,  # `epsilon` in older Keras versions
                              cooldown=0,
                              min_lr=0)
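To close the loop, a minimal self-contained sketch that trains with ReduceLROnPlateau; the tiny dense model and random data are stand-ins for a real network and dataset:

import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import ReduceLROnPlateau

# Stand-in data: 256 samples, 20 features, binary labels.
x_train = np.random.rand(256, 20)
y_train = np.random.randint(0, 2, size=(256, 1))

model = Sequential([Dense(16, activation='relu'),
                    Dense(1, activation='sigmoid')])
model.compile(optimizer=Adam(learning_rate=1e-3),
              loss='binary_crossentropy',
              metrics=['accuracy'])

reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5,
                              patience=2, verbose=1)

# When val_loss stalls for 2 epochs, the rate is halved (and logged, since verbose=1).
model.fit(x_train, y_train,
          epochs=10,
          validation_split=0.2,
          callbacks=[reduce_lr])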