Let's go straight to the code.
import warnings

from tensorflow.keras import optimizers
from tensorflow.keras.callbacks import LearningRateScheduler, Callback


class EarlyStoppingByLossVal(Callback):
    """Stop training once the monitored quantity drops below a threshold."""

    def __init__(self, monitor='val_loss', value=0.00001, verbose=0):
        super(EarlyStoppingByLossVal, self).__init__()
        self.monitor = monitor
        self.value = value
        self.verbose = verbose

    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        current = logs.get(self.monitor)
        if current is None:
            warnings.warn("Early stopping requires %s available!" % self.monitor, RuntimeWarning)
            return
        if current < self.value:
            if self.verbose > 0:
                print("Epoch %05d: early stopping THR" % epoch)
            self.model.stop_training = True


def step_decay(epoch, lr):
    # Use a different learning rate for each epoch range:
    # epochs 0-8 -> 100.0, epochs 9-18 -> 0.0, from epoch 19 onwards -> 0.1
    boundaries = [10, 20]
    values = [100.0, 0.0, 0.1]
    for idx, bd in enumerate(boundaries):
        if (epoch + 1) < bd:
            lr = values[idx]
            print(epoch, lr)
            return lr
    print(epoch)
    return values[-1]


callbacks = [
    EarlyStoppingByLossVal(monitor='val_loss', value=0.3, verbose=1),
    LearningRateScheduler(step_decay),
]

adam = optimizers.Adam(learning_rate=0.01, decay=1e-6)
model.compile(optimizer=adam, loss='mean_squared_error')
model.fit(x_train, y_train, epochs=60, batch_size=bs, shuffle=False, verbose=1,
          validation_data=(x_test, y_test), callbacks=callbacks)
Below is what I observed while debugging.
According to the code, the learning rate should change to 0 at the 10th epoch (actual epoch == 9) and to 0.1 at the 20th epoch (actual epoch == 19). The first switch happens as expected, but I have not figured out why the second one does not.
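As a quick sanity check of the schedule itself (a standalone sketch, not part of the original training run), step_decay can be called directly for a range of epoch indices to print the learning rate it would return:

# Standalone check of step_decay outside of model.fit(); the initial lr of 0.01
# passed here only matters if the function ever falls through without overriding it.
for epoch in range(25):
    print("epoch %2d -> lr %s" % (epoch, step_decay(epoch, 0.01)))
# Expected: 100.0 for epochs 0-8, 0.0 for epochs 9-18, 0.1 from epoch 19 onwards.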
EarlyStoppingByLossVal decides whether to terminate training based on the value of the monitored quantity that is passed in.
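To see that stopping logic in isolation (a minimal sketch with a throwaway model and hand-made logs dicts, not taken from the original run), the callback can be exercised without calling model.fit():

import tensorflow as tf

# Hypothetical dry run: attach the callback to a tiny model and feed it fake logs.
cb = EarlyStoppingByLossVal(monitor='val_loss', value=0.3, verbose=1)
cb.set_model(tf.keras.Sequential([tf.keras.Input(shape=(1,)), tf.keras.layers.Dense(1)]))
cb.model.stop_training = False  # normally reset by model.fit() at the start of training

cb.on_epoch_end(epoch=0, logs={'val_loss': 0.5})
print(cb.model.stop_training)   # False: 0.5 is not below the 0.3 threshold
cb.on_epoch_end(epoch=1, logs={'val_loss': 0.2})
print(cb.model.stop_training)   # True: training would stop after this epoch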
The code is adapted from here and here.