A Simple Introduction to 6 Derived Optimizers and Their Implementation
```python
# Derive an optimizer with a piecewise linear learning rate.
# The name argument is optional, but it is best to supply it,
# so that different derived optimizers can be told apart.
AdamLR = extend_with_piecewise_linear_lr(Adam, name='AdamLR')
```
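For concreteness, here is a minimal usage sketch. It assumes the `AdamLR` class derived above and a tf.keras-style `Adam` whose constructor accepts `learning_rate`; the toy model and data are purely illustrative.

```python
import numpy as np
from tensorflow import keras

# Warm up linearly over the first 1000 steps, then decay to 10% of the
# base learning rate by step 2000 (and hold it there afterwards).
optimizer = AdamLR(learning_rate=1e-3, lr_schedule={1000: 1, 2000: 0.1})

# A toy regression model, just to show where the derived optimizer plugs in.
model = keras.Sequential([keras.layers.Dense(1, input_shape=(8,))])
model.compile(loss='mse', optimizer=optimizer)
model.fit(np.random.rand(64, 8), np.random.rand(64, 1), epochs=1)
```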
The definition of extend_with_piecewise_linear_lr is as follows:
```python
@export_to_custom_objects
def extend_with_piecewise_linear_lr(BaseOptimizer):
    """Return a new optimizer class with a piecewise linear learning rate.
    """
    class NewOptimizer(BaseOptimizer):
        """Optimizer with a piecewise linear learning rate schedule.
        Here lr_schedule is a dict such as {1000: 1, 2000: 0.1}, meaning
        the learning rate rises linearly from zero to 100% over steps
        0~1000, then falls linearly to 10% over steps 1000~2000, and
        stays at 10% after step 2000.
        """
        @insert_arguments(lr_schedule={0: 1})
        def __init__(self, *args, **kwargs):
            super(NewOptimizer, self).__init__(*args, **kwargs)
            self.lr_schedule = {int(i): j for i, j in self.lr_schedule.items()}

        def _decayed_lr(self, var_dtype):
            """Override the learning-rate getter to apply the multiplier.
            """
            lr_multiplier = piecewise_linear(self.iterations, self.lr_schedule)
            lr_t = super(NewOptimizer, self)._decayed_lr(var_dtype)
            return lr_t * K.cast(lr_multiplier, var_dtype)

        def get_config(self):
            config = {
                'lr_schedule': self.lr_schedule,
            }
            base_config = super(NewOptimizer, self).get_config()
            return dict(list(base_config.items()) + list(config.items()))

    return NewOptimizer
```
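The snippet relies on two helpers, `insert_arguments` and `piecewise_linear`, that are not shown in this excerpt. The sketches below are assumptions reconstructed from how they are used above, not the library's verbatim code: `insert_arguments` injects extra keyword arguments (with defaults) into `__init__` and stores them as instance attributes, while `piecewise_linear` evaluates the schedule dict at the current training step.

```python
from tensorflow.keras import backend as K


def insert_arguments(**arguments):
    """Sketch: decorator that adds keyword arguments (with defaults) to
    __init__ and saves them as attributes on the instance."""
    def decorator(init):
        def new_init(self, *args, **kwargs):
            for key, default in arguments.items():
                # Pop the argument if the caller supplied it, else use default.
                setattr(self, key, kwargs.pop(key, default))
            return init(self, *args, **kwargs)
        return new_init
    return decorator


def piecewise_linear(t, schedule):
    """Sketch: piecewise linear interpolation of the schedule at step t.
    For schedule={1000: 1, 2000: 0.1}, the output rises linearly from 0
    to 1 over t in [0, 1000], falls linearly to 0.1 over [1000, 2000],
    and stays at 0.1 for t > 2000."""
    schedule = sorted(schedule.items())
    if schedule[0][0] != 0:
        schedule = [(0, 0.0)] + schedule  # interpolation starts from zero

    t = K.cast(t, K.floatx())
    x = K.constant(schedule[0][1], dtype=K.floatx())
    for i in range(len(schedule)):
        t_begin = schedule[i][0]
        x_begin = x
        if i != len(schedule) - 1:
            # Linear segment from schedule[i] towards schedule[i + 1].
            dx = schedule[i + 1][1] - schedule[i][1]
            dt = schedule[i + 1][0] - schedule[i][0]
            x = schedule[i][1] + 1.0 * dx / dt * (t - t_begin)
        else:
            # After the last knot the multiplier stays constant.
            x = K.constant(schedule[i][1], dtype=K.floatx())
        # Use this segment only once t has passed its starting step.
        x = K.switch(t >= t_begin, x, x_begin)

    return x
```

In practice the library ships these helpers itself, so you would import them rather than redefine them; the sketches are only meant to make the derivation above self-contained.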