Keras Adam Source Code Walkthrough and an Adam Optimizer with EMA


For the theory behind Adam, see the earlier article 优化算法的选择 (on choosing an optimization algorithm).

Keras Adam

# Imports needed if this excerpt is used outside keras/optimizers.py
from keras import backend as K
from keras.legacy import interfaces
from keras.optimizers import Optimizer


class Adam(Optimizer):
    """Adam optimizer.

    Default parameters follow those provided in the original paper.

    # Arguments
        learning_rate: float >= 0. Learning rate.
        beta_1: float, 0 < beta < 1. Generally close to 1.
        beta_2: float, 0 < beta < 1. Generally close to 1.
        amsgrad: boolean. Whether to apply the AMSGrad variant of this
            algorithm from the paper "On the Convergence of Adam and
            Beyond".

    # References
        - [Adam - A Method for Stochastic Optimization](
           https://arxiv.org/abs/1412.6980v8)
        - [On the Convergence of Adam and Beyond](
           https://openreview.net/forum?id=ryQu7f-RZ)
    """

    def __init__(self, learning_rate=0.001, beta_1=0.9, beta_2=0.999,
                 amsgrad=False, **kwargs):
        self.initial_decay = kwargs.pop('decay', 0.0)
        self.epsilon = kwargs.pop('epsilon', K.epsilon())
        learning_rate = kwargs.pop('lr', learning_rate)
        super(Adam, self).__init__(**kwargs)
        with K.name_scope(self.__class__.__name__):
            self.iterations = K.variable(0, dtype='int64', name='iterations')
            self.learning_rate = K.variable(learning_rate, name='learning_rate')
            self.beta_1 = K.variable(beta_1, name='beta_1')
            self.beta_2 = K.variable(beta_2, name='beta_2')
            self.decay = K.variable(self.initial_decay, name='decay')
        self.amsgrad = amsgrad

    @interfaces.legacy_get_updates_support
    @K.symbolic
    def get_updates(self, loss, params):
        grads = self.get_gradients(loss, params)  # gradients of the loss w.r.t. the parameters
        self.updates = [K.update_add(self.iterations, 1)]

        lr = self.learning_rate
        # If the initial decay factor is non-zero, the learning rate keeps shrinking as the iteration count grows
        if self.initial_decay > 0:
            lr = lr * (1. / (1. + self.decay * K.cast(self.iterations,
                                                      K.dtype(self.decay))))

        t = K.cast(self.iterations, K.floatx()) + 1
        # Bias-correction factor that turns the biased moment estimates into unbiased ones
        # The shared computation is hoisted out of the per-parameter loop to speed things up
        lr_t = lr * (K.sqrt(1. - K.pow(self.beta_2, t)) /
                     (1. - K.pow(self.beta_1, t)))
        # Initial first-moment estimates, one per parameter
        ms = [K.zeros(K.int_shape(p),
              dtype=K.dtype(p),
              name='m_' + str(i))
              for (i, p) in enumerate(params)]
        # Initial second-moment estimates, one per parameter
        vs = [K.zeros(K.int_shape(p),
              dtype=K.dtype(p),
              name='v_' + str(i))
              for (i, p) in enumerate(params)]

        if self.amsgrad:
            vhats = [K.zeros(K.int_shape(p),
                     dtype=K.dtype(p),
                     name='vhat_' + str(i))
                     for (i, p) in enumerate(params)]
        else:
            vhats = [K.zeros(1, name='vhat_' + str(i))
                     for i in range(len(params))]
        self.weights = [self.iterations] + ms + vs + vhats

        for p, g, m, v, vhat in zip(params, grads, ms, vs, vhats):
            m_t = (self.beta_1 * m) + (1. - self.beta_1) * g  # first-moment estimate
            v_t = (self.beta_2 * v) + (1. - self.beta_2) * K.square(g)  # second-moment estimate
            if self.amsgrad:
                vhat_t = K.maximum(vhat, v_t)
                p_t = p - lr_t * m_t / (K.sqrt(vhat_t) + self.epsilon)
                self.updates.append(K.update(vhat, vhat_t))
            else:
                p_t = p - lr_t * m_t / (K.sqrt(v_t) + self.epsilon)  # parameter update

            self.updates.append(K.update(m, m_t))
            self.updates.append(K.update(v, v_t))
            new_p = p_t

            # If the parameter has a constraint, apply it to the new value
            if getattr(p, 'constraint', None) is not None:
                new_p = p.constraint(new_p)

            self.updates.append(K.update(p, new_p))
        return self.updates
    # Return the current hyperparameters
    def get_config(self):
        config = {'learning_rate': float(K.get_value(self.learning_rate)),
                  'beta_1': float(K.get_value(self.beta_1)),
                  'beta_2': float(K.get_value(self.beta_2)),
                  'decay': float(K.get_value(self.decay)),
                  'epsilon': self.epsilon,
                  'amsgrad': self.amsgrad}
        base_config = super(Adam, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
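As a quick sanity check on get_config: since decay and epsilon are popped from **kwargs in __init__, the dictionary it returns can be fed straight back into the constructor. A small round-trip sketch (assuming the standalone keras package whose source is shown above):

from keras.optimizers import Adam

opt = Adam(learning_rate=1e-3, amsgrad=False)
config = opt.get_config()
# e.g. {'learning_rate': 0.001, 'beta_1': 0.9, 'beta_2': 0.999,
#       'decay': 0.0, 'epsilon': 1e-07, 'amsgrad': False}

opt_clone = Adam(**config)  # rebuild an equivalent optimizer from the config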

Initialization

Adam first calls the parent class Optimizer's __init__, which initializes self.updates = [] and self.weights = []. The allowed_kwargs whitelist accepts only the gradient-clipping options clipnorm (clip by L2 norm) and clipvalue (clip element-wise by value); these arguments seem to be rarely used in practice (a short usage sketch follows after the snippet below).

    def __init__(self, **kwargs):
        allowed_kwargs = {'clipnorm', 'clipvalue'}
        for k in kwargs:
            if k not in allowed_kwargs:
                raise TypeError('Unexpected keyword argument '
                                'passed to optimizer: ' + str(k))
        self.__dict__.update(kwargs)
        self.updates = []
        self.weights = []
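Since clipnorm and clipvalue are the only keyword arguments the base class accepts, gradient clipping is enabled simply by passing them through to Adam. A minimal sketch using the standard Keras API:

from keras.optimizers import Adam

# Clip each gradient tensor's L2 norm to at most 1.0
opt = Adam(learning_rate=1e-3, clipnorm=1.0)

# Or clip every gradient element into [-0.5, 0.5]
opt = Adam(learning_rate=1e-3, clipvalue=0.5)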

Adam's own __init__ sets up:
initial_decay: the initial learning-rate decay factor
epsilon: a small number close to 0 that prevents division by zero
learning_rate: the learning rate
It then creates the following variables inside a name scope:
iterations: the iteration counter
learning_rate: the learning rate
beta_1: exponential decay factor of the first-moment estimate
beta_2: exponential decay factor of the second-moment estimate
decay: the learning-rate decay factor (a short sketch of the resulting schedule follows below)
amsgrad: a boolean flag for the AMSGrad variant of Adam, stored as a plain attribute
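The decay schedule amounts to inverse-time decay of the learning rate. A plain-Python sketch of the value that get_updates computes at each step (the helper name is my own):

def decayed_lr(lr, decay, iterations):
    # mirrors lr * (1. / (1. + decay * iterations)) in get_updates
    return lr * (1.0 / (1.0 + decay * iterations))

# Example with lr=0.001, decay=0.01:
# iteration 0   -> 0.001
# iteration 100 -> 0.0005
# iteration 900 -> 0.0001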

The update function

See the inline comments in the code above for the step-by-step logic; a condensed sketch of one update step is given below.
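To make the per-parameter loop concrete, here is a minimal NumPy sketch of a single Adam step for one parameter, using the same bias-corrected form as get_updates (names are my own, for illustration only):

import numpy as np

def adam_step(p, g, m, v, t, lr=0.001, beta_1=0.9, beta_2=0.999, eps=1e-7):
    # t starts at 1; the bias correction is folded into the learning rate, as in get_updates
    lr_t = lr * np.sqrt(1.0 - beta_2 ** t) / (1.0 - beta_1 ** t)
    m = beta_1 * m + (1.0 - beta_1) * g        # first-moment estimate
    v = beta_2 * v + (1.0 - beta_2) * g ** 2   # second-moment estimate
    p = p - lr_t * m / (np.sqrt(v) + eps)      # parameter update
    return p, m, v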

Adam with EMA

The @export_to_custom_objects decorator mainly names the newly created optimizer class and registers it in Keras's custom objects, i.e.
keras.utils.get_custom_objects()[name] = NewOptimizer
The rest is explained in the inline comments. One question about the execution flow is how Keras keeps the EMA-weight initialization code from running again during training, namely this line:
K.batch_set_value(zip(self.ema_weights, self.old_weights))
(The answer appears to be that, with the graph-mode backend, get_updates is only called once while the training function is being built, so this line executes eagerly at graph-construction time and never runs inside the training loop itself.)

import tensorflow as tf
from keras import backend as K
# insert_arguments and export_to_custom_objects are helper decorators from the
# referenced articles and are assumed to be importable here.


@export_to_custom_objects
def extend_with_exponential_moving_average(BaseOptimizer):
    """返回新的优化器类,加入EMA(权重滑动平均)
    """
    class NewOptimizer(BaseOptimizer):
        """带EMA(权重滑动平均)的优化器,EMA实际上就是权重,只不过我们最后用
        """
        @insert_arguments(ema_momentum=0.999)
        def __init__(self, *args, **kwargs):
            super(NewOptimizer, self).__init__(*args, **kwargs)

        def get_updates(self, loss, params):
            # Calling the parent get_updates builds the update ops for the weights, m and v
            updates = super(NewOptimizer, self).get_updates(loss, params)
            self.model_weights = params  # kept so we can apply/reset the EMA weights later
            self.ema_weights = [K.zeros(K.shape(w)) for w in params]  # EMA shadow variables
            self.old_weights = K.batch_get_value(params)
            # Note: this only initializes the EMA variables to the current weights; afterwards they are changed via K.update
            K.batch_set_value(zip(self.ema_weights, self.old_weights))

            ema_updates, ema_momentum = [], self.ema_momentum
            # Control dependency: these ops run only after the ops in updates have executed, i.e. after params have been updated
            with tf.control_dependencies(updates):
                for w1, w2 in zip(self.ema_weights, params):
                    new_w = ema_momentum * w1 + (1 - ema_momentum) * w2
                    ema_updates.append(K.update(w1, new_w))

            return ema_updates

        def get_config(self):
            config = {'ema_momentum': self.ema_momentum,
                      }
            base_config = super(NewOptimizer, self).get_config()
            return dict(list(base_config.items()) + list(config.items()))

        def apply_ema_weights(self):
            """备份原模型权重,然后将平均权重应用到模型上去。
            """
            self.old_weights = K.batch_get_value(self.model_weights)
            ema_weights = K.batch_get_value(self.ema_weights)
            K.batch_set_value(zip(self.model_weights, ema_weights))

        def reset_old_weights(self):
            """恢复模型到旧权重。
            """
            K.batch_set_value(zip(self.model_weights, self.old_weights))

    return NewOptimizer
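A typical training/evaluation flow with the resulting class might look like the sketch below. The model and data objects are placeholders, and ema_momentum is assumed to be popped from the kwargs by the insert_arguments decorator:

from keras.optimizers import Adam

# Wrap the stock Adam with the EMA extension defined above
AdamEMA = extend_with_exponential_moving_average(Adam)
opt = AdamEMA(learning_rate=1e-3, ema_momentum=0.999)

model.compile(optimizer=opt, loss='sparse_categorical_crossentropy')
model.fit(x_train, y_train, epochs=10)

# Evaluate with the averaged weights ...
opt.apply_ema_weights()
model.evaluate(x_valid, y_valid)

# ... then restore the raw weights before continuing to train
opt.reset_old_weights()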

References for this post:
Keras 中的 Adam 优化器(Optimizer)算法+源码研究
让Keras更酷一些:中间变量、权重滑动和安全生成器

If you want to write your own optimizer, also see:
玩转Keras之小众需求:自定义优化器
