Improving BERT's AdamWeightDecayOptimizer for fine-tuning

The Adam implementation in the BERT source code is a simplified version: it drops the bias correction on the first and second moments that the original Adam paper prescribes. The complete version of AdamWeightDecay is given below.
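
To make "simplified" concrete before the code: the BERT optimizer applies the raw moving averages m and v directly, while the complete version rescales each step by the Adam bias-correction factor. Here is a minimal illustrative sketch in plain NumPy (not taken from the BERT repo; the function names and hyperparameter defaults are just examples):

import numpy as np

def bert_adam_step(param, grad, m, v, lr=2e-5,
                   beta_1=0.9, beta_2=0.999, eps=1e-6, wd=0.01):
    # Simplified step, as in the BERT repo: no bias correction on m and v.
    m = beta_1 * m + (1.0 - beta_1) * grad
    v = beta_2 * v + (1.0 - beta_2) * np.square(grad)
    update = m / (np.sqrt(v) + eps) + wd * param
    return param - lr * update, m, v

def full_adamw_step(param, grad, m, v, t, lr=2e-5,
                    beta_1=0.9, beta_2=0.999, eps=1e-6, wd=0.01):
    # Complete step: identical, except the bias correction is folded into
    # the effective learning rate (t is the 1-based step count).
    m = beta_1 * m + (1.0 - beta_1) * grad
    v = beta_2 * v + (1.0 - beta_2) * np.square(grad)
    lr_t = lr * np.sqrt(1.0 - beta_2 ** t) / (1.0 - beta_1 ** t)
    update = m / (np.sqrt(v) + eps) + wd * param
    return param - lr_t * update, m, v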

import re

import tensorflow as tf


# The AdamWeightDecayOptimizer from the BERT source code (the simplified version)
class AdamWeightDecayOptimizer(tf.train.Optimizer):
    """A basic Adam optimizer that includes "correct" L2 weight decay."""

    def __init__(self,
                 learning_rate,
                 weight_decay_rate=0.0,
                 beta_1=0.9,
                 beta_2=0.999,
                 epsilon=1e-6,
                 exclude_from_weight_decay=None,
                 name="AdamWeightDecayOptimizer"):
        """Constructs a AdamWeightDecayOptimizer."""
        super(AdamWeightDecayOptimizer, self).__init__(False, name)

        self.learning_rate = learning_rate
        self.weight_decay_rate = weight_decay_rate
        self.beta_1 = beta_1
        self.beta_2 = beta_2
        self.epsilon = epsilon
        self.exclude_from_weight_decay = exclude_from_weight_decay

    def apply_gradients(self, grads_and_vars, global_step=None, name=None):
        """See base class."""
        assignments = []
        for (grad, param) in grads_and_vars:
            if grad is None or param is None:
                continue

            param_name = self._get_variable_name(param.name)

            m = tf.get_variable(
                name=param_name + "/adam_m",
                shape=param.shape.as_list(),
                dtype=tf.float32,
                trainable=False,
                initializer=tf.zeros_initializer())
            v = tf.get_variable(
                name=param_name + "/adam_v",
                shape=param.shape.as_list(),
                dtype=tf.float32,
                trainable=False,
                initializer=tf.zeros_initializer())

            # Standard Adam update.
            next_m = (
                    tf.multiply(self.beta_1, m) + tf.multiply(1.0 - self.beta_1, grad))
            next_v = (
                    tf.multiply(self.beta_2, v) + tf.multiply(1.0 - self.beta_2,
                                                              tf.square(grad)))

            update = next_m / (tf.sqrt(next_v) + self.epsilon)

            # Just adding the square of the weights to the loss function is *not*
            # the correct way of using L2 regularization/weight decay with Adam,
            # since that will interact with the m and v parameters in strange ways.
            #
            # Instead we want to decay the weights in a manner that doesn't interact
            # with the m/v parameters. This is equivalent to adding the square
            # of the weights to the loss with plain (non-momentum) SGD.
            if self._do_use_weight_decay(param_name):
                update += self.weight_decay_rate * param

            update_with_lr = self.learning_rate * update

            next_param = param - update_with_lr

            assignments.extend(
                [param.assign(next_param),
                 m.assign(next_m),
                 v.assign(next_v)])
        return tf.group(*assignments, name=name)

    def _do_use_weight_decay(self, param_name):
        """Whether to use L2 weight decay for `param_name`."""
        if not self.weight_decay_rate:
            return False
        if self.exclude_from_weight_decay:
            for r in self.exclude_from_weight_decay:
                if re.search(r, param_name) is not None:
                    return False
        return True

    def _get_variable_name(self, param_name):
        """Get the variable name from the tensor name."""
        m = re.match("^(.*):\\d+$", param_name)
        if m is not None:
            param_name = m.group(1)
        return param_name
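

# For context, this is roughly how the optimizer above gets wired into a
# fine-tuning graph. It is only a minimal sketch, assuming TF 1.x graph mode
# and the imports at the top of this file; it follows the create_optimizer
# pattern from BERT's optimization.py but omits the warmup/polynomial-decay
# learning-rate schedule. The helper name create_train_op is just for
# illustration, and `loss` stands for your fine-tuning loss tensor.
def create_train_op(loss, learning_rate=2e-5, weight_decay_rate=0.01):
    # LayerNorm and bias parameters are excluded from weight decay,
    # the same exclusions BERT uses.
    optimizer = AdamWeightDecayOptimizer(
        learning_rate=learning_rate,
        weight_decay_rate=weight_decay_rate,
        beta_1=0.9,
        beta_2=0.999,
        epsilon=1e-6,
        exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"])

    tvars = tf.trainable_variables()
    grads = tf.gradients(loss, tvars)
    (grads, _) = tf.clip_by_global_norm(grads, clip_norm=1.0)

    global_step = tf.train.get_or_create_global_step()
    train_op = optimizer.apply_gradients(
        zip(grads, tvars), global_step=global_step)

    # This optimizer never touches global_step itself, so increment it
    # explicitly, the same way BERT's optimization.py does.
    new_global_step = global_step + 1
    train_op = tf.group(train_op, [global_step.assign(new_global_step)])
    return train_op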


# The complete version: AdamWeightDecay matching the update rule of the original Adam paper (bias correction restored)
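# NOTE: if you switch to this version, it additionally needs the following
# imports on top of `re` and `tensorflow` above (all used by the graph ops below):
#
#     from tensorflow.python.framework import ops
#     from tensorflow.python.ops import control_flow_ops
#     from tensorflow.python.ops import math_ops
#     from tensorflow.python.ops import resource_variable_ops
#     from tensorflow.python.ops import state_ops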
# class AdamWeightDecayOptimizer(tf.train.Optimizer):
#     """A basic Adam optimizer that includes "correct" L2 weight decay."""

#     def __init__(self,
#                  learning_rate,
#                  weight_decay_rate=0.0,
#                  beta_1=0.9,
#                  beta_2=0.999,
#                  epsilon=1e-6,
#                  exclude_from_weight_decay=None,
#                  name="AdamWeightDecayOptimizer"):
#         """Constructs a AdamWeightDecayOptimizer."""
#         super(AdamWeightDecayOptimizer, self).__init__(False, name)

#         self.learning_rate = learning_rate
#         self.weight_decay_rate = weight_decay_rate
#         self.beta_1 = beta_1
#         self.beta_2 = beta_2
#         self.epsilon = epsilon
#         self.exclude_from_weight_decay = exclude_from_weight_decay
#         self.learning_rate_t = None
#         self.weight_decay_rate_t = None
#         self.beta_1_t = None
#         self.beta_2_t = None
#         self.epsilon_t = None
    
#     def _get_beta_accumulators(self):
#         with ops.init_scope():
#             if tf.executing_eagerly():
#                 graph = None
#             else:
#                 graph = ops.get_default_graph()
#             return (self._get_non_slot_variable("beta1_power", graph=graph),
#                     self._get_non_slot_variable("beta2_power", graph=graph))


#     def _prepare(self):
#         self.learning_rate_t = ops.convert_to_tensor(
#             self.learning_rate, name='learning_rate')
#         self.weight_decay_rate_t = ops.convert_to_tensor(
#             self.weight_decay_rate, name='weight_decay_rate')
#         self.beta_1_t = ops.convert_to_tensor(self.beta_1, name='beta_1')
#         self.beta_2_t = ops.convert_to_tensor(self.beta_2, name='beta_2')
#         self.epsilon_t = ops.convert_to_tensor(self.epsilon, name='epsilon')

#     def _create_slots(self, var_list):
#         first_var = min(var_list, key=lambda x: x.name)
#         self._create_non_slot_variable(initial_value=self.beta_1,
#                                     name="beta1_power",
#                                     colocate_with=first_var)
#         self._create_non_slot_variable(initial_value=self.beta_2,
#                                     name="beta2_power",
#                                     colocate_with=first_var)
#         for v in var_list:
#             self._zeros_slot(v, 'm', self._name)
#             self._zeros_slot(v, 'v', self._name)

#     def _apply_dense(self, grad, var):
#         learning_rate_t = math_ops.cast(
#             self.learning_rate_t, var.dtype.base_dtype)
#         beta_1_t = math_ops.cast(self.beta_1_t, var.dtype.base_dtype)
#         beta_2_t = math_ops.cast(self.beta_2_t, var.dtype.base_dtype)
#         epsilon_t = math_ops.cast(self.epsilon_t, var.dtype.base_dtype)
#         weight_decay_rate_t = math_ops.cast(
#             self.weight_decay_rate_t, var.dtype.base_dtype)

#         m = self.get_slot(var, 'm')
#         v = self.get_slot(var, 'v')
#         beta1_power, beta2_power = self._get_beta_accumulators()
#         beta1_power = math_ops.cast(beta1_power, var.dtype.base_dtype)
#         beta2_power = math_ops.cast(beta2_power, var.dtype.base_dtype)
#         learning_rate_t = math_ops.cast(self.learning_rate_t, var.dtype.base_dtype)
#         learning_rate_t = (learning_rate_t * math_ops.sqrt(1 - beta2_power) / (1 - beta1_power))
    
#         # Standard Adam update.
#         next_m = (
#             tf.multiply(beta_1_t, m) +
#             tf.multiply(1.0 - beta_1_t, grad))
#         next_v = (
#             tf.multiply(beta_2_t, v) + tf.multiply(1.0 - beta_2_t,
#                                                    tf.square(grad)))

#         update = next_m / (tf.sqrt(next_v) + epsilon_t)

#         if self._do_use_weight_decay(var.name):
#             update += weight_decay_rate_t * var

#         update_with_lr = learning_rate_t * update

#         next_param = var - update_with_lr

#         return control_flow_ops.group(*[var.assign(next_param),
#                                         m.assign(next_m),
#                                         v.assign(next_v)])

#     def _resource_apply_dense(self, grad, var):
#         learning_rate_t = math_ops.cast(
#             self.learning_rate_t, var.dtype.base_dtype)
#         beta_1_t = math_ops.cast(self.beta_1_t, var.dtype.base_dtype)
#         beta_2_t = math_ops.cast(self.beta_2_t, var.dtype.base_dtype)
#         epsilon_t = math_ops.cast(self.epsilon_t, var.dtype.base_dtype)
#         weight_decay_rate_t = math_ops.cast(
#             self.weight_decay_rate_t, var.dtype.base_dtype)

#         m = self.get_slot(var, 'm')
#         v = self.get_slot(var, 'v')
#         beta1_power, beta2_power = self._get_beta_accumulators()
#         beta1_power = math_ops.cast(beta1_power, var.dtype.base_dtype)
#         beta2_power = math_ops.cast(beta2_power, var.dtype.base_dtype)
#         learning_rate_t = math_ops.cast(self.learning_rate_t, var.dtype.base_dtype)
#         learning_rate_t = (learning_rate_t * math_ops.sqrt(1 - beta2_power) / (1 - beta1_power))
    
#         # Standard Adam update.
#         next_m = (
#             tf.multiply(beta_1_t, m) +
#             tf.multiply(1.0 - beta_1_t, grad))
#         next_v = (
#             tf.multiply(beta_2_t, v) + tf.multiply(1.0 - beta_2_t,
#                                                    tf.square(grad)))

#         update = next_m / (tf.sqrt(next_v) + epsilon_t)

#         if self._do_use_weight_decay(var.name):
#             update += weight_decay_rate_t * var

#         update_with_lr = learning_rate_t * update

#         next_param = var - update_with_lr

#         return control_flow_ops.group(*[var.assign(next_param),
#                                         m.assign(next_m),
#                                         v.assign(next_v)])

#     def _apply_sparse_shared(self, grad, var, indices, scatter_add):
#         learning_rate_t = math_ops.cast(
#             self.learning_rate_t, var.dtype.base_dtype)
#         beta_1_t = math_ops.cast(self.beta_1_t, var.dtype.base_dtype)
#         beta_2_t = math_ops.cast(self.beta_2_t, var.dtype.base_dtype)
#         epsilon_t = math_ops.cast(self.epsilon_t, var.dtype.base_dtype)
#         weight_decay_rate_t = math_ops.cast(
#             self.weight_decay_rate_t, var.dtype.base_dtype)

#         m = self.get_slot(var, 'm')
#         v = self.get_slot(var, 'v')
#         beta1_power, beta2_power = self._get_beta_accumulators()
#         beta1_power = math_ops.cast(beta1_power, var.dtype.base_dtype)
#         beta2_power = math_ops.cast(beta2_power, var.dtype.base_dtype)
#         learning_rate_t = math_ops.cast(self.learning_rate_t, var.dtype.base_dtype)
#         learning_rate_t = (learning_rate_t * math_ops.sqrt(1 - beta2_power) / (1 - beta1_power))
    
#         m_t = state_ops.assign(m, m * beta_1_t,
#                                use_locking=self._use_locking)

#         m_scaled_g_values = grad * (1 - beta_1_t)
#         with ops.control_dependencies([m_t]):
#             m_t = scatter_add(m, indices, m_scaled_g_values)

#         v_scaled_g_values = (grad * grad) * (1 - beta_2_t)
#         v_t = state_ops.assign(v, v * beta_2_t, use_locking=self._use_locking)
#         with ops.control_dependencies([v_t]):
#             v_t = scatter_add(v, indices, v_scaled_g_values)

#         update = m_t / (math_ops.sqrt(v_t) + epsilon_t)

#         if self._do_use_weight_decay(var.name):
#             update += weight_decay_rate_t * var

#         update_with_lr = learning_rate_t * update

#         var_update = state_ops.assign_sub(var,
#                                           update_with_lr,
#                                           use_locking=self._use_locking)
#         return control_flow_ops.group(*[var_update, m_t, v_t])

#     def _apply_sparse(self, grad, var):
#         return self._apply_sparse_shared(
#             grad.values, var, grad.indices,
#             lambda x, i, v: state_ops.scatter_add(  # pylint: disable=g-long-lambda
#                 x, i, v, use_locking=self._use_locking))

#     def _resource_scatter_add(self, x, i, v):
#         with ops.control_dependencies(
#                 [resource_variable_ops.resource_scatter_add(
#                     x.handle, i, v)]):
#             return x.value()

#     def _resource_apply_sparse(self, grad, var, indices):
#         return self._apply_sparse_shared(
#             grad, var, indices, self._resource_scatter_add)

#     def _do_use_weight_decay(self, param_name):
#         """Whether to use L2 weight decay for `param_name`."""
#         if not self.weight_decay_rate:
#             return False
#         if self.exclude_from_weight_decay:
#             for r in self.exclude_from_weight_decay:
#                 if re.search(r, param_name) is not None:
#                     return False
#         return True
#     def _finish(self, update_ops, name_scope):
#         # Update the power accumulators.
#         with ops.control_dependencies(update_ops):
#             beta1_power, beta2_power = self._get_beta_accumulators()
#             with ops.colocate_with(beta1_power):
#                 update_beta1 = beta1_power.assign(
#                     beta1_power * self.beta_1_t, use_locking=self._use_locking)
#                 update_beta2 = beta2_power.assign(
#                     beta2_power * self.beta_2_t, use_locking=self._use_locking)
#             return control_flow_ops.group(*update_ops + [update_beta1, update_beta2],
#                                         name=name_scope)
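
The practical effect of restoring the bias correction is easiest to see numerically. The complete version scales the learning rate by sqrt(1 - beta_2^t) / (1 - beta_1^t) (via the beta1_power / beta2_power accumulators above), which damps the very first updates and approaches 1 as training proceeds. A quick illustrative check in plain Python:

beta_1, beta_2, lr = 0.9, 0.999, 2e-5
for t in [1, 10, 100, 1000, 10000]:
    # Effective learning rate the complete version would use at step t.
    lr_t = lr * (1.0 - beta_2 ** t) ** 0.5 / (1.0 - beta_1 ** t)
    print("step %5d: lr_t = %.3g (lr_t / lr = %.3f)" % (t, lr_t, lr_t / lr))

For a typical short fine-tuning run this factor is still noticeably below 1 for most steps, so the complete version gets a gentler, self-correcting start rather than relying only on an external warmup schedule.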
