Dissecting slim.arg_scope

 

import tensorflow as tf

slim = tf.contrib.slim


def resnet_arg_scope(weight_decay=0.0001,
                     is_training=True,
                     batch_norm_decay=0.997,
                     batch_norm_epsilon=1e-5,
                     batch_norm_scale=True,
                     activation_fn=tf.nn.relu,
                     use_batch_norm=True):
  """Defines the default ResNet arg scope.

  TODO(gpapan): The batch-normalization related default values above are
    appropriate for use in conjunction with the reference ResNet models
    released at https://github.com/KaimingHe/deep-residual-networks. When
    training ResNets from scratch, they might need to be tuned.

  Args:
    weight_decay: The weight decay to use for regularizing the model.
    is_training: Whether or not we are training the parameters in the batch
      normalization layers of the model.
    batch_norm_decay: The moving average decay when estimating layer activation
      statistics in batch normalization.
    batch_norm_epsilon: Small constant to prevent division by zero when
      normalizing activations by their variance in batch normalization.
    batch_norm_scale: If True, uses an explicit `gamma` multiplier to scale the
      activations in the batch normalization layer.
    activation_fn: The activation function which is used in ResNet.
    use_batch_norm: Whether or not to use batch normalization.

  Returns:
    An `arg_scope` to use for the resnet models.
  """
  batch_norm_params = {
      'decay': batch_norm_decay,
      'epsilon': batch_norm_epsilon,
      'scale': batch_norm_scale,
      'updates_collections': None,
      'is_training': is_training,
      'fused': True,  # Use fused batch norm if possible.
  }

  with slim.arg_scope(  #1
      [slim.conv2d],
      weights_regularizer=slim.l2_regularizer(weight_decay),
      weights_initializer=slim.variance_scaling_initializer(),
      activation_fn=activation_fn,
      normalizer_fn=slim.batch_norm if use_batch_norm else None,
      normalizer_params=batch_norm_params):
    with slim.arg_scope([slim.batch_norm], **batch_norm_params):  #2
      # The following implies padding='SAME' for pool1, which makes feature
      # alignment easier for dense prediction tasks. This is also used in
      # https://github.com/facebook/fb.resnet.torch. However the accompanying
      # code of 'Deep Residual Learning for Image Recognition' uses
      # padding='VALID' for pool1. You can switch to that choice by setting
      # slim.arg_scope([slim.max_pool2d], padding='VALID')
      with slim.arg_scope([slim.max_pool2d], padding='SAME') as arg_sc:  #3
        return arg_sc

 

#1 The first slim.arg_scope supplies the default keyword arguments that follow to every op in its bracketed list (here just [slim.conv2d]).
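A minimal sketch of this mechanism (the input shape and scope names below are illustrative, not from the original code):

import tensorflow as tf

slim = tf.contrib.slim

inputs = tf.placeholder(tf.float32, [None, 224, 224, 3])  # illustrative shape

# Every conv2d call inside the scope inherits these defaults, so they do not
# have to be repeated per layer; an explicit per-call argument still wins.
with slim.arg_scope([slim.conv2d],
                    weights_regularizer=slim.l2_regularizer(0.0001),
                    activation_fn=tf.nn.relu):
  net = slim.conv2d(inputs, 64, [3, 3], scope='conv1')  # inherits both defaults
  net = slim.conv2d(net, 64, [3, 3], scope='conv2',
                    activation_fn=None)                  # per-call override wins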

#2 The second slim.arg_scope is nested inside the first. Suppose its op list were extended to [slim.batch_norm, slim.conv2d]: then conv2d would receive the first scope's arguments (weights_regularizer, weights_initializer, activation_fn, normalizer_fn, normalizer_params) as well as the second scope's batch_norm_params, because nested scopes merge their kwargs for any op that appears in both lists. A short sketch of that merging behavior follows.
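A sketch of nested-scope merging (the op lists and values are illustrative):

import tensorflow as tf

slim = tf.contrib.slim

inputs = tf.placeholder(tf.float32, [None, 56, 56, 16])  # illustrative shape

with slim.arg_scope([slim.conv2d, slim.fully_connected],
                    weights_regularizer=slim.l2_regularizer(0.0001)):
  with slim.arg_scope([slim.conv2d], padding='VALID'):
    net = slim.conv2d(inputs, 32, [3, 3])
    # Effective kwargs for this call:
    #   weights_regularizer=...  (from the outer scope)
    #   padding='VALID'          (from the inner scope)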

#3 The arg_sc captured from the third slim.arg_scope in fact contains the ops from all three scopes' lists together with their corresponding arguments.

resnet_arg_scope = resnet_utils.resnet_arg_scope

Calling resnet_arg_scope() returns (the function addresses will differ per run):

{
 '<function convolution2d at 0x000002680A9DE488>': {
     'weights_regularizer': <function l2_regularizer.<locals>.l2 at 0x000002680DEDABF8>,
     'weights_initializer': <function variance_scaling_initializer.<locals>._initializer at 0x000002680DEDAD08>,
     'activation_fn': <function relu at 0x0000026805F9F9D8>,
     'normalizer_fn': <function add_arg_scope.<locals>.func_with_args at 0x000002680A9D5EA0>,
     'normalizer_params': {'decay': 0.997, 'epsilon': 1e-05, 'scale': True, 'updates_collections': None, 'is_training': True, 'fused': True}},
 '<function batch_norm at 0x000002680A9D5E18>': {'decay': 0.997, 'epsilon': 1e-05, 'scale': True, 'updates_collections': None, 'is_training': True, 'fused': True},
 '<function max_pool2d at 0x000002680AA5DAE8>': {'padding': 'SAME'}
}
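Because the returned arg_sc is just this dict of {op key: kwargs}, it can be re-entered later with slim.arg_scope. A minimal usage sketch (TF 1.x; the placeholder shape and scope names are illustrative):

import tensorflow as tf

slim = tf.contrib.slim

inputs = tf.placeholder(tf.float32, [None, 224, 224, 3])

with slim.arg_scope(resnet_arg_scope(weight_decay=1e-4)):
  net = slim.conv2d(inputs, 64, [7, 7], stride=2, scope='conv1')  # picks up the conv2d defaults
  net = slim.max_pool2d(net, [3, 3], stride=2, scope='pool1')     # padding='SAME' from arg_sc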
