Residual Networks - ResNet V2

ResNet is something of a point of pride for Chinese researchers: it was proposed by four Chinese authors and won first place in the ILSVRC 2015 competition.

Without further ado, here is my understanding of it.

ResNet is very similar to Highway Networks: both allow the original input to be passed directly to later layers. Traditional deep networks suffer from the degradation problem: as depth keeps increasing, accuracy saturates and then drops, and the error grows not only on the test set but on the training set as well.

The idea behind ResNet: let the input to a block be X and the desired output be H(X). If we pass X directly through to the output as an initial result, what the network actually needs to learn becomes F(X) = H(X) - X. The learning target changes: instead of fitting the full output H(X), the network only fits the difference between the output and the input, H(X) - X, that is, the residual.

A residual network uses two kinds of building blocks. One is a two-layer unit: the input X passes through two 3*3 convolutional layers to produce F(X) = H(X) - X, which is then added back to X. The other is a three-layer bottleneck: 1*1 convolutions at the head and tail with a 3*3 convolution in the middle, first reducing and then restoring the number of channels.
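To make this concrete, here is a minimal sketch (my own illustration, not from the book) of the two-layer unit written with slim. The function name basic_unit is hypothetical, and the sketch assumes the input already has `depth` channels so the identity shortcut can be added directly; the bottleneck variant is what the full code below implements.

# Minimal sketch of a two-layer residual unit (illustrative only; assumes slim = tf.contrib.slim).
def basic_unit(inputs, depth, scope=None):
    with tf.variable_scope(scope, 'basic_unit', [inputs]):
        shortcut = inputs                                                    # identity shortcut: X
        residual = slim.conv2d(inputs, depth, [3, 3], scope='conv1')        # first 3x3 conv of F(X)
        residual = slim.conv2d(residual, depth, [3, 3],
                               activation_fn=None, scope='conv2')           # second 3x3 conv, no activation yet
        # H(X) = F(X) + X: the two conv layers only need to learn the residual F(X)
        return tf.nn.relu(shortcut + residual)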

Below is a code example taken from the book "Tensorflow实战".

import collections
import tensorflow as tf
slim = tf.contrib.slim

class Block(collections.namedtuple('Block', ['scope', 'unit_fn', 'args'])):
    'A named tuple describing a ResNet block.'

# subsample: downsample the input by `factor`; when factor == 1 the input is returned unchanged
def subsample(inputs, factor, scope=None):
    if factor == 1:
        return inputs
    else:
        return slim.max_pool2d(inputs, [1, 1], stride=factor, scope=scope)

# conv2d_same: explicitly zero-pad the input and then apply conv2d, so strided convolutions behave like 'SAME' padding
def conv2d_same(inputs, num_outputs, kernel_size, stride, scope=None):
    if stride == 1:
        return slim.conv2d(inputs, num_outputs, kernel_size, stride=1, padding='SAME', scope=scope)
    else:
        pad_total = kernel_size - 1
        pad_beg = pad_total // 2
        pad_end = pad_total - pad_beg
        inputs = tf.pad(inputs, [[0, 0], [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]])
        return slim.conv2d(inputs, num_outputs, kernel_size, stride=stride, padding='VALID', scope=scope)



# stack_blocks_dense: stack all the blocks, producing scopes of the form block1/unit_1; block.unit_fn builds each residual unit and chains them together
@slim.add_arg_scope
def stack_blocks_dense(net, blocks, outputs_collections=None):

    for block in blocks:
        with tf.variable_scope(block.scope, 'block', [net]) as sc:
            for i, unit in enumerate(block.args):
                with tf.variable_scope('unit_%d' % (i + 1), values=[net]):
                    unit_depth, unit_depth_bottleneck, unit_stride = unit
                    net = block.unit_fn(net,
                                        depth=unit_depth,
                                        depth_bottleneck=unit_depth_bottleneck,
                                        stride=unit_stride)
            net = slim.utils.collect_named_outputs(outputs_collections, sc.name, net)

    return net
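To see what stack_blocks_dense consumes, here is a small illustration (sample values only, not from the book): each tuple in block.args becomes one residual unit, named unit_1, unit_2, ... under the block's scope.

# Illustration only: one Block spec and the units it expands into.
# [(256, 64, 1)] * 2 + [(256, 64, 2)] describes three bottleneck units;
# the last one uses stride 2, so the block downsamples at its end.
example_block = Block('block1', bottleneck, [(256, 64, 1)] * 2 + [(256, 64, 2)])
for i, (depth, depth_bottleneck, stride) in enumerate(example_block.args):
    print('unit_%d: depth=%d, depth_bottleneck=%d, stride=%d' % (i + 1, depth, depth_bottleneck, stride))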

# resnet_arg_scope: define default arguments for the functions we use, e.g. conv2d, batch_norm, weight decay
def resnet_arg_scope(is_training=True,
                     weight_decay=0.0001,
                     batch_norm_decay=0.997,
                     batch_norm_epsilon=1e-5,
                     batch_norm_scale=True):

    batch_norm_params = {
        'is_training':is_training,
        'decay':batch_norm_decay,
        'epsilon':batch_norm_epsilon,
        'scale':batch_norm_scale,
        'updates_collections':tf.GraphKeys.UPDATE_OPS,
    }

    with slim.arg_scope(
        [slim.conv2d],
        weights_regularizer=slim.l2_regularizer(weight_decay),
        weights_initializer=slim.variance_scaling_initializer(),
        activation_fn=tf.nn.relu,
        normalizer_fn=slim.batch_norm,
        normalizer_params=batch_norm_params):
        with slim.arg_scope([slim.batch_norm], **batch_norm_params):
            with slim.arg_scope([slim.max_pool2d], padding='SAME') as arg_sc:
                return arg_sc

# bottleneck: define the residual learning unit (pre-activation bottleneck)
@slim.add_arg_scope
def bottleneck(inputs, depth, depth_bottleneck, stride, outputs_collections=None, scope=None):
    with tf.variable_scope(scope, 'bottleneck_v2', [inputs]) as sc:
        depth_in = slim.utils.last_dimension(inputs.get_shape(), min_rank=4)
        preact = slim.batch_norm(inputs, activation_fn=tf.nn.relu, scope='preact')
        if depth == depth_in:
            shortcut = subsample(inputs, stride, 'shortcut')
        else:
            shortcut = slim.conv2d(preact, depth,
                                   [1, 1],
                                   stride=stride,
                                   normalizer_fn=None,
                                   activation_fn=None,
                                   scope='shortcut')

        # 1x1 reduce -> 3x3 (possibly strided) -> 1x1 restore; the last conv has no BN or activation
        residual = slim.conv2d(preact, depth_bottleneck, [1, 1], stride=1, scope='conv1')
        residual = conv2d_same(residual, depth_bottleneck, 3, stride, scope='conv2')
        residual = slim.conv2d(residual, depth, [1, 1], stride=1, normalizer_fn=None, activation_fn=None, scope='conv3')

        output = shortcut + residual

        return slim.utils.collect_named_outputs(outputs_collections, sc.name, output)

# resnet_v2: the main function that generates the network
def resnet_v2(inputs, blocks, num_classes=None, global_pool=True, include_root_block=True, reuse=None, scope=None):
    with tf.variable_scope(scope, 'resnet_v2', [inputs], reuse=reuse) as sc:
        end_points_collection = sc.original_name_scope + '_end_points'
        with slim.arg_scope([slim.conv2d, bottleneck, stack_blocks_dense], outputs_collections=end_points_collection):

            net = inputs
            if include_root_block:
                with slim.arg_scope([slim.conv2d], activation_fn=None, normalizer_fn=None):
                    net = conv2d_same(net, 64, 7, stride=2, scope='conv1')
                net = slim.max_pool2d(net, [3, 3], stride=2, scope='pool1')
            net = stack_blocks_dense(net, blocks)
            net = slim.batch_norm(net, activation_fn=tf.nn.relu, scope='postnorm')
            if global_pool:
                net = tf.reduce_mean(net, [1, 2], name='pool5', keep_dims=True)

            if num_classes is not None:
                net = slim.conv2d(net, num_classes, [1, 1], activation_fn=None, normalizer_fn=None, scope='logits')
            end_points = slim.utils.convert_collection_to_dict(end_points_collection)
            if num_classes is not None:
                end_points['predictions'] = slim.softmax(net, scope='predictions')
            return net, end_points

# resnet_v2_50: the 50-layer residual network
def resnet_v2_50(inputs, num_classes=None, global_pool=True, reuse=None, scope='resnet_v2_50'):
    blocks = [
        Block('block1', bottleneck, [(256, 64, 1)] * 2 + [(256, 64, 2)]),
        Block('block2', bottleneck, [(512, 128, 1)] * 3 + [(512, 128, 2)]),
        Block('block3', bottleneck, [(1024, 256, 1)] * 5 + [(1024, 256, 2)]),
        Block('block4', bottleneck, [(2048, 512, 1)] * 3)]
    return resnet_v2(inputs, blocks, num_classes, global_pool, include_root_block=True, reuse=reuse, scope=scope)
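Deeper variants only change how many times each block repeats its unit. As a sketch (the unit counts 3/4/23/3 follow the ResNet paper; this function mirrors resnet_v2_50 above and is not copied from the book), a 101-layer version could look like this:

# Sketch of a 101-layer variant: same structure, block3 repeats 23 units.
def resnet_v2_101(inputs, num_classes=None, global_pool=True, reuse=None, scope='resnet_v2_101'):
    blocks = [
        Block('block1', bottleneck, [(256, 64, 1)] * 2 + [(256, 64, 2)]),
        Block('block2', bottleneck, [(512, 128, 1)] * 3 + [(512, 128, 2)]),
        Block('block3', bottleneck, [(1024, 256, 1)] * 22 + [(1024, 256, 2)]),
        Block('block4', bottleneck, [(2048, 512, 1)] * 3)]
    return resnet_v2(inputs, blocks, num_classes, global_pool, include_root_block=True, reuse=reuse, scope=scope)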


# Quick smoke test: build the 50-layer network on a batch of random images.
batch_size = 32
height, width = 224, 224
inputs = tf.random_uniform((batch_size, height, width, 3))
with slim.arg_scope(resnet_arg_scope(is_training=False)):
    net, end_points = resnet_v2_50(inputs, 1000)

init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)

The ResNet authors later proposed ResNet V2. The difference is that the shortcut connections become pure identity mappings (the ReLU after the addition is removed), while Batch Normalization and ReLU are applied before every convolution (pre-activation), which is what the bottleneck() function above implements.
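As a rough sketch of the ordering difference between the two versions (my own summary in comment form, not code from the book):

# ResNet V1 unit (post-activation):
#   residual = conv -> BN -> ReLU -> conv -> BN
#   output   = ReLU(shortcut + residual)          # ReLU applied after the addition
# ResNet V2 unit (pre-activation, as in bottleneck() above):
#   preact   = ReLU(BN(inputs))
#   residual = conv(preact) -> BN -> ReLU -> conv -> ...
#   output   = shortcut + residual                # identity mapping, no activation after the addition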

