Implementing the Classic ResNet Convolutional Neural Network with TensorFlow

1. About ResNet

ResNet (Residual Neural Network) was proposed by Kaiming He and colleagues at Microsoft Research. The network reaches a remarkable 152 layers and won ILSVRC 2015 with a top-5 error rate of 3.57%. Although ResNet is far deeper than VGGNet, its parameter count is actually lower.

The key innovation of ResNet is the residual learning unit (Residual Unit). The idea was inspired by the Highway Network proposed by Schmidhuber's group in 2015. A Highway Network learns what proportion of the original information to carry through, much like the gating units in the earlier LSTM, which control how much information flows into a cell. Thanks to this, Highway Networks can be trained directly with gradient descent even at depths of hundreds or thousands of layers without suffering from vanishing gradients. Building on this, ResNet introduces the identity shortcut connection: take a relatively shallow network whose accuracy has already saturated, append several identity-mapping layers behind it, and the error will not increase as a result.
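This intuition can be sketched in a few lines of code. The toy snippet below is purely illustrative and not part of the model code later in this post (the function names are made up): a residual unit outputs F(x) + x, so when the optimal mapping is close to the identity, the trainable layers only need to push the residual F(x) toward zero, which is easier than learning an identity mapping from scratch.

import numpy as np

def toy_residual_unit(x, residual_fn):
    # Output = F(x) + x: the trainable part only models the residual F(x).
    return residual_fn(x) + x

x = np.ones(4)
print(toy_residual_unit(x, lambda v: 0.0 * v))  # F(x) == 0 recovers the identity: [1. 1. 1. 1.]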

 

2. Implementation with TensorFlow

Model construction script:

"""
ResNet implementation
"""
import tensorflow as tf
import collections

from tensorflow.contrib import slim


class Block(collections.namedtuple("Block", ['name', 'residual_unit', 'args'])):
    """
    A named tuple describing a ResNet block.
    """


def conv2d_same(inputs, num_outputs, kernel_size, stride, scope=None):
    """
    Create a convolutional layer.
    If the stride is 1, convolve directly with padding='SAME'.
    :param inputs:  input tensor
    :param num_outputs:  number of output channels
    :param kernel_size: kernel size
    :param stride:  stride
    :param scope:   variable scope
    :return:
    """
    if stride == 1:
        return slim.conv2d(inputs, num_outputs, kernel_size, stride=1, padding='SAME', scope=scope)
    else:
        # If the stride is not 1, zero-pad the input explicitly with tf.pad:
        # tf.pad(inputs, [[0, 0], [top, bottom], [left, right], [0, 0]])
        pad_begin = (kernel_size - 1) // 2  # amount of padding added on the left/top
        pad_end = kernel_size - 1 - pad_begin  # amount of padding added on the right/bottom

        inputs = tf.pad(inputs, [[0, 0], [pad_begin, pad_end], [pad_begin, pad_end], [0, 0]])
        return slim.conv2d(inputs, num_outputs, kernel_size, stride=stride, padding='VALID', scope=scope)
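# Worked example (hypothetical input size, only to illustrate the padding arithmetic):
# for the 7x7 / stride-2 stem convolution used further below, pad_begin = 3 and
# pad_end = 3, so a 224x224 input is padded to 230x230 and the VALID convolution
# returns a 112x112 feature map -- the same spatial size that padding='SAME' would
# give, but with padding that does not depend on the input dimensions.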


@slim.add_arg_scope
def residual_unit(inputs, depth, depth_residual, stride, outputs_collections=None, scope=None):
    """
    Create one residual learning unit (pre-activation bottleneck).
    :param inputs:  input tensor
    :param depth: number of output channels of the third (last) convolution
    :param depth_residual:  number of output channels of the first two convolutions
    :param stride:  stride of the middle convolution
    :param outputs_collections: collection to which the unit's output is added
    :param scope: variable scope
    :return:
    """
    with tf.variable_scope(scope, "residual_v2", [inputs]) as sc:
        # Number of input channels: the last dimension of the input shape
        depth_input = slim.utils.last_dimension(inputs.get_shape(), min_rank=4)
        # Pre-activation: Batch Normalization followed by ReLU
        preact = slim.batch_norm(inputs, activation_fn=tf.nn.relu, scope="preact")

        if depth == depth_input:  # input and output channel counts match
            if stride == 1:
                identity = inputs
            else:
                # Spatially downsample the input by `stride` with a 1x1 max pool
                identity = slim.max_pool2d(inputs, [1, 1], stride=stride, scope="shortcut")
        else:
            # Use a 1x1 convolution with stride `stride` to match the output channel count
            identity = slim.conv2d(preact, depth, [1, 1], stride=stride, normalizer_fn=None, activation_fn=None,
                                   scope="shortcut")

        # The three convolutional layers of one residual learning unit
        residual = slim.conv2d(preact, depth_residual, [1, 1], stride=1, scope="conv1")
        residual = conv2d_same(residual, depth_residual, 3, stride, scope="conv2")
        residual = slim.conv2d(residual, depth, [1, 1], stride=1, normalizer_fn=None, activation_fn=None, scope="conv3")

        # Add the identity shortcut and the residual branch
        output = identity + residual

        result = slim.utils.collect_named_outputs(outputs_collections, sc.name, output)

        return result
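# Shape sketch (illustrative numbers, not executed here): with a 56x56x256 input,
# depth=256, depth_residual=64 and stride=2, the residual branch goes
# 56x56x64 -> 28x28x64 -> 28x28x256, while the shortcut is max-pooled to 28x28x256,
# so the two branches can be added element-wise.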


def resnet_v2(inputs, blocks, num_classes, reuse=None, scope=None):
    """
    Build the complete ResNet.
    :param inputs: batch of images
    :param blocks: list of Block definitions
    :param num_classes: number of output classes
    :param reuse: whether to reuse variables
    :param scope: variable scope
    :return:
    """
    with tf.variable_scope(scope, "resnet_v2", [inputs], reuse=reuse) as sc:
        end_point_collection = sc.original_name_scope + "_end_points"

        with slim.arg_scope([residual_unit], outputs_collections=end_point_collection):
            with slim.arg_scope([slim.conv2d], activation_fn=None, normalizer_fn=None):
                # 7x7 convolution with 64 output channels and stride 2
                net = conv2d_same(inputs, 64, 7, stride=2, scope="conv1")
                # 3x3 max pooling with stride 2
                net = slim.max_pool2d(net, [3, 3], stride=2, scope="pool1")

                # Iterate over the block list and stack the residual learning units
                for block in blocks:
                    with tf.variable_scope(block.name, "block", [net]) as sc:
                        # Iterate over the args of each Block
                        for i, tuple_value in enumerate(block.args):
                            with tf.variable_scope("unit_%d" % (i + 1), values=[net]):
                                # Output channels of the third convolution, output channels of the
                                # first two convolutions, and stride of the middle convolution
                                depth, depth_bottleneck, stride = tuple_value
                                # Create one residual unit
                                net = block.residual_unit(net, depth=depth, depth_residual=depth_bottleneck,
                                                          stride=stride)

                        # net is now the output of this block
                        net = slim.utils.collect_named_outputs(end_point_collection, sc.name, net)

                # Final Batch Normalization + ReLU on net
                net = slim.batch_norm(net, activation_fn=tf.nn.relu, scope="postnorm")
                # Global average pooling
                net = tf.reduce_mean(net, [1, 2], name="pool5", keep_dims=True)

                # If num_classes is given, produce num_classes logits with a 1x1 convolution
                if num_classes is not None:
                    net = slim.conv2d(net, num_classes, [1, 1],
                                      activation_fn=None, normalizer_fn=None, scope="logits")

        return net
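# Note: because the logits come from a 1x1 convolution applied to the globally pooled
# feature map, the tensor returned by resnet_v2 has shape [batch, 1, 1, num_classes];
# a tf.squeeze (or reshape) is typically applied before feeding it to a softmax loss.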


def resnet_v2_152(inputs, num_classes, reuse=None, scope="resnet_v2_152"):
    """
    Define the 152-layer ResNet.
    :param inputs:
    :param num_classes:
    :param reuse:
    :param scope:
    :return:
    """
    # ['name', 'residual_unit', 'args']
    blocks = [
        Block("block1", residual_unit, [(256, 64, 1), (256, 64, 1), (256, 64, 2)]),  # 3 units
        Block("block2", residual_unit, [(512, 128, 1)] * 7 + [(512, 128, 2)]),  # 8 units
        Block("block3", residual_unit, [(1024, 256, 1)] * 35 + [(1024, 256, 2)]),  # 36 units
        Block("block4", residual_unit, [(2048, 512, 1)] * 3)  # 3 units
    ]
    return resnet_v2(inputs, blocks, num_classes, reuse=reuse, scope=scope)
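The same Block convention also covers the shallower variants. As a rough sketch (my own addition, assuming the standard 3/4/6/3 unit counts of ResNet-50; it is not part of the original listing), a resnet_v2_50 could be assembled from the functions above like this:

def resnet_v2_50(inputs, num_classes, reuse=None, scope="resnet_v2_50"):
    # Unit counts per block: 3, 4, 6, 3; the last unit of blocks 1-3 uses stride 2.
    blocks = [
        Block("block1", residual_unit, [(256, 64, 1)] * 2 + [(256, 64, 2)]),
        Block("block2", residual_unit, [(512, 128, 1)] * 3 + [(512, 128, 2)]),
        Block("block3", residual_unit, [(1024, 256, 1)] * 5 + [(1024, 256, 2)]),
        Block("block4", residual_unit, [(2048, 512, 1)] * 3)
    ]
    return resnet_v2(inputs, blocks, num_classes, reuse=reuse, scope=scope)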

Test script (forward-pass benchmark):

"""
Test-run the residual network
"""
import math
import time
from datetime import datetime

import tensorflow as tf
from tensorflow.contrib import slim

from paper1.resnet.ResNet_struct import resnet_v2_152

# Mini-batch size
batch_size = 32
# Number of batches to time
num_batches = 100
# The first few iterations are skewed by GPU memory allocation and other warm-up
# overhead, so an extra 10 burn-in iterations are run on top of num_batches and
# excluded from the statistics
num_steps_burn_in = 10
# Accumulated total run time
total_duration = 0.0
# Accumulated sum of squared per-batch durations
total_duration_squared = 0.0

# Randomly generated input data
inputs = tf.random_uniform((batch_size, 224, 224, 3))


def arg_scope(is_training=True,
              weight_decay=0.0001,
              batch_norm_decay=0.997,
              batch_norm_epsilon=1e-5,
              batch_norm_scale=True):
    """
    Define default argument values for the slim layer functions.
    :param is_training:
    :param weight_decay:
    :param batch_norm_decay:
    :param batch_norm_epsilon:
    :param batch_norm_scale:
    :return:
    """
    batch_norm_params = {
        "is_training": is_training,
        "decay": batch_norm_decay,
        "epsilon": batch_norm_epsilon,
        "scale": batch_norm_scale,
        "updates_collections": tf.GraphKeys.UPDATE_OPS
    }

    with slim.arg_scope([slim.conv2d], weights_initializer=slim.variance_scaling_initializer(),
                        weights_regularizer=slim.l2_regularizer(weight_decay),
                        activation_fn=tf.nn.relu,
                        normalizer_fn=slim.batch_norm,
                        normalizer_params=batch_norm_params):
        with slim.arg_scope([slim.batch_norm], **batch_norm_params):
            with slim.arg_scope([slim.max_pool2d], padding="SAME") as arg_scope:
                return arg_scope


# Define the forward pass of the model
with slim.arg_scope(arg_scope(is_training=False)):
    net = resnet_v2_152(inputs, 1000)

    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init)

        # Run the forward pass
        for i in range(num_batches + num_steps_burn_in):
            start_time = time.time()
            _ = sess.run(net)
            duration = time.time() - start_time

            if i >= num_steps_burn_in:
                if i % 10 == 0:
                    print('%s: step %d, duration=%.3f ' % (datetime.now(), i - num_steps_burn_in, duration))

                total_duration += duration
                total_duration_squared += duration * duration

        # Mean and standard deviation of the per-batch forward time: std = sqrt(E[t^2] - (E[t])^2)
        average_time = total_duration / num_batches
        variance = total_duration_squared / num_batches - average_time * average_time
        print("%s: Forward across %d steps, %.3f +/- %.3f sec / batch" % (
            datetime.now(), num_batches, average_time, math.sqrt(variance)
        ))

 
