Implementing the Classic VGGNet-16 Convolutional Network Model with TensorFlow

1. Background

        In the 2014 ILSVRC image classification competition, VGGNet took second place with a top-5 error rate of 7.3%, slightly behind that year's winner, Google's GoogLeNet (InceptionV1), which achieved a top-5 error rate of 6.6%. However, when the network is transferred to other image datasets, VGGNet generalizes better than GoogLeNet. The model is a deep convolutional neural network developed jointly by the Visual Geometry Group at the University of Oxford and researchers at Google DeepMind. Throughout the network, only 3x3 convolution kernels and 2x2 max-pooling windows are used. Depending on the network depth and whether LRN (local response normalization) is used, VGGNet comes in six configurations, labeled A through E.

2. Implementation

        For the code, configuration D of VGGNet (also known as VGGNet-16) is implemented here; it has 13 convolution layers and 3 fully connected layers, for a total of 16 weight layers.
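        As a quick roadmap for the code below, the channel layout of configuration D can be summarized as a small Python structure. This is a minimal sketch for illustration only; the names VGG16_CONV_BLOCKS and VGG16_FC_SIZES do not appear in the implementation that follows.

# Hypothetical summary of VGGNet-16 (configuration D): every convolution kernel is 3x3
# and every max-pooling window is 2x2 with stride 2.
VGG16_CONV_BLOCKS = [
    [64, 64],          # block 1, followed by 2x2 max pooling
    [128, 128],        # block 2
    [256, 256, 256],   # block 3
    [512, 512, 512],   # block 4
    [512, 512, 512],   # block 5
]
VGG16_FC_SIZES = [4096, 4096, 1000]  # three fully connected layers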

        The model definition code is as follows:

"""
VGGNet-16 model definition
"""

import tensorflow as tf

batch_size = 12
num_batches = 100


def conv_op(input, name, kernel_h, kernel_w, num_out, step_h, step_w, para):
    """
    定义卷积操作
    :param input: 输入张量
    :param name: 这一层名称
    :param kernel_h: 卷积核高度
    :param kernel_w: 卷积核宽度
    :param num_out: 输出通道数
    :param step_h: 高度上步长
    :param step_w: 宽度上步长
    :param para: 传递进来的参数列表
    :return:
    """
    num_in = input.get_shape()[-1].value
    with tf.name_scope(name) as scope:
        kernel = tf.get_variable(scope + "w", shape=[kernel_h, kernel_w, num_in, num_out], dtype=tf.float32,
                                 initializer=tf.contrib.layers.xavier_initializer_conv2d())
        conv = tf.nn.conv2d(input, kernel, [1, step_h, step_w, 1], padding="SAME")
        biases = tf.Variable(tf.constant(0.0, shape=[num_out], dtype=tf.float32))
        activation = tf.nn.relu(tf.nn.bias_add(conv, biases), name=scope)

        para += [kernel, biases]
        return activation


def fc_op(input, name, num_out, para):
    """
    全连接操作
    :param input: 输入张量
    :param name: 改层名称
    :param num_out: 输出层数量
    :param para: 传递进来的参数列表
    :return:
    """
    num_in = input.get_shape()[-1].value

    with tf.name_scope(name) as scope:
        weights = tf.get_variable(scope + "w", shape=[num_in, num_out], dtype=tf.float32,
                                  initializer=tf.contrib.layers.xavier_initializer())
        biases = tf.Variable(tf.constant(0.1, shape=[num_out], dtype=tf.float32), name="b")

        activation = tf.nn.relu_layer(input, weights, biases)

        para += [weights, biases]
        return activation


def inference_op(input, keep_prob):
    """
    前向传播
    :param input:
    :param keep_prob:
    :return:
    """
    parameters = []
    # First convolution block
    # input: 224x224x3
    conv1_1 = conv_op(input, name="conv1_1", kernel_h=3, kernel_w=3, num_out=64, step_h=1, step_w=1, para=parameters)
    # 64@224x224
    conv1_2 = conv_op(conv1_1, name="conv1_2", kernel_h=3, kernel_w=3, num_out=64, step_h=1, step_w=1, para=parameters)
    # 64@224x224
    pool1 = tf.nn.max_pool(conv1_2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME", name="pool1")
    # 64@112x112
    print(pool1.op.name, ' ', pool1.get_shape().as_list())

    # Second convolution block
    conv2_1 = conv_op(pool1, name="conv2_1", kernel_h=3, kernel_w=3, num_out=128, step_h=1, step_w=1, para=parameters)
    # 128@112x112
    conv2_2 = conv_op(conv2_1, name="conv2_2", kernel_h=3, kernel_w=3, num_out=128, step_h=1, step_w=1, para=parameters)
    # 128@112x112
    pool2 = tf.nn.max_pool(conv2_2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME", name="pool2")
    # 128@56x56
    print(pool2.op.name, ' ', pool2.get_shape().as_list())

    # Third convolution block
    conv3_1 = conv_op(pool2, name="conv3_1", kernel_h=3, kernel_w=3, num_out=256, step_h=1, step_w=1, para=parameters)
    # 256@56x56
    conv3_2 = conv_op(conv3_1, name="conv3_2", kernel_h=3, kernel_w=3, num_out=256, step_h=1, step_w=1, para=parameters)
    # 256@56x56
    conv3_3 = conv_op(conv3_2, name="conv3_3", kernel_h=3, kernel_w=3, num_out=256, step_h=1, step_w=1, para=parameters)
    # 256@56x56
    pool3 = tf.nn.max_pool(conv3_3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name="pool3")
    # 256@28x28
    print(pool3.op.name, ' ', pool3.get_shape().as_list())

    # Fourth convolution block
    conv4_1 = conv_op(pool3, name="conv4_1", kernel_h=3, kernel_w=3, num_out=512, step_h=1, step_w=1, para=parameters)
    # 512@28x28
    conv4_2 = conv_op(conv4_1, name="conv4_2", kernel_h=3, kernel_w=3, num_out=512, step_h=1, step_w=1, para=parameters)
    # 512@28x28
    conv4_3 = conv_op(conv4_2, name="conv4_3", kernel_h=3, kernel_w=3, num_out=512, step_h=1, step_w=1, para=parameters)
    # 512@28x28
    pool4 = tf.nn.max_pool(conv4_3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name="pool4")
    # 512@14x14
    print(pool4.op.name, ' ', pool4.get_shape().as_list())

    # Fifth convolution block
    conv5_1 = conv_op(pool4, name="conv5_1", kernel_h=3, kernel_w=3, num_out=512, step_h=1, step_w=1, para=parameters)
    # 512@14x14
    conv5_2 = conv_op(conv5_1, name="conv5_2", kernel_h=3, kernel_w=3, num_out=512, step_h=1, step_w=1, para=parameters)
    # 512@14x14
    conv5_3 = conv_op(conv5_2, name="conv5_3", kernel_h=3, kernel_w=3, num_out=512, step_h=1, step_w=1, para=parameters)
    # 512@14x14
    pool5 = tf.nn.max_pool(conv5_3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name="pool5")
    # 512@7x7
    print(pool5.op.name, ' ', pool5.get_shape().as_list())

    # Flatten the pool5 output into a vector; for a 224x224 input, pool5 is 7x7x512,
    # so the flattened length is 7 * 7 * 512 = 25088
    pool_shape = pool5.get_shape().as_list()
    flattened_shape = pool_shape[1] * pool_shape[2] * pool_shape[3]
    reshaped = tf.reshape(pool5, [-1, flattened_shape], name='reshaped')

    # First fully connected layer
    fc_6 = fc_op(reshaped, name="fc6", num_out=4096, para=parameters)
    fc_6_drop = tf.nn.dropout(fc_6, keep_prob, name="fc6_drop")

    # Second fully connected layer
    fc_7 = fc_op(fc_6_drop, name="fc7", num_out=4096, para=parameters)
    fc_7_drop = tf.nn.dropout(fc_7, keep_prob, name="fc7_drop")

    fc_8 = fc_op(fc_7_drop, name="fc8", num_out=1000, para=parameters)
    softmax = tf.nn.softmax(fc_8)

    # Obtain the predicted class for each image in the batch
    predictions = tf.argmax(softmax, 1)
    return predictions, softmax, fc_8, parameters

        Below is the test code for the model; it benchmarks the forward pass and the forward-backward (gradient) pass on randomly generated input images:

"""
VGGNet-16 benchmark test
"""
import datetime
import math
import time

import tensorflow as tf

from paper1.vggnet16.vggnet import batch_size, inference_op

with tf.Graph().as_default():
    image_size = 224
    num_batches = 100
    images = tf.Variable(tf.random_normal([batch_size, image_size, image_size, 3], dtype=tf.float32, stddev=1e-1))

    keep_prob = tf.placeholder(tf.float32)

    predictions, softmax, fc_8, parameters = inference_op(images, keep_prob)

    init_op = tf.global_variables_initializer()

    config = tf.ConfigProto()
    config.gpu_options.allocator_type = "BFC"
    with tf.Session(config=config) as sess:
        sess.run(init_op)

        num_steps_burn_in = 10

        total_dura = 0.0
        total_dura_squared = 0.0

        back_total_dura = 0.0
        back_total_dura_squared = 0.0

        # Benchmark the forward pass
        for i in range(num_batches + num_steps_burn_in):
            start_time = time.time()
            _ = sess.run(predictions, feed_dict={keep_prob: 1.0})

            duration = time.time() - start_time

            if i >= num_steps_burn_in:
                if i % 10 == 0:
                    print("%s: step %d, duration = %.3f" % (datetime.datetime.now(),
                                                            i - num_steps_burn_in,
                                                            duration))
                total_dura += duration
                total_dura_squared += duration * duration

        average_time = total_dura / num_batches

        print("%s: Forward across %d steps, %.3f +/- %.3f sec/batch" % (
            datetime.datetime.now(),
            num_batches,
            average_time,
            math.sqrt(total_dura_squared / num_batches - average_time * average_time)
        ))

        # =================== Benchmark the backward pass ====================
        grad = tf.gradients(tf.nn.l2_loss(fc_8), parameters)
        for i in range(num_batches + num_steps_burn_in):
            start_time = time.time()
            _ = sess.run(grad, feed_dict={keep_prob: 0.5})
            duration = time.time() - start_time

            if i >= num_steps_burn_in:
                if i % 10 == 0:
                    print("%s: step %d, duration=%.3f" % (datetime.datetime.now(), i - num_steps_burn_in, duration))
                back_total_dura += duration
                back_total_dura_squared += duration * duration

        back_avg_t = back_total_dura / num_batches

        # Print timing statistics for the forward-backward pass
        print("%s: Forward-backward accorss %d steps, %.3f +/- %.3f sec / batch" % (
            datetime.datetime.now(),
            num_batches,
            back_avg_t,
            math.sqrt(back_total_dura_squared / num_batches - back_avg_t * back_avg_t)
        ))
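
        The timing statistics above rely on the identity Var[x] = E[x^2] - (E[x])^2: the loops accumulate the sum of durations and the sum of squared durations, and the mean and standard deviation are recovered after the loop. The same computation is shown below as a minimal standalone sketch; the helper name mean_and_std is illustrative and not part of the original code.

import math

def mean_and_std(durations):
    """Return the mean and standard deviation of a list of per-batch durations."""
    n = len(durations)
    mean = sum(durations) / n
    # Var[x] = E[x^2] - (E[x])^2; clamp at zero to guard against rounding error
    var = max(sum(d * d for d in durations) / n - mean * mean, 0.0)
    return mean, math.sqrt(var)

# Example: mean_and_std([0.31, 0.29, 0.30]) returns roughly (0.30, 0.0082)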

 
