Implementing AlexNet in TensorFlow

1. A Brief Introduction to the AlexNet Model

The AlexNet model was proposed in 2012 by Alex Krizhevsky, a student of Geoffrey Hinton. AlexNet incorporated several relatively new techniques and was the first CNN to successfully apply tricks such as ReLU, Dropout, and LRN.
The main new techniques AlexNet uses are:
(1) It successfully used ReLU as the CNN activation function and verified that ReLU outperforms sigmoid in deeper networks, addressing the vanishing-gradient problem that sigmoid suffers from as networks get deeper.
(2) It used dropout during training to randomly ignore a subset of neurons and thereby avoid overfitting; dropout is mainly applied to the last few fully connected layers (see the sketch right after this list).
(3) It used overlapping max pooling in the CNN, avoiding the blurring effect of average pooling. It also proposed making the stride smaller than the pooling kernel, so that neighboring pooling outputs overlap, which enriches the features.
(4) It proposed the LRN layer, which creates a competition mechanism among local neuron activations so that the relatively large responses become even larger, improving the model's ability to generalize.
(5) It used CUDA to accelerate training of the deep convolutional network, exploiting the GPU's massive parallelism to handle the large amount of matrix computation involved in training.
(6) It used data augmentation, which greatly reduces overfitting and improves generalization.
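As a concrete illustration of point (2): the benchmark code in section 2 below omits the fully connected layers entirely, so here is a minimal TF1-style sketch of dropout applied to a hypothetical fully connected layer. The names (fc1, keep_prob) and the 4096-unit width are illustrative assumptions (the width matches the original paper), not part of the benchmark code:

import tensorflow as tf

# Stand-in for the flattened pool5 output; the [32, 6, 6, 256] shape is assumed
pool5_flat = tf.reshape(tf.zeros([32, 6, 6, 256]), [32, -1])
dim = int(pool5_flat.get_shape()[1])

# A hypothetical 4096-unit fully connected layer, as in the original paper
fc1_w = tf.Variable(tf.truncated_normal([dim, 4096], stddev=1e-2), name='fc1_w')
fc1_b = tf.Variable(tf.constant(0.0, shape=[4096]), name='fc1_b')
fc1 = tf.nn.relu(tf.matmul(pool5_flat, fc1_w) + fc1_b, name='fc1')

# Dropout: keep each activation with probability keep_prob during training;
# feed keep_prob = 1.0 at inference time to disable it
keep_prob = tf.placeholder_with_default(0.5, shape=())
fc1_drop = tf.nn.dropout(fc1, keep_prob)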

2. The Code



# Import libraries
import time
import math
import datetime
import tensorflow as tf

# Set parameters
batch_size = 32
num_batches = 100

# Define print_activations, a function that displays the structure of each network layer by printing the size of each convolution or pooling layer's output tensor
def print_activations(t):
    print(t.op.name,'',t.get_shape().as_list())

# Design the AlexNet network structure
# Define the inference function: it takes images as input and returns the last layer pool5 together with parameters (the weights and biases that would be trained)
def inference(images):
    parameters = []

# First convolutional layer
    with tf.name_scope('conv1') as scope:
        kernel = tf.Variable(tf.truncated_normal([11, 11, 3, 64],
                                                  dtype = tf.float32, stddev=1e-1),name='weights')
        conv = tf.nn.conv2d(images, kernel, [1, 4, 4, 1], padding='SAME')
        biases = tf.Variable(tf.constant(0.0, shape=[64], dtype=tf.float32),
                             trainable=True, name='biases')
        bias = tf.nn.bias_add(conv, biases)
        conv1 = tf.nn.relu(bias, name=scope)
        print_activations(conv1)
        parameters +=[kernel, biases]
# First pooling layer (preceded by LRN)
        lrn1 = tf.nn.lrn(conv1, 4, bias=1.0,alpha=0.001/9,beta=0.75,name='lrn1')
        pool1 = tf.nn.max_pool(lrn1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
                               padding='VALID', name='pool1')
        print_activations(pool1)

# Second convolutional layer
    with tf.name_scope('conv2') as scope:
        kernel = tf.Variable(tf.truncated_normal([5, 5, 64, 192],
                                                  dtype = tf.float32, stddev=1e-1),name='weights')
        conv = tf.nn.conv2d(pool1, kernel, [1, 1, 1, 1], padding='SAME')
        biases = tf.Variable(tf.constant(0.0, shape=[192], dtype=tf.float32),
                             trainable=True, name='biases')
        bias = tf.nn.bias_add(conv, biases)
        conv2 = tf.nn.relu(bias, name=scope)
        print_activations(conv2)
        parameters +=[kernel, biases]
# Second pooling layer (preceded by LRN)
        lrn2 = tf.nn.lrn(conv2, 4, bias=1.0,alpha=0.001/9,beta=0.75,name='lrn2')
        pool2 = tf.nn.max_pool(lrn2, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
                               padding='VALID', name='pool2')
        print_activations(pool2)

# Third convolutional layer
    with tf.name_scope('conv3') as scope:
        kernel = tf.Variable(tf.truncated_normal([3, 3, 192, 384],
                                                  dtype = tf.float32, stddev=1e-1),name='weights')
        conv = tf.nn.conv2d(pool2, kernel, [1, 1, 1, 1], padding='SAME')
        biases = tf.Variable(tf.constant(0.0, shape=[384], dtype=tf.float32),
                             trainable=True, name='biases')
        bias = tf.nn.bias_add(conv, biases)
        conv3 = tf.nn.relu(bias, name=scope)
        print_activations(conv3)
        parameters +=[kernel, biases]

# Fourth convolutional layer
    with tf.name_scope('conv4') as scope:
        kernel = tf.Variable(tf.truncated_normal([3, 3, 384, 256],
                                                  dtype = tf.float32, stddev=1e-1),name='weights')
        conv = tf.nn.conv2d(conv3, kernel, [1, 1, 1, 1], padding='SAME')
        biases = tf.Variable(tf.constant(0.0, shape=[256], dtype=tf.float32),
                             trainable=True, name='biases')
        bias = tf.nn.bias_add(conv, biases)
        conv4 = tf.nn.relu(bias, name=scope)
        print_activations(conv4)
        parameters +=[kernel, biases]

# Fifth convolutional layer
    with tf.name_scope('conv5') as scope:
        kernel = tf.Variable(tf.truncated_normal([3, 3, 256, 256],
                                                  dtype = tf.float32, stddev=1e-1),name='weights')
        conv = tf.nn.conv2d(conv4, kernel, [1, 1, 1, 1], padding='SAME')
        biases = tf.Variable(tf.constant(0.0, shape=[256], dtype=tf.float32),
                             trainable=True, name='biases')
        bias = tf.nn.bias_add(conv, biases)
        conv5 = tf.nn.relu(bias, name=scope)
        print_activations(conv5)
        parameters +=[kernel, biases]

    # Final max pooling after conv5 (no LRN here)
    pool5 = tf.nn.max_pool(conv5, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
                           padding='VALID', name='pool5')
    print_activations(pool5)
    return pool5, parameters
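# For the 224x224 input used in run_benchmark below, print_activations
# reports the following output shapes (these follow from the strides and
# padding chosen above, assuming batch_size = 32):
#   conv1 [32, 56, 56, 64]    pool1 [32, 27, 27, 64]
#   conv2 [32, 27, 27, 192]   pool2 [32, 13, 13, 192]
#   conv3 [32, 13, 13, 384]   conv4 [32, 13, 13, 256]
#   conv5 [32, 13, 13, 256]   pool5 [32, 6, 6, 256]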

# Implement time_tensorflow_run, a function that evaluates AlexNet's computation time per batch
def time_tensorflow_run(session,target,info_string):
    num_steps_burn_in = 10
    total_duration = 0.0
    total_duration_squared = 0.0
    for i in range(num_batches + num_steps_burn_in):
        start_time = time.time()
        _ = session.run(target)
        duration = time.time() - start_time
        if i >= num_steps_burn_in:
            if not i % 10:
                print('%s: step %d, duration = %.3f' %
                      (datetime.datetime.now(), i - num_steps_burn_in, duration))
            # Accumulate every post-burn-in step, not just the printed ones,
            # so that the statistics below cover all num_batches steps
            total_duration += duration
            total_duration_squared += duration * duration

    mn = total_duration / num_batches
    vr = total_duration_squared / num_batches - mn * mn
    sd = math.sqrt(vr)
    print('%s: %s across %d steps, %.3f +/- %.3f sec / batch' %
          (datetime.datetime.now(), info_string, num_batches, mn,sd))

# Main function: run_benchmark
def run_benchmark():
    with tf.Graph().as_default():
        image_size = 224
        # Random image data is enough here: the benchmark measures speed,
        # not accuracy, so real ImageNet data is not needed
        images = tf.Variable(tf.random_normal([batch_size, image_size,
                                               image_size, 3],
                                              dtype=tf.float32, stddev=1e-1))
        pool5, parameters = inference(images)
        init = tf.global_variables_initializer()
        sess = tf.Session()
        sess.run(init)

        time_tensorflow_run(sess, pool5, "Forward")

        objective = tf.nn.l2_loss(pool5)
        grad = tf.gradients(objective, parameters)
        time_tensorflow_run(sess, grad, "Forward-backward")

run_benchmark()
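
A note on compatibility: the code above uses the TensorFlow 1.x graph-and-session API (tf.Session, tf.truncated_normal, tf.global_variables_initializer). If you are running TensorFlow 2.x, a minimal workaround, assuming your installation ships the tf.compat.v1 module, is to swap the import:

# Replace the original "import tensorflow as tf" with the v1 compatibility module
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()  # restore graph mode so tf.Session and friends work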
