VGGNet-16 (Configuration D) Implementation in TensorFlow

VGGNet

Strengths: VGGNet explores the relationship between the depth of a convolutional network and its performance. By repeatedly stacking 3 x 3 convolution kernels and 2 x 2 max-pooling layers, it builds convolutional networks 16 to 19 layers deep with strong transfer and generalization ability and a simple, regular structure.
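A quick back-of-the-envelope check (my own sketch, not from the original post) shows why the stacked 3 x 3 kernels are attractive: two 3 x 3 convolutions cover the same 5 x 5 receptive field as a single 5 x 5 convolution, and three cover a 7 x 7 field, yet use fewer weights and add extra ReLU non-linearities in between.

# Hypothetical helper: count conv weights, assuming equal input/output channels and ignoring biases
def conv_weight_count(kernel_size, channels, num_layers):
    return num_layers * kernel_size * kernel_size * channels * channels

C = 512  # channel count assumed for illustration, as in VGG's deeper blocks
print(conv_weight_count(3, C, 2), 'vs', conv_weight_count(5, C, 1))  # 4718592 vs 6553600
print(conv_weight_count(3, C, 3), 'vs', conv_weight_count(7, C, 1))  # 7077888 vs 12845056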

  1. Configurations A through E: the six network structures of increasing depth (a quick layer-count check for configuration D follows below)
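As a quick check of the naming (my own note, not part of the original post): configuration D stacks its convolution layers in blocks of 2, 2, 3, 3 and 3, and together with the three fully connected layers that gives the 16 weight layers behind the name VGGNet-16.

# Configuration D: 13 conv layers (blocks of 2, 2, 3, 3, 3) + 3 fully connected layers
conv_blocks = [2, 2, 3, 3, 3]
print(sum(conv_blocks) + 3)  # 16 weight layers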
# Build VGGNet-16 (configuration D)
import tensorflow as tf
import time 
from datetime import datetime
import math
# Convolution layer helper -----> further simplified compared with the AlexNet implementation
def conv_op(input_op,name,kh,kw,n_out,dh,dw,parameters):
    '''
    input_op: input tensor
    name: name for the layer / tensor
    kh: kernel height
    kw: kernel width
    n_out: number of output channels
    dh: stride height
    dw: stride width
    parameters: list collecting the trainable parameters
    '''
    n_in = input_op.get_shape()[-1].value  # number of input channels
    
    with tf.name_scope(name) as scope:
        # initialize the kernel ----> tf.contrib.layers.xavier_initializer_conv2d()
        kernel = tf.get_variable(scope+'w',
                                 shape=[kh,kw,n_in,n_out],dtype=tf.float32,
                                 initializer=tf.contrib.layers.xavier_initializer_conv2d())
        # initialize the biases to zero
        biases = tf.Variable(tf.constant(0.0,shape=[n_out],dtype=tf.float32),
                             trainable=True,name='bias')
        
        # convolution, bias add, then ReLU activation
        conv = tf.nn.relu(
            tf.nn.bias_add(tf.nn.conv2d(input_op,kernel,[1,dh,dw,1],padding='SAME'),
                           biases),name=scope)
        parameters += [kernel,biases]
        
        return conv
# Fully connected layer helper
def fc_op(input_op,name,n_out,parameters):
    n_in = input_op.get_shape()[-1].value
    
    with tf.name_scope(name) as scope:
        kernel = tf.get_variable(scope+'w',
                                 shape=[n_in,n_out],
                                 dtype=tf.float32,
                                 initializer=tf.contrib.layers.xavier_initializer())
        biases = tf.Variable(tf.constant(0.1,shape=[n_out],dtype=tf.float32),
                             name='bias')
        activation = tf.nn.relu_layer(input_op,kernel,biases,name=scope)
        parameters += [kernel,biases]
        
        return activation
# Max-pooling layer helper
def m_max_pool(input_op,name,kh,kw,dh,dw):
    return tf.nn.max_pool(input_op,
                          ksize=[1,kh,kw,1],
                          strides=[1,dh,dw,1],
                          padding='SAME',
                          name=name)
# Define the VGGNet-16 inference graph
def inference(input_op,keep_prop):
    '''
    keep_prop: keep probability used by dropout
    '''
    parameters = []
    
    conv1_1 = conv_op(input_op,name='conv1_1',kh=3,kw=3,n_out=64,dh=1,dw=1,parameters=parameters)
    
    conv1_2 = conv_op(conv1_1,name='conv1_2',kh=3,kw=3,n_out=64,dh=1,dw=1,parameters=parameters)
    
    pool1 = m_max_pool(conv1_2,name='pool1',kh=2,kw=2,dh=2,dw=2)
    
    conv2_1 = conv_op(pool1,name='conv2_1',kh=3,kw=3,n_out=128,dh=1,dw=1,parameters=parameters)
    
    conv2_2 = conv_op(conv2_1,name='conv2_2',kh=3,kw=3,n_out=128,dh=1,dw=1,parameters=parameters)
    
    pool2 = m_max_pool(conv2_2,name='pool2',kh=2,kw=2,dh=2,dw=2)
    
    conv3_1 = conv_op(pool2,name='conv3_1',kh=3,kw=3,n_out=256,dh=1,dw=1,parameters=parameters)
    
    conv3_2 = conv_op(conv3_1,name='conv3_2',kh=3,kw=3,n_out=256,dh=1,dw=1,parameters=parameters)
    
    conv3_3 = conv_op(conv3_2,name='conv3_3',kh=3,kw=3,n_out=256,dh=1,dw=1,parameters=parameters)
    
    pool3 = m_max_pool(conv3_3,name='pool3',kh=2,kw=2,dh=2,dw=2)    
    
    conv4_1 = conv_op(pool3,name='conv4_1',kh=3,kw=3,n_out=512,dh=1,dw=1,parameters=parameters)
    
    conv4_2 = conv_op(conv4_1,name='conv4_2',kh=3,kw=3,n_out=512,dh=1,dw=1,parameters=parameters)
    
    conv4_3 = conv_op(conv4_2,name='conv4_3',kh=3,kw=3,n_out=512,dh=1,dw=1,parameters=parameters)
    
    pool4 = m_max_pool(conv4_3,name='pool4',kh=2,kw=2,dh=2,dw=2)      
    
    conv5_1 = conv_op(pool4,name='conv5_1',kh=3,kw=3,n_out=512,dh=1,dw=1,parameters=parameters)
    
    conv5_2 = conv_op(conv5_1,name='conv5_2',kh=3,kw=3,n_out=512,dh=1,dw=1,parameters=parameters)
    
    conv5_3 = conv_op(conv5_2,name='conv5_3',kh=3,kw=3,n_out=512,dh=1,dw=1,parameters=parameters)
    
    pool5 = m_max_pool(conv5_3,name='pool5',kh=2,kw=2,dh=2,dw=2)

    # flatten pool5 into a vector so it can feed the fully connected layers
    conv_shape = pool5.get_shape()
    
    flatten_shape = conv_shape[1].value * conv_shape[2].value * conv_shape[3].value
    
    resh1 = tf.reshape(pool5,[-1,flatten_shape],name='resh1')

    fc_6 = fc_op(resh1,name='fc_6',n_out=4096,parameters=parameters)

    fc_6drop = tf.nn.dropout(fc_6,rate=1-keep_prop,name='fc_6drop')  # rate is the drop probability, i.e. 1 - keep_prop
    
    fc_7 = fc_op(fc_6drop,name='fc_7',n_out=4096,parameters=parameters)

    fc_7drop = tf.nn.dropout(fc_7,rate=1-keep_prop,name='fc_7drop')
    
    fc_8 = fc_op(fc_7drop,name='fc_8',n_out=1000,parameters=parameters)

    softmax = tf.nn.softmax(fc_8)
    
    predictions = tf.argmax(softmax,1)
    
    return predictions,softmax,fc_8,parameters
    
    
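A small sanity check (my own sketch, not from the original post): with the 224 x 224 inputs used in run_benchmark below, the five stride-2 poolings shrink the spatial size to 7, so pool5 is 7 x 7 x 512 and flatten_shape is 25088, matching the tensor shapes printed in the output further down.

# Assumed input size 224 x 224, as in run_benchmark below
size = 224
for _ in range(5):   # five 2 x 2, stride-2 max-pooling layers
    size //= 2
print(size)                    # 7
print(size * size * 512)       # 25088 = flatten_shape fed to fc_6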
# Benchmarking helper
def time_tensorflow_run(session,target,feed,info_string):
    '''
    session: TensorFlow session
    target: the op to benchmark
    feed: feed_dict for each run
    info_string: label printed with the results
    '''
    num_steps_burn_in = 10  # warm-up iterations, excluded from the statistics
    # running sums used to compute the mean and variance of the per-batch time
    total_duration = 0.0
    total_duration_squared = 0.0
    
    for i in range(num_batches + num_steps_burn_in):
        start_time = time.time()
        _ = session.run(target,feed_dict=feed)
        duration = time.time() - start_time
        if i >= num_steps_burn_in:
            if not i % 10:
                print('%s:step %d,duration = %.3f'%(
                    datetime.now(),i - num_steps_burn_in,duration))
            total_duration += duration
            total_duration_squared += duration * duration
            
    # mean time per batch and its standard deviation
    mean_time = total_duration / num_batches
    sd = math.sqrt(total_duration_squared / num_batches - mean_time * mean_time)
    print('%s: %s across %d steps,%.3f +/- %.3f sec / batch'%(
        datetime.now(),info_string,num_batches,mean_time,sd))
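The standard deviation printed above comes from the two running sums via the identity Var(X) = E[X^2] - (E[X])^2; a tiny sketch with made-up durations (not measurements from the post) confirms it matches the textbook definition.

import math
durations = [1.1, 1.3, 0.9, 1.0]  # hypothetical per-batch timings
n = len(durations)
mean_time = sum(durations) / n
sd = math.sqrt(sum(d * d for d in durations) / n - mean_time ** 2)      # formula used in the code
sd_check = math.sqrt(sum((d - mean_time) ** 2 for d in durations) / n)  # textbook definition
print(abs(sd - sd_check) < 1e-9)  # True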
# Benchmark on random data (training on the real ImageNet dataset would be far too time-consuming here)
def run_benchmark():
    # build and run the benchmark in a fresh graph
    with tf.Graph().as_default():
        image_size = 224
        # random image tensor standing in for real input data
        images = tf.Variable(tf.random_normal([batch_size,
                                               image_size,
                                               image_size,3],
                                               dtype=tf.float32,stddev=0.1))
        keep_prop = tf.placeholder(tf.float32)
        predictions,softmax,fc_8,parameters = inference(images,keep_prop)
        
        init = tf.global_variables_initializer()
        sess = tf.Session()
        sess.run(init)
     
        time_tensorflow_run(sess,predictions,{keep_prop:1.0},'Forward')
        
        objective = tf.nn.l2_loss(fc_8)  # L2 loss on fc_8, used as a dummy objective for the backward pass
  
        grad = tf.gradients(objective,parameters)
        time_tensorflow_run(sess,grad,{keep_prop:0.5},'Forward-Backward')
batch_size = 8
num_batches = 10
run_benchmark()
Tensor("pool5:0", shape=(8, 7, 7, 512), dtype=float32)
Tensor("resh1:0", shape=(8, 25088), dtype=float32)
Tensor("fc_6:0", shape=(8, 4096), dtype=float32)
Tensor("fc_7:0", shape=(8, 4096), dtype=float32)
Tensor("fc_8:0", shape=(8, 1000), dtype=float32)
Tensor("fc_8:0", shape=(8, 1000), dtype=float32)
2019-08-22 16:47:58.296828:step 0,duration = 1.367
2019-08-22 16:48:08.086028: Forward across 10 steps,1.115 +/- 0.227 sec / batch
Tensor("L2Loss:0", shape=(), dtype=float32)
2019-08-22 16:48:58.554828:step 0,duration = 4.645
2019-08-22 16:49:39.599828: Forward-Backward across 10 steps,4.569 +/- 0.126 sec / batch
