TensorFlow in Action (4): Implementing AlexNet, a Classic Convolutional Neural Network

Key innovations: the successful use of ReLU, Dropout, LRN (Local Response Normalization), and CUDA-accelerated training of deep convolutional networks.
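
LRN normalizes each activation across neighboring channels at the same spatial position. In the notation of the AlexNet paper, with $a_{x,y}^{i}$ the activation in channel $i$:

$$ b_{x,y}^{i} = a_{x,y}^{i} \Big/ \Bigl( k + \alpha \sum_{j=\max(0,\, i-n/2)}^{\min(N-1,\, i+n/2)} \bigl(a_{x,y}^{j}\bigr)^{2} \Bigr)^{\beta} $$

where $N$ is the total number of channels. In the code below, tf.nn.lrn(conv1, 4, bias=1.0, alpha=0.001/9, beta=0.75) sets the depth radius $n/2 = 4$ (a 9-channel window), $k = 1.0$, $\alpha = 0.001/9$, and $\beta = 0.75$.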

Key points:

- inference() builds only the five convolutional layers of AlexNet (conv + ReLU, with LRN and overlapping 3x3/stride-2 max pooling after conv1, conv2, and conv5). The fully connected layers and Dropout are omitted, since only the convolutional part is benchmarked; a sketch of the missing layers follows the code.
- time_tensorflow_run() times the forward pass and the forward-backward pass separately, discards the first 10 warm-up iterations, and reports the mean and standard deviation of the per-batch time.

Experiment code:

# -*- coding: utf-8 -*-
"""
Created on Fri Jan 19 21:04:52 2018

@author: hui
"""

from datetime import datetime
import math
import time
import tensorflow as tf
batch_size = 32    # images per forward pass
num_batches = 100  # timed batches (after warm-up)

def print_activations(t):
    # Print an op's name and the shape of the tensor it produces.
    print(t.op.name, ' ', t.get_shape().as_list())

def inference(images):
    """Build the convolutional part of AlexNet; return the last feature
    map together with the list of trainable parameters."""
    parameters = []
    # conv1: 11x11 kernels, 64 output channels, stride 4.
    with tf.name_scope('conv1') as scope:
        kernel = tf.Variable(tf.truncated_normal([11, 11, 3, 64],
                                                 dtype=tf.float32, stddev=1e-1),
                             name='weights')
        conv = tf.nn.conv2d(images, kernel, [1, 4, 4, 1], padding='SAME')
        biases = tf.Variable(tf.constant(0.0, shape=[64], dtype=tf.float32),
                             trainable=True, name='biases')
        bias = tf.nn.bias_add(conv, biases)
        conv1 = tf.nn.relu(bias, name=scope)
        print_activations(conv1)
        parameters += [kernel, biases]
    # LRN over a 9-channel window, then overlapping 3x3/stride-2 max pooling.
    lrn1 = tf.nn.lrn(conv1, 4, bias=1.0, alpha=0.001 / 9, beta=0.75, name='lrn1')
    pool1 = tf.nn.max_pool(lrn1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
                           padding='VALID', name='pool1')
    print_activations(pool1)
    
    # conv2: 5x5 kernels, 192 output channels.
    with tf.name_scope('conv2') as scope:
        kernel = tf.Variable(tf.truncated_normal([5, 5, 64, 192],
                                                 dtype=tf.float32, stddev=1e-1),
                             name='weights')
        conv = tf.nn.conv2d(pool1, kernel, [1, 1, 1, 1], padding='SAME')
        biases = tf.Variable(tf.constant(0.0, shape=[192], dtype=tf.float32),
                             trainable=True, name='biases')
        bias = tf.nn.bias_add(conv, biases)
        conv2 = tf.nn.relu(bias, name=scope)
        parameters += [kernel, biases]
    print_activations(conv2)

    lrn2 = tf.nn.lrn(conv2, 4, bias=1.0, alpha=0.001 / 9, beta=0.75, name='lrn2')
    pool2 = tf.nn.max_pool(lrn2, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
                           padding='VALID', name='pool2')
    print_activations(pool2)
        
    # conv3: 3x3 kernels, 384 output channels (no LRN or pooling from here on).
    with tf.name_scope('conv3') as scope:
        kernel = tf.Variable(tf.truncated_normal([3, 3, 192, 384],
                                                 dtype=tf.float32, stddev=1e-1),
                             name='weights')
        conv = tf.nn.conv2d(pool2, kernel, [1, 1, 1, 1], padding='SAME')
        biases = tf.Variable(tf.constant(0.0, shape=[384], dtype=tf.float32),
                             trainable=True, name='biases')
        bias = tf.nn.bias_add(conv, biases)
        conv3 = tf.nn.relu(bias, name=scope)
        parameters += [kernel, biases]
    print_activations(conv3)
    
    # conv4: 3x3 kernels, 384 -> 256 channels.
    with tf.name_scope('conv4') as scope:
        kernel = tf.Variable(tf.truncated_normal([3, 3, 384, 256],
                                                 dtype=tf.float32, stddev=1e-1),
                             name='weights')
        conv = tf.nn.conv2d(conv3, kernel, [1, 1, 1, 1], padding='SAME')
        biases = tf.Variable(tf.constant(0.0, shape=[256], dtype=tf.float32),
                             trainable=True, name='biases')
        bias = tf.nn.bias_add(conv, biases)
        conv4 = tf.nn.relu(bias, name=scope)
        parameters += [kernel, biases]
    print_activations(conv4)

    # conv5: 3x3 kernels, 256 output channels, followed by the final max pooling.
    with tf.name_scope('conv5') as scope:
        kernel = tf.Variable(tf.truncated_normal([3, 3, 256, 256],
                                                 dtype=tf.float32, stddev=1e-1),
                             name='weights')
        conv = tf.nn.conv2d(conv4, kernel, [1, 1, 1, 1], padding='SAME')
        biases = tf.Variable(tf.constant(0.0, shape=[256], dtype=tf.float32),
                             trainable=True, name='biases')
        bias = tf.nn.bias_add(conv, biases)
        conv5 = tf.nn.relu(bias, name=scope)
        parameters += [kernel, biases]
    print_activations(conv5)
    pool5 = tf.nn.max_pool(conv5, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
                           padding='VALID', name='pool5')
    print_activations(pool5)
    return pool5, parameters  # don't forget the return (see the debugging note below)

def time_tensorflow_run(session, target, info_string):
    # Run `target` num_batches times after a warm-up period and report
    # the mean and standard deviation of the per-batch wall-clock time.
    num_steps_burn_in = 10  # discard the first iterations (graph warm-up)
    total_duration = 0.0
    total_duration_squared = 0.0
    for i in range(num_batches + num_steps_burn_in):
        start_time = time.time()
        _ = session.run(target)
        duration = time.time() - start_time
        if i >= num_steps_burn_in:
            if not i % 10:
                print('%s: step %d, duration=%.3f' % (datetime.now(),
                                                      i - num_steps_burn_in,
                                                      duration))
            total_duration += duration
            total_duration_squared += duration * duration
    # Summary statistics, printed once after the loop:
    # variance via E[d^2] - (E[d])^2 over the timed batches.
    mn = total_duration / num_batches
    vr = total_duration_squared / num_batches - mn * mn
    sd = math.sqrt(vr)
    print('%s: %s across %d steps, %.3f +/- %.3f sec/batch' % (datetime.now(),
                                                               info_string,
                                                               num_batches,
                                                               mn, sd))

def run_benchmark():
    # Feed random images: only speed is measured here, so real data is unnecessary.
    with tf.Graph().as_default():
        image_size = 224
        images = tf.Variable(tf.random_normal([batch_size, image_size, image_size, 3],
                                              dtype=tf.float32, stddev=1e-1))
        pool5, parameters = inference(images)
        init = tf.global_variables_initializer()
        sess = tf.Session()
        sess.run(init)
        # Forward pass only.
        time_tensorflow_run(sess, pool5, "forward")
        # Forward + backward: gradients of an L2 loss w.r.t. all parameters.
        objective = tf.nn.l2_loss(pool5)
        grad = tf.gradients(objective, parameters)
        time_tensorflow_run(sess, grad, "forward-backward")

run_benchmark()
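
The benchmark above stops at pool5: AlexNet's three fully connected layers and the Dropout mentioned at the top are never built. A minimal sketch of how they could be attached, assuming the paper's 4096-unit hidden layers and a hypothetical keep_prob placeholder (an illustration, not part of the benchmarked code):

def fc_layers(pool5, keep_prob, num_classes=1000):
    # Flatten [batch, 6, 6, 256] -> [batch, 9216].
    flat = tf.reshape(pool5, [batch_size, -1])
    dim = flat.get_shape()[1].value  # 6 * 6 * 256 = 9216

    def fc(x, n_in, n_out, name, relu=True):
        # One fully connected layer; ReLU is skipped for the final logits.
        with tf.name_scope(name) as scope:
            weights = tf.Variable(tf.truncated_normal([n_in, n_out],
                                                      dtype=tf.float32, stddev=1e-2),
                                  name='weights')
            biases = tf.Variable(tf.constant(0.0, shape=[n_out], dtype=tf.float32),
                                 name='biases')
            y = tf.nn.bias_add(tf.matmul(x, weights), biases)
            return tf.nn.relu(y, name=scope) if relu else y

    # Dropout after fc6 and fc7, as in the AlexNet paper
    # (keep_prob ~ 0.5 during training, 1.0 at inference).
    fc6 = tf.nn.dropout(fc(flat, dim, 4096, 'fc6'), keep_prob)
    fc7 = tf.nn.dropout(fc(fc6, 4096, 4096, 'fc7'), keep_prob)
    return fc(fc7, 4096, num_classes, 'fc8', relu=False)  # class logits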
               

Run results:
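
The per-batch timings depend on the machine, but the shape printout from print_activations is deterministic. With batch_size=32 and image_size=224 it should read:

conv1 [32, 56, 56, 64]
pool1 [32, 27, 27, 64]
conv2 [32, 27, 27, 192]
pool2 [32, 13, 13, 192]
conv3 [32, 13, 13, 384]
conv4 [32, 13, 13, 256]
conv5 [32, 13, 13, 256]
pool5 [32, 6, 6, 256]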


Debugging note:

TypeError: 'NoneType' object is not iterable

Cause: inference() was missing its return statement, so it returned None and the call site pool5, parameters = inference(images) failed while trying to unpack it.
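
A minimal sketch reproducing the error, with stand-in values instead of the real graph:

def inference():
    pool5, parameters = 'pool5', []     # stand-ins for the real tensors
    # missing: return pool5, parameters  -> the function returns None

pool5, parameters = inference()  # TypeError: 'NoneType' object is not iterable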



