The AlexNet network from TensorFlow实战 (TensorFlow in Action)

Code

################################ Import common libraries ##################################
from datetime import datetime
import time
import math
import tensorflow as tf
################################## Weight initialization for fully connected layers ########################
def variable_with_weight_loss(shape,stddev,w1):
    '''Create a truncated-normal variable; if w1 is given, add its L2 loss (scaled by w1) to the 'losses' collection.'''
    var=tf.Variable(tf.truncated_normal(shape,stddev=stddev))
    if w1 is not None:
        weight_loss = tf.multiply(tf.nn.l2_loss(var),w1,name='weight_loss')
        tf.add_to_collection('losses',weight_loss)
    return var
################################# Basic parameters ###################################
batch_size=32
num_batches=100
############################## Print helper #########################################
'''Print the name and output shape of each layer'''
def print_activations(t):
    print(t.op.name,' ',t.get_shape().as_list())
############################## Network structure ########################################
def inference(images):
    parameters=[]
    
    with tf.name_scope('conv1') as scope:#variables created in this scope are named conv1/xx
          kernel=tf.Variable(tf.truncated_normal([11,11,3,64],dtype=tf.float32,
                                                stddev=1e-1),name='weights')
          conv=tf.nn.conv2d(images,kernel,[1,4,4,1],padding='SAME')
          biases=tf.Variable(tf.constant(0.0,shape=[64],dtype=tf.float32),
                             trainable=True,name='biases')
          bias=tf.nn.bias_add(conv,biases)
          conv1=tf.nn.relu(bias,name=scope)
          print_activations(conv1)
          parameters+=[kernel,biases]#collect the trainable parameters
########################### LRN and pooling layers #######################################
#LRN has little effect, slows down both the forward and backward passes, and has been dropped by many newer networks
          lrn1=tf.nn.lrn(conv1,4,bias=1.0,alpha=0.001/9,beta=0.75,name='lrn1')
          pool1=tf.nn.max_pool(lrn1,ksize=[1,3,3,1],strides=[1,2,2,1],
                     padding='VALID',name='pool1')#VALID: the pooling window stays inside the border, no padding
          print_activations(pool1)   
############################### Convolutional layer 2 #######################################
    with tf.name_scope('conv2') as scope:#variables created in this scope are named conv2/xx
          kernel=tf.Variable(tf.truncated_normal([5,5,64,192],dtype=tf.float32,
                                                stddev=1e-1),name='weights')
          conv=tf.nn.conv2d(pool1,kernel,[1,1,1,1],padding='SAME')
          biases=tf.Variable(tf.constant(0.0,shape=[192],dtype=tf.float32),
                             trainable=True,name='biases')
          bias=tf.nn.bias_add(conv,biases)
          conv2=tf.nn.relu(bias,name=scope)
          parameters+=[kernel,biases]
          print_activations(conv2)
########################### LRN and pooling layers #######################################
#LRN has little effect, slows down both the forward and backward passes, and has been dropped by many newer networks
          lrn2=tf.nn.lrn(conv2,4,bias=1.0,alpha=0.001/9,beta=0.75,name='lrn2')
          pool2=tf.nn.max_pool(lrn2,ksize=[1,3,3,1],strides=[1,2,2,1],
                     padding='VALID',name='pool2')#VALID: the pooling window stays inside the border, no padding
          print_activations(pool2)     
############################### Convolutional layer 3 #######################################
    with tf.name_scope('conv3') as scope:#variables created in this scope are named conv3/xx
          kernel=tf.Variable(tf.truncated_normal([3,3,192,384],dtype=tf.float32,
                                                stddev=1e-1),name='weights')
          conv=tf.nn.conv2d(pool2,kernel,[1,1,1,1],padding='SAME')
          biases=tf.Variable(tf.constant(0.0,shape=[384],dtype=tf.float32),
                             trainable=True,name='biases')
          bias=tf.nn.bias_add(conv,biases)
          conv3=tf.nn.relu(bias,name=scope)
          parameters+=[kernel,biases]
          print_activations(conv3)
############################### Convolutional layer 4 #######################################
    with tf.name_scope('conv4') as scope:#variables created in this scope are named conv4/xx
          kernel=tf.Variable(tf.truncated_normal([3,3,384,256],dtype=tf.float32,
                                                stddev=1e-1),name='weights')
          conv=tf.nn.conv2d(conv3,kernel,[1,1,1,1],padding='SAME')
          biases=tf.Variable(tf.constant(0.0,shape=[256],dtype=tf.float32),
                             trainable=True,name='biases')
          bias=tf.nn.bias_add(conv,biases)
          conv4=tf.nn.relu(bias,name=scope)
          parameters+=[kernel,biases]
          print_activations(conv4)
############################### Convolutional layer 5 #######################################
    with tf.name_scope('conv5') as scope:#variables created in this scope are named conv5/xx
          kernel=tf.Variable(tf.truncated_normal([3,3,256,256],dtype=tf.float32,
                                                stddev=1e-1),name='weights')
          conv=tf.nn.conv2d(conv4,kernel,[1,1,1,1],padding='SAME')
          biases=tf.Variable(tf.constant(0.0,shape=[256],dtype=tf.float32),
                             trainable=True,name='biases')
          bias=tf.nn.bias_add(conv,biases)
          conv5=tf.nn.relu(bias,name=scope)
          parameters+=[kernel,biases]
          print_activations(conv5)
################################ Max pooling layer ####################################
    pool5=tf.nn.max_pool(conv5,ksize=[1,3,3,1],strides=[1,2,2,1],padding='VALID',name='pool5')
    print_activations(pool5)
    return pool5,parameters
############################### Fully connected layers ###################################
def full_connect(pool):
############################### Fully connected layer 1 #######################################
         reshape=tf.reshape(pool,[batch_size,-1])#flatten the feature map
         dim=reshape.get_shape()[1].value
         weight1=variable_with_weight_loss(shape=[dim,4096],stddev=0.04,w1=0.004)
         bias1=tf.Variable(tf.constant(0.1,shape=[4096]))
         local1=tf.nn.relu(tf.matmul(reshape,weight1)+bias1)
############################### Fully connected layer 2 #######################################
         weight2=variable_with_weight_loss(shape=[4096,4096],stddev=0.04,w1=0.004)
         bias2=tf.Variable(tf.constant(0.1,shape=[4096]))
         local2=tf.nn.relu(tf.matmul(local1,weight2)+bias2)
############################### Fully connected layer 3 #######################################
         weight3=variable_with_weight_loss(shape=[4096,1000],stddev=1/4096,w1=0.0)
         bias3=tf.Variable(tf.constant(0.1,shape=[1000]))
         local3=tf.nn.relu(tf.matmul(local2,weight3)+bias3)
         return local3
############################## Per-batch timing function ##################################
def time_tensorflow_run(session,target,info_string):
      num_steps_burn_in=10#warm-up rounds; only iterations after the first ten are timed
      total_duration=0.0#total elapsed time
      total_duration_squared=0.0#sum of squared durations, used for the variance
      for i in range(num_batches+num_steps_burn_in):#time each iteration
            start_time=time.time()
            _=session.run(target)
            duration=time.time()-start_time
            if i >= num_steps_burn_in:
                print('%s: step %d, duration=%.3f'%(datetime.now(),i-num_steps_burn_in,duration))
                total_duration+=duration#accumulate to compute the per-batch mean and standard deviation
                total_duration_squared+=duration*duration
      mn=total_duration/num_batches#mean time per batch
      vr=total_duration_squared/num_batches-mn*mn#variance
      sd=math.sqrt(vr)#standard deviation
      print('%s: %s across %d steps, %.3f +/- %.3f sec/batch'%(datetime.now(),info_string,num_batches,mn,sd))
def run_benchmark():
       with tf.Graph().as_default():
           image_size=224
           image=tf.Variable(tf.random_normal(
                   [batch_size,
                    image_size,
                    image_size,3],
                   dtype=tf.float32,
                   stddev=1e-1
                   ))
           pool5,parameters=inference(image)
           #local=full_connect(pool5)
           init=tf.global_variables_initializer()
           sess=tf.Session()
           sess.run(init)
########################## Benchmark run ##########################################
#the parameter optimization (training) step is omitted here
           time_tensorflow_run(sess,pool5,"Forward")
           objective=tf.nn.l2_loss(pool5)#optimization objective
           grad=tf.gradients(objective,parameters)#gradients of the loss w.r.t. all model parameters
           time_tensorflow_run(sess,grad,"Forward-backward")
run_benchmark()

Problem encountered:


Solution: install the myTensorflow virtual environment; installation link: http://blog.sina.com.cn/s/blog_639a2ad70102xbh3.html

**Problem encountered:** ValueError: math domain error

Solution: not resolved in the original post, but the cause is mathematical: math.sqrt() raises this error when its argument is negative, which happens in time_tensorflow_run if total_duration_squared is overwritten with '=' instead of accumulated with '+=', driving the variance vr below zero.
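As a minimal sketch of that failure mode (the timing values below are hypothetical), the overwritten sum of squares yields a negative variance so math.sqrt() fails, while proper accumulation keeps the variance non-negative:

import math

durations = [0.12, 0.11, 0.13]                         # hypothetical per-batch timings
mn = sum(durations) / len(durations)                   # mean duration
# buggy: '=' keeps only the last squared duration
total_sq_buggy = durations[-1] ** 2
vr_buggy = total_sq_buggy / len(durations) - mn * mn   # negative value
# math.sqrt(vr_buggy)                                  # -> ValueError: math domain error
# correct: accumulate every squared duration
total_sq = sum(d * d for d in durations)
vr = total_sq / len(durations) - mn * mn               # non-negative
print(math.sqrt(vr))                                   # standard deviation, no error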

Note: when an error such as "... has no attribute XX" appears, first check whether the attribute name was typed correctly.
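For example (with a hypothetical misspelling), a typo in an attribute name triggers exactly this kind of message:

import tensorflow as tf

# kernel = tf.Variable(tf.truncated_nomal([3,3,64,192], stddev=0.1))
# -> AttributeError: module 'tensorflow' has no attribute 'truncated_nomal'
kernel = tf.Variable(tf.truncated_normal([3,3,64,192], stddev=0.1))  # correct spelling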
