Deep Learning for Beginners: TensorFlow (4), a CIFAR-10 Example

I recently worked through the example at https://github.com/tensorflow/models/tree/master/tutorials/image/cifar10. It uses a large number of TensorFlow library functions, and it took me quite a while to mostly make sense of it, so this post is my study note: what the individual functions do, and how the whole CNN is put together.


1. Reading the data

I covered this in an earlier post (see http://blog.csdn.net/margretwg/article/details/70168256), so I won't repeat it here.


2. Building the model


Global parameters

import os
import re
import sys
import tarfile
import tensorflow as tf
import CIFAR10.CIFAR_input as cifar_input # aliased so it does not shadow the built-in input()
FLAGS=tf.app.flags.FLAGS

# model parameters
tf.app.flags.DEFINE_integer('batch_size', 128,
                            """Number of images to process in a batch.""")
tf.app.flags.DEFINE_string('data_dir', 'E:/Python/tensorflow/CIFAR10',
                           """Path to the CIFAR-10 data directory.""")
tf.app.flags.DEFINE_boolean('use_fp16', False,
                            """Train the model using fp16.""")

# global variables
IMAGE_SIZE=cifar_input.IMAGE_SIZE
NUM_CLASSES=cifar_input.NUM_CLASSES
NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN=cifar_input.NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN
NUM_EXAMPLES_PER_EPOCH_FOR_EVAL=cifar_input.NUM_EXAMPLES_PER_EPOCH_FOR_EVAL

# constants used during training
MOVING_AVERAGE_DECAY=0.9999 # decay for the moving averages of trained variables
NUM_EPOCH_PER_DECAY=350.0 # epochs after which the learning rate decays
LEARNING_RATE_DECAY_FACTOR=0.1 # learning rate decay factor
INITIAL_LEARNING_RATE=0.1 # initial learning rate
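
The last three constants are consumed later, in the tutorial's train() step, where the learning rate is decayed in a staircase pattern as training progresses. A minimal sketch of that computation, assuming a global_step counter as in the tutorial (this is an illustration, not the full train() function):

num_batches_per_epoch=NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN/FLAGS.batch_size
decay_steps=int(num_batches_per_epoch*NUM_EPOCH_PER_DECAY)

global_step=tf.train.get_or_create_global_step()
lr=tf.train.exponential_decay(INITIAL_LEARNING_RATE,
                              global_step,
                              decay_steps,
                              LEARNING_RATE_DECAY_FACTOR,
                              staircase=True) # multiply lr by 0.1 every 350 epochs

With the tutorial's 50,000 training images and batch_size=128, decay_steps works out to int(390.625 * 350) = 136718 steps per decay period.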



2.1 Model prediction: inference()

The layer sequence is: conv1-->pool1-->norm1-->conv2-->norm2-->pool2-->local3-->local4-->softmax_linear

This function returns the logits, a tensor of shape (batch_size, NUM_CLASSES), i.e. (128, 10) here; a shape walk-through follows the code below.

def inference(images):
    """
    Build the CIFAR-10 model.
    :param images: images from distorted_inputs() or inputs()
    :return:
    logits tensor
    """
    #conv1
    with tf.variable_scope('conv1') as scope:
        kernel=_variable_with_weight_decay('weights',shape=[5,5,3,64],stddev=5e-2,wd=0.0)
        conv=tf.nn.conv2d(images,kernel,[1,1,1,1],padding='SAME')# convolution
        biases=_variable_on_cpu('biases',[64],tf.constant_initializer(0.0))
        pre_activation=tf.nn.bias_add(conv,biases)# Wx+b
        conv1=tf.nn.relu(pre_activation,name=scope.name)
        _activation_summary(conv1)

    #pool1
    pool1=tf.nn.max_pool(conv1,ksize=[1,3,3,1],strides=[1,2,2,1],padding='SAME',name='pool1')


    #norm1: local response normalization across adjacent channels
    norm1=tf.nn.lrn(pool1,4,bias=1.0,alpha=0.001/9.0,beta=0.75,name='norm1')

    #conv2
    with tf.variable_scope('conv2') as scope:
        kernel=_variable_with_weight_decay('weights',shape=[5,5,64,64],stddev=5e-2,wd=0.0)
        conv=tf.nn.conv2d(norm1,kernel,[1,1,1,1],padding='SAME')
        biases=_variable_on_cpu('biases',[64],tf.constant_initializer(0.1))
        pre_activation=tf.nn.bias_add(conv,biases)
        conv2=tf.nn.relu(pre_activation,name=scope.name)
        _activation_summary(conv2)

    #norm2
    norm2=tf.nn.lrn(conv2,4,bias=1.0,alpha=0.001/9.0,beta=0.75,name='norm2')

    #pool2
    pool2=tf.nn.max_pool(norm2,ksize=[1,3,3,1],strides=[1,2,2,1],padding='SAME',name='pool2')

    #local3
    with tf.variable_scope('local3') as scope:
        #Move everything into depth so we can perform a single matrix multiply
        reshape=tf.reshape(pool2,[FLAGS.batch_size,-1])
        dim=reshape.get_shape()[1].value
        weights=_variable_with_weight_decay('weights',shape=[dim,384],stddev=0.04,wd=0.004)
        biases=_variable_on_cpu('biases',[384],tf.constant_initializer(0.1))
        local3=tf.nn.relu(tf.matmul(reshape,weights)+biases,name=scope.name)
        _activation_summary(local3)

    #local4
    with tf.variable_scope('local4') as scope:
        weights = _variable_with_weight_decay('weights', shape=[384, 192],
                                              stddev=0.04, wd=0.004)
        biases = _variable_on_cpu('biases', [192], tf.constant_initializer(0.1))
        local4 = tf.nn.relu(tf.matmul(local3, weights) + biases, name=scope.name)
        _activation_summary(local4)

    #softmax_linear
    with tf.variable_scope('softmax_linear') as scope:
        weights=_variable_with_weight_decay('weights',[192,NUM_CLASSES],stddev=1/192.0,wd=0.0)
        biases=_variable_on_cpu('biases',[NUM_CLASSES],tf.constant_initializer(0.0))
        softmax_linear=tf.add(tf.matmul(local4,weights),biases,name=scope.name)
        _activation_summary(softmax_linear)

    return softmax_linear
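
To sanity-check the wiring, it helps to trace the tensor shapes through inference(). Assuming the 24x24 cropped images the tutorial's input pipeline produces (IMAGE_SIZE=24) and batch_size=128, the shapes work out as follows:

# images:                          (128, 24, 24, 3)
# conv1 (5x5x64, stride 1, SAME):  (128, 24, 24, 64)
# pool1 (3x3, stride 2, SAME):     (128, 12, 12, 64)
# norm1 / conv2 / norm2:           (128, 12, 12, 64)  LRN and SAME conv keep shape
# pool2 (3x3, stride 2, SAME):     (128, 6, 6, 64)
# reshape:                         (128, 2304)        6*6*64 = 2304
# local3:                          (128, 384)
# local4:                          (128, 192)
# softmax_linear:                  (128, 10)          the returned logits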


Here, _variable_with_weight_decay() creates and initializes the weights; it also takes a decay coefficient wd, with which it computes an L2 weight-decay loss and adds it to the 'losses' collection, so that total_loss can conveniently be assembled from the collection at the end.

def _variable_with_weight_decay(name,shape,stddev,wd):
    """
    Helper to create an initialized Variable with weight decay.

    The Variable is initialized with a truncated normal distribution.
    :param stddev: standard deviation of the truncated normal
    :param wd: add L2 loss weight decay multiplied by this float. If None, weight decay is not added for this Variable.
    :return:
    Variable tensor
    """

    dtype=tf.float16 if FLAGS.use_fp16 else tf.float32
    var=_variable_on_cpu(name,shape,tf.truncated_normal_initializer(stddev=stddev,dtype=dtype))
    if wd is not None:
        weight_decay=tf.multiply(tf.nn.l2_loss(var),wd,name='weight_loss')
        tf.add_to_collection('losses',weight_decay)

    return var
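
The 'losses' collection pays off when the total loss is assembled: the tutorial's loss() function (not shown in this post) adds the cross-entropy term to the same collection and then sums everything with tf.add_n. A minimal sketch of that pattern:

def loss(logits,labels):
    # cross entropy between the logits from inference() and the true labels
    labels=tf.cast(labels,tf.int64)
    cross_entropy=tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=labels,logits=logits,name='cross_entropy_per_example')
    cross_entropy_mean=tf.reduce_mean(cross_entropy,name='cross_entropy')
    tf.add_to_collection('losses',cross_entropy_mean)
    # total loss = cross entropy + all the L2 weight-decay terms registered
    # by _variable_with_weight_decay()
    return tf.add_n(tf.get_collection('losses'),name='total_loss')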



_variable_on_cpu() simply creates a variable with the given name and shape on the CPU, initialized with the given initializer:

def _variable_on_cpu(name,shape,initializer):
    """
    Helper to create a Variable stored on CPU memory.
    :param name: name of the variable
    :param shape: list of ints
    :param initializer: initializer for the Variable
    :return:
    Variable tensor
    """
    with tf.device('/cpu:0'):
        dtype=tf.float16 if FLAGS.use_fp16 else tf.float32
        var=tf.get_variable(name,shape,initializer=initializer,dtype=dtype)
    return var
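
One loose end: inference() calls _activation_summary() after every layer, but that helper is not listed above. In the official tutorial it exposes each layer's activations to TensorBoard; a simplified sketch (the real version additionally strips a multi-GPU 'tower_N/' prefix from the op name with re.sub, which is why re is imported at the top):

def _activation_summary(x):
    # record a histogram of the activations and a scalar measuring how
    # sparse they are (fraction of zeros after the ReLU)
    tensor_name=x.op.name
    tf.summary.histogram(tensor_name+'/activations',x)
    tf.summary.scalar(tensor_name+'/sparsity',tf.nn.zero_fraction(x))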