Implementing the Classic LeNet-5 Convolutional Neural Network in TensorFlow

1. Background

        The LeNet-5 model was proposed by Professor Yann LeCun in the 1998 paper Gradient-Based Learning Applied to Document Recognition, and it was the first convolutional neural network successfully applied to digit recognition. Its main structure is as follows:
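
(The layer shapes below are read off the cnndemo.py implementation further down, for 28x28 MNIST inputs.)

- Input: 28x28x1 grayscale image
- conv1: 32 filters of size 5x5, stride 1, SAME padding -> 28x28x32
- pool1: 2x2 max pooling, stride 2 -> 14x14x32
- conv2: 64 filters of size 5x5, stride 1, SAME padding -> 14x14x64
- pool2: 2x2 max pooling, stride 2 -> 7x7x64
- fc1: fully connected, 3136 -> 512, with dropout during training
- fc2: fully connected, 512 -> 10 (the output logits)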

2. Main Code

All of the code in this article comes from chapters 5 and 6 of the book TensorFlow实战Google深度学习框架.

Model definition code, cnndemo.py:

"""
resnet5 手写字体识别
"""

import tensorflow as tf


# Length of the flattened input image vector (28 * 28)
INPUT_NODE = 784
# Number of output nodes
OUTPUT_NODE = 10
# Image side length
IMAGE_SIZE = 28
# Number of image channels
NUM_CHANNELS = 1
# Number of classification labels
NUM_LABELS = 10


# First convolutional layer
CONV1_DEEP = 32  # output depth (number of filters)
CONV1_SIZE = 5   # filter size

# Second convolutional layer
CONV2_DEEP = 64  # output depth (number of filters)
CONV2_SIZE = 5   # filter size

# Number of nodes in the fully connected layer
FC_SIZE = 512
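
# Shape bookkeeping: with SAME padding and stride 1, each convolution keeps
# the spatial size, and each 2x2 max pool with stride 2 halves it:
# 28x28 -> 14x14 after pool1 -> 7x7 after pool2, so the flattened input to
# the first fully connected layer has 7 * 7 * CONV2_DEEP = 3136 nodes.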


def inference(input_tensor, train, regularizer):
    """
    定义卷积神经网络前向传播的过程
    :param input_tensor:
    :param train:
    :param regularizer:
    :return:
    """
    # Layer 1: convolutional layer
    with tf.variable_scope('layer1-conv1'):
        # Filter weights: the first two dimensions are the filter size, the
        # third is the depth of the current layer, and the fourth is the
        # output depth (number of filters).
        conv1_weights = tf.get_variable("weight", [CONV1_SIZE, CONV1_SIZE, NUM_CHANNELS, CONV1_DEEP],
                                        initializer=tf.truncated_normal_initializer(stddev=0.1))
        # One bias per output channel
        conv1_biases = tf.get_variable("bias", [CONV1_DEEP], initializer=tf.constant_initializer(0.0))

        # Convolution forward pass: the arguments are the input tensor
        # ([batch, height, width, channels]), the filter weights, the strides,
        # and the padding scheme ('SAME' = zero padding).
        conv1 = tf.nn.conv2d(input_tensor, conv1_weights, strides=[1, 1, 1, 1], padding='SAME')  # 32@28x28

        # Add the bias and apply the ReLU activation
        relu1 = tf.nn.relu(tf.nn.bias_add(conv1, conv1_biases))

    with tf.variable_scope("layer2-pool1"):
		# 最大池化操作,ksize提供了过滤器的尺寸,strides提供了步长信息,padding提供了是否使用全0填充
        pool1 = tf.nn.max_pool(relu1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')  # 32@14x14

    with tf.variable_scope("layer3-conv2"): # 同上
        conv2_weights = tf.get_variable("weight", [CONV2_SIZE, CONV2_SIZE, CONV1_DEEP, CONV2_DEEP],
                                        initializer=tf.truncated_normal_initializer(stddev=0.1))
        conv2_biases = tf.get_variable('bias', [CONV2_DEEP], initializer=tf.constant_initializer(0.0))

        conv2 = tf.nn.conv2d(pool1, conv2_weights, strides=[1, 1, 1, 1], padding='SAME')  # 64@14x14
        relu2 = tf.nn.relu(tf.nn.bias_add(conv2, conv2_biases))

    with tf.name_scope("layer4-pool2"): # 同上
        pool2 = tf.nn.max_pool(relu2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')  # 64@7x7

    # Flatten the pooled feature maps into a batch of vectors.
    # get_shape().as_list() returns [batch, height, width, depth].
    pool_shape = pool2.get_shape().as_list()
    nodes = pool_shape[1] * pool_shape[2] * pool_shape[3]  # 3136 = 7 * 7 * 64

    # Reshape into one vector per example: [batch, nodes]
    reshaped = tf.reshape(pool2, [pool_shape[0], nodes])

    # Layer 5: fully connected layer
    with tf.variable_scope("layer5-fc1"):
        fc1_weights = tf.get_variable('weight', [nodes, FC_SIZE],
                                      initializer=tf.truncated_normal_initializer(stddev=0.1))
        # Only the fully connected weights are regularized
        if regularizer is not None:
            tf.add_to_collection('losses', regularizer(fc1_weights))

        fc1_biases = tf.get_variable("bias", [FC_SIZE], initializer=tf.constant_initializer(0.1))

        fc1 = tf.nn.relu(tf.matmul(reshaped, fc1_weights) + fc1_biases)

        if train:
            # Dropout during training to reduce overfitting
            fc1 = tf.nn.dropout(fc1, 0.5)

    # Layer 6: fully connected output layer
    with tf.variable_scope("layer6-fc2"):
        fc2_weights = tf.get_variable("weight", [FC_SIZE, NUM_LABELS],
                                      initializer=tf.truncated_normal_initializer(stddev=0.1))
        if regularizer is not None:
            tf.add_to_collection('losses', regularizer(fc2_weights))
        fc2_biases = tf.get_variable("bias", [NUM_LABELS], initializer=tf.constant_initializer(0.1))
        logit = tf.matmul(fc1, fc2_weights) + fc2_biases

    # Return the logits of the output layer
    return logit
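
As a quick sanity check (a minimal sketch, not part of the book's code), the forward pass can be run on a random batch to confirm that the logits come out with shape [batch, NUM_LABELS]:

import numpy as np
import tensorflow as tf

from paper1.conv.cnndemo import inference, IMAGE_SIZE, NUM_CHANNELS

# A fixed batch of 4 random "images", just to exercise the graph
x = tf.placeholder(tf.float32, [4, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS])
logits = inference(x, train=False, regularizer=None)  # no dropout, no regularization

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    batch = np.random.rand(4, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS).astype(np.float32)
    print(sess.run(logits, feed_dict={x: batch}).shape)  # expected: (4, 10)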

Training code, mnist_test.py:

"""
神经网络的训练程序
"""
import os

import numpy as np
import tensorflow as tf

from tensorflow.examples.tutorials.mnist import input_data

from paper1.conv.cnndemo import inference, OUTPUT_NODE, IMAGE_SIZE, NUM_CHANNELS

BATCH_SIZE = 100  # mini-batch size
LEARNING_RATE_BASE = 0.8  # initial learning rate (may need lowering, e.g. to 0.01, for this convolutional model to converge)
LEARNING_RATE_DECAY = 0.99  # learning rate decay factor
REGULARIZATION_RATE = 0.0001  # L2 regularization coefficient
TRAINING_STEPS = 5000  # number of training steps
MOVING_AVERAGE_DECAY = 0.99  # moving average decay rate

# Path and file name for saving the model
# MODEL_SAVE_PATH = "./model"
# MODEL_NAME = "model.ckpt"


def train(mnist):
    """
    训练数据
    :param mnist:
    :return:
    """
    # 定义输入数据和标签的placeholder
    x = tf.placeholder(tf.float32, [BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS], name='x-input')
    y_ = tf.placeholder(tf.float32, [None, OUTPUT_NODE], name='y-input')

    # L2 regularizer applied to the fully connected weights
    regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)

    # Forward pass (train=True enables dropout)
    y = inference(x, True, regularizer)

    # Global step counter (not trainable)
    global_step = tf.Variable(0, trainable=False)

    # Moving average model applied to all trainable variables
    variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
    variables_average_op = variable_averages.apply(tf.trainable_variables())

    # Cross-entropy loss plus the L2 regularization terms collected in 'losses'
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=tf.argmax(y_, 1))
    cross_entropy = tf.reduce_mean(cross_entropy)
    loss = cross_entropy + tf.add_n(tf.get_collection('losses'))

    # Exponentially decaying learning rate
    learning_rate = tf.train.exponential_decay(
        LEARNING_RATE_BASE,
        global_step,
        mnist.train.num_examples / BATCH_SIZE,
        LEARNING_RATE_DECAY,
        staircase=True)
    # Training step using stochastic gradient descent
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)

    # Group the gradient update and the moving average update into one training op
    with tf.control_dependencies([train_step, variables_average_op]):
        train_op = tf.no_op(name='train')

    # Saver for persisting the model (the save call below is commented out)
    saver = tf.train.Saver()
    with tf.Session() as sess:
        # Initialize all variables
        tf.global_variables_initializer().run()

        for i in range(TRAINING_STEPS):
            xs, ys = mnist.train.next_batch(BATCH_SIZE)
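            # Reshape the flat 784-dimensional vectors into the
            # [batch, 28, 28, 1] layout that inference expects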
            reshaped_xs = np.reshape(xs,
                                     (BATCH_SIZE,
                                      IMAGE_SIZE,
                                      IMAGE_SIZE,
                                      NUM_CHANNELS))

            _, loss_value, step = sess.run([train_op, loss, global_step], feed_dict={x: reshaped_xs, y_: ys})

            # Report the loss (and optionally save the model) every 1000 steps
            if i % 1000 == 0:
                print("After %d training steps, loss on training batch is %g." % (step, loss_value))
                # saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME), global_step=global_step)


def main(argv=None):
    mnist = input_data.read_data_sets(r"E:\paper_exp\paper1\mnist", one_hot=True)  # raw string so the backslashes in the Windows path are not treated as escapes
    train(mnist)


if __name__ == '__main__':
    tf.app.run()
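
After training, accuracy can be checked with a small evaluation script in the spirit of the book's mnist_eval.py. The following is a minimal sketch, assuming the saver.save line above is re-enabled so that checkpoints exist under the ./model directory from the commented-out MODEL_SAVE_PATH; note that it restores the raw weights with a plain Saver, whereas the book's version restores the moving-averaged shadow variables via variable_averages.variables_to_restore():

"""
Minimal evaluation sketch (assumes checkpoints exist under ./model)
"""
import numpy as np
import tensorflow as tf

from tensorflow.examples.tutorials.mnist import input_data

from paper1.conv.cnndemo import inference, OUTPUT_NODE, IMAGE_SIZE, NUM_CHANNELS


def evaluate(mnist, n=1000):
    # Evaluate on the first n validation images
    x = tf.placeholder(tf.float32, [n, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS], name='x-input')
    y_ = tf.placeholder(tf.float32, [None, OUTPUT_NODE], name='y-input')
    y = inference(x, False, None)  # no dropout, no regularization at evaluation time

    correct = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))

    xs = np.reshape(mnist.validation.images[:n], (n, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS))
    ys = mnist.validation.labels[:n]

    saver = tf.train.Saver()
    with tf.Session() as sess:
        ckpt = tf.train.get_checkpoint_state("./model")  # MODEL_SAVE_PATH above
        saver.restore(sess, ckpt.model_checkpoint_path)
        print("validation accuracy = %g" % sess.run(accuracy, feed_dict={x: xs, y_: ys}))


if __name__ == '__main__':
    evaluate(input_data.read_data_sets(r"E:\paper_exp\paper1\mnist", one_hot=True))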

 
