TensorFlow practice: MNIST (3). Training the classic LeNet-5 convolutional network on the MNIST handwritten digit set, reaching over 99.3% test-set accuracy.

This article shows how to build and train LeNet-5, a classic deep convolutional neural network for image recognition, with TensorFlow. The code is organized into three parts, inference, training, and evaluation, and covers building convolutional, pooling, and fully connected layers as well as applying gradient-descent learning and regularization.

LeNet-5 is a classic deep convolutional neural network proposed by Yann LeCun in the 1998 paper Gradient-Based Learning Applied to Document Recognition. The code in this post is the example from section 6.4.2 of 《Tensorflow 实战Google深度学习框架》 (it differs in a few details from the network in the paper); some changes were made so that it runs in a different environment. The comments of the original code are omitted here; see the book for detailed annotations.

Program output: (screenshot omitted)

The source code is split into three files: mnist_inference.py, mnist_train.py, and mnist_eval.py. The complete code follows.

File mnist_inference.py:

import tensorflow as tf
tf.compat.v1.disable_eager_execution()

INPUT_NODE=784
OUTPUT_NODE=10

IMAGE_SIZE=28
NUM_CHANNELS=1
NUM_LABELS=10

# First convolutional layer
CONV1_DEEP=32
CONV1_SIZE=5
# Second convolutional layer
CONV2_DEEP=64
CONV2_SIZE=5
# Number of nodes in the fully connected layer
FC_SIZE=512

# Inference: builds the forward pass and returns the logits
def inference(input_tensor, train, regularizer):
    with tf.compat.v1.variable_scope('layer1-conv1'):
        conv1_weights=tf.compat.v1.get_variable("weights", 
                        initializer=tf.random.truncated_normal(
                        shape=[CONV1_SIZE, CONV1_SIZE, NUM_CHANNELS, CONV1_DEEP],
                        stddev=0.1))
        conv1_biases=tf.compat.v1.get_variable("biases",
                initializer=tf.constant(0.0,shape=[CONV1_DEEP]))
        conv1 = tf.nn.conv2d(input_tensor, conv1_weights, strides=[1,1,1,1], padding='SAME')
        relu1 = tf.nn.relu(tf.nn.bias_add(conv1, conv1_biases))

    with tf.compat.v1.variable_scope('layer2-pool1'):
        pool1=tf.nn.max_pool(relu1, ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME')

    with tf.compat.v1.variable_scope('layer3-conv2'):
        conv2_weights=tf.compat.v1.get_variable("weights", 
                initializer=tf.random.truncated_normal(
                        shape=[CONV2_SIZE, CONV2_SIZE, CONV1_DEEP, CONV2_DEEP],
                        stddev=0.1))
        conv2_biases=tf.compat.v1.get_variable("biases", 
                initializer=tf.constant(0.0,shape=[CONV2_DEEP]))
        conv2 = tf.nn.conv2d(pool1, conv2_weights, strides=[1,1,1,1], padding='SAME')
        relu2 = tf.nn.relu(tf.nn.bias_add(conv2, conv2_biases))

    with tf.compat.v1.variable_scope('layer4-pool2'):
        pool2=tf.nn.max_pool(relu2, ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME')

    # pool2 is [batch, 7, 7, 64]; flatten it into a [batch, 3136] matrix for
    # the fully connected layers (pool_shape[0] is the static batch size)
    pool_shape=pool2.get_shape().as_list()
    nodes=pool_shape[1]*pool_shape[2]*pool_shape[3]
    reshaped=tf.reshape(pool2, [pool_shape[0], nodes])

    with tf.compat.v1.variable_scope('layer5-fc1'):
        fc1_weights=tf.compat.v1.get_variable("weights", 
                initializer=tf.random.truncated_normal(
                        shape=[nodes, FC_SIZE],
                        stddev=0.1))

        if regularizer is not None:
            tf.compat.v1.add_to_collection('losses', regularizer(fc1_weights))

        fc1_biases=tf.compat.v1.get_variable("bias", 
                initializer=tf.constant(0.1,shape=[FC_SIZE]))

        fc1 = tf.nn.relu(tf.matmul(reshaped, fc1_weights) + fc1_biases)

        if train: fc1=tf.nn.dropout(fc1, rate=0.5)  # dropout only while training; in TF2 the argument is the drop rate

    with tf.compat.v1.variable_scope('layer6-fc2'):
        fc2_weights=tf.compat.v1.get_variable("weights", 
                initializer=tf.random.truncated_normal(
                        shape=[FC_SIZE, NUM_LABELS],
                        stddev=0.1))

        if regularizer is not None:
            tf.compat.v1.add_to_collection('losses', regularizer(fc2_weights))

        fc2_biases=tf.compat.v1.get_variable("bias", 
                initializer=tf.constant(0.1,shape=[NUM_LABELS]))

        logit = tf.matmul(fc1, fc2_weights) + fc2_biases

    return logit
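
Tracing the shapes through inference(): a [N, 28, 28, 1] input keeps its 28x28 size through each SAME-padded, stride-1 convolution, is halved by each 2x2 max pool, and leaves pool2 as [N, 7, 7, 64], so the flattened vector has 7*7*64 = 3136 elements. A minimal smoke test of the forward pass (my own sketch, not one of the book's three files):

import numpy as np
import tensorflow as tf
import mnist_inference

tf.compat.v1.disable_eager_execution()

# Build the graph for a dummy batch of 4 blank images
x = tf.compat.v1.placeholder(tf.float32, [4, 28, 28, 1])
logits = mnist_inference.inference(x, False, None)  # no dropout, no regularizer

with tf.compat.v1.Session() as sess:
    sess.run(tf.compat.v1.global_variables_initializer())
    out = sess.run(logits, feed_dict={x: np.zeros((4, 28, 28, 1), np.float32)})
    print(out.shape)  # expected: (4, 10)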

File mnist_train.py:

import os
import tensorflow as tf
import mnist_inference

BATCH_SIZE=100

LEARNING_RATE_BASE=0.02
LEARNING_RATE_DECAY=0.99

REGULARIZATION_RATE=0.0001
TRAINING_STEPS=30000
MOVING_AVERAGE_DECAY=0.99
dataset_size = 60000

MODEL_SAVE_PATH = ".tfmodel/"
MODEL_NAME="model_mnist5.5.ckpt"
# Create the checkpoint directory up front; Saver.save() fails if it is missing
os.makedirs(MODEL_SAVE_PATH, exist_ok=True)

tf.compat.v1.disable_eager_execution()
# Load the data; the file mnist.npz is downloaded automatically, usually
# to C:/Users/<your user name>/.keras/datasets
mnist=tf.keras.datasets.mnist
(x_tr,y_tr),(x_te,y_te)=mnist.load_data()
# Convert the integer class labels into one-hot float matrices
y_tr = tf.keras.utils.to_categorical(y_tr,10,dtype='float32')
y_te = tf.keras.utils.to_categorical(y_te,10,dtype='float32')
# Placeholders
x_f_ph=tf.compat.v1.placeholder(tf.float32)
x_n_ph=tf.compat.v1.placeholder(tf.int32)
# Convert the digit bitmaps to float vectors: subtract 125 and divide by 255
# for rough centering and scaling, then flatten each image to 784 values
x_ftr_op = tf.cast(x_tr, tf.float32)
x_vtr_op = tf.reshape((x_f_ph-125) / 255.0, [x_n_ph, -1])
x_fte_op = tf.cast(x_te, tf.float32)
x_vte_op = tf.reshape((x_f_ph-125) / 255.0, [x_n_ph, -1])

with tf.compat.v1.Session() as sess:
    tf.compat.v1.global_variables_initializer().run()
    x_ftr = sess.run(x_ftr_op)
    x_vtr = sess.run(x_vtr_op, feed_dict={x_f_ph:x_ftr, x_n_ph:x_ftr.shape[0]})
    x_fte = sess.run(x_fte_op)
    x_vte = sess.run(x_vte_op, feed_dict={x_f_ph:x_fte, x_n_ph:x_fte.shape[0]})
    # print(x_ftr[0:1,1:10, 2:8])
    print(x_fte.shape)
    # print(x_vtr[0:1,0:784])
    print(x_vte.shape)

#Show one bitmap; the program pauses while the image is displayed and resumes once it is closed
# import matplotlib.pyplot as plt
# plt.imshow(x_fte[0],cmap="gray")
# plt.show()
print(x_fte[0:1,1:10])
print("y_te[0]: ", y_te[0])

def train():
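    # Flattened [N, 784] batches are reshaped back into [BATCH_SIZE, 28, 28, 1]
    # image tensors before being fed to the convolutional network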
    x_reshape = tf.compat.v1.placeholder(tf.float32)
    x_reshape_op = tf.reshape(x_reshape, [
                   BATCH_SIZE, 
                   mnist_inference.IMAGE_SIZE, 
                   mnist_inference.IMAGE_SIZE,
                   mnist_inference.NUM_CHANNELS])

    x = tf.compat.v1.placeholder(tf.float32,  [
                 BATCH_SIZE,
                 mnist_inference.IMAGE_SIZE,
                 mnist_inference.IMAGE_SIZE,
                 mnist_inference.NUM_CHANNELS], name='x-input')

    y_ = tf.compat.v1.placeholder(tf.float32,[None,mnist_inference.OUTPUT_NODE], name='y-input')

    regularizer = tf.keras.regularizers.L2(REGULARIZATION_RATE)

    y = mnist_inference.inference(x, True, regularizer)

    global_step = tf.Variable(0, trainable=False)
    variable_averages=tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY,global_step)

    variable_averages_op = variable_averages.apply(tf.compat.v1.trainable_variables())

    # The labels are one-hot vectors, so argmax recovers the integer class
    # index that the sparse softmax cross-entropy op expects
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=tf.math.argmax(y_, 1))
    cross_entropy_mean = tf.compat.v1.reduce_mean(cross_entropy)

    loss = cross_entropy_mean + tf.math.add_n(tf.compat.v1.get_collection('losses'))

    # learning_rate = tf.compat.v1.train.exponential_decay(LEARNING_RATE_BASE,
    #                   global_step, dataset_size / BATCH_SIZE, LEARNING_RATE_DECAY,staircase=True)
    learning_rate = tf.compat.v1.train.exponential_decay(LEARNING_RATE_BASE,
                      global_step, dataset_size / BATCH_SIZE, LEARNING_RATE_DECAY)

    train_step=tf.compat.v1.train.GradientDescentOptimizer(learning_rate)\
                   .minimize(loss, global_step=global_step)

    # Group the gradient update and the moving-average update into one train_op
    with tf.control_dependencies([train_step, variable_averages_op]):
        train_op = tf.no_op(name='train')

    saver = tf.compat.v1.train.Saver(max_to_keep=30)

    with tf.compat.v1.Session() as sess:
        tf.compat.v1.global_variables_initializer().run()
        # To resume a previous run, restore its latest checkpoint here, e.g.
        # ".tfmodel/model_mnist5.5.ckpt-45504"; leave this line commented out
        # for the first run.
        #saver.restore(sess, ".tfmodel/model_mnist5.5.ckpt-50605")

        for i in range(TRAINING_STEPS):
            start=(i*BATCH_SIZE)%dataset_size
            end=min(start+BATCH_SIZE,dataset_size)
            xs_tr = x_vtr[start:end]
            ys_tr = y_tr[start:end]

            xs_tr = sess.run(x_reshape_op, feed_dict={x_reshape:xs_tr})

            _, loss_value, step = sess.run([train_op, loss, global_step], feed_dict={x:xs_tr, y_:ys_tr})

            if i % 100 == 0:
                print("After %d training step(s), loss on training batch is %g."
                        %(step, loss_value))

                saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME),
                        global_step=global_step)

def main(argv=None):
    train()

if __name__=='__main__':
    tf.compat.v1.app.run()
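
The learning rate decays exponentially: with decay_steps = dataset_size / BATCH_SIZE = 600 (one epoch) and staircase left at its default of False, the effective rate is LEARNING_RATE_BASE * LEARNING_RATE_DECAY ** (global_step / 600), shrinking smoothly at every step instead of epoch by epoch. A quick sketch of the schedule (my own illustration, not part of the original files):

base, decay, decay_steps = 0.02, 0.99, 60000 / 100
for step in (0, 600, 6000, 30000):
    print(step, base * decay ** (step / decay_steps))
# 600 steps (one epoch) gives 0.02 * 0.99 = 0.0198; after all 30000 steps the
# rate is roughly 0.02 * 0.99 ** 50, about 0.0121.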

File mnist_eval.py:

import time
import tensorflow as tf
import mnist_inference
import mnist_train

EVAL_INTERVAL_SECS = 120

dataset_size = 60000

tf.compat.v1.disable_eager_execution()
# Load the data; the file mnist.npz is downloaded automatically, usually
# to C:/Users/<your user name>/.keras/datasets
mnist=tf.keras.datasets.mnist
(x_tr,y_tr),(x_te,y_te)=mnist.load_data()
# Convert the integer class labels into one-hot float matrices
y_tr = tf.keras.utils.to_categorical(y_tr,10,dtype='float32')
y_te = tf.keras.utils.to_categorical(y_te,10,dtype='float32')
# Placeholders
x_f_ph=tf.compat.v1.placeholder(tf.float32)
x_n_ph=tf.compat.v1.placeholder(tf.int32)
# Convert the digit bitmaps to float vectors: subtract 125 and divide by 255
# for rough centering and scaling, then flatten each image to 784 values
x_ftr_op = tf.cast(x_tr, tf.float32)
x_vtr_op = tf.reshape((x_f_ph-125) / 255.0, [x_n_ph, -1])
x_fte_op = tf.cast(x_te, tf.float32)
x_vte_op = tf.reshape((x_f_ph-125) / 255.0, [x_n_ph, -1])

with tf.compat.v1.Session() as sess:
    tf.compat.v1.global_variables_initializer().run()
    x_ftr = sess.run(x_ftr_op)
    x_vtr = sess.run(x_vtr_op, feed_dict={x_f_ph:x_ftr, x_n_ph:x_ftr.shape[0]})
    x_fte = sess.run(x_fte_op)
    x_vte = sess.run(x_vte_op, feed_dict={x_f_ph:x_fte, x_n_ph:x_fte.shape[0]})
    # print(x_ftr[0:1,1:10, 2:8])
    print(x_fte.shape)
    # print(x_vtr[0:1,0:784])
    print(x_vte.shape)

#Show one bitmap; the program pauses while the image is displayed and resumes once it is closed
# import matplotlib.pyplot as plt
# plt.imshow(x_fte[0],cmap="gray")
# plt.show()
print(x_fte[0:1,1:10])
print("y_te[0]: ", y_te[0])

# Evaluation

def evaluate():
    with tf.Graph().as_default() as g:
        x_reshape = tf.compat.v1.placeholder(tf.float32)
        x_reshape_op = tf.reshape(x_reshape, [
                       10000, 
                       mnist_inference.IMAGE_SIZE, 
                       mnist_inference.IMAGE_SIZE,
                       mnist_inference.NUM_CHANNELS])

        x = tf.compat.v1.placeholder(tf.float32,  [
                     # mnist_train.BATCH_SIZE,
                     10000, 
                     mnist_inference.IMAGE_SIZE,
                     mnist_inference.IMAGE_SIZE,
                     mnist_inference.NUM_CHANNELS], name='x-input')

        y_ = tf.compat.v1.placeholder(tf.float32,[None,mnist_inference.OUTPUT_NODE], name='y-input')

        y = mnist_inference.inference(x, False, None)

        correct_prediction = tf.math.equal(tf.math.argmax(y, 1), tf.math.argmax(y_, 1))
        accuracy = tf.compat.v1.reduce_mean(tf.cast(correct_prediction, tf.float32))

        variable_averages=tf.train.ExponentialMovingAverage(
                mnist_train.MOVING_AVERAGE_DECAY)

        variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.compat.v1.train.Saver(variables_to_restore)

        while True:
            with tf.compat.v1.Session() as sess:
                tf.compat.v1.global_variables_initializer().run()
                ckpt = tf.compat.v1.train.get_checkpoint_state(mnist_train.MODEL_SAVE_PATH)

                # Score the full 10000-image test set
                xs = x_vte[0:10000]
                ys = y_te[0:10000]
                # To score a validation split instead, use the last 10000
                # training samples:
                #xs = x_vtr[50000:60000]
                #ys = y_tr[50000:60000]

                xs = sess.run(x_reshape_op, feed_dict={x_reshape:xs})
                print(xs.shape)

                if ckpt and ckpt.model_checkpoint_path:
                    saver.restore(sess, ckpt.model_checkpoint_path)
                    print(ckpt.model_checkpoint_path)
                    global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
                    print("global_step: ", global_step)

                    accuracy_score = sess.run(accuracy, feed_dict={x:xs, y_:ys})
                    print("After %s training step(s), test accuracy = %g" \
                            %(global_step, accuracy_score))
                else:
                    print("No checkpoint file found")
                    # return

            time.sleep(EVAL_INTERVAL_SECS)

def main(argv=None):
    evaluate()

if __name__=='__main__':
    tf.compat.v1.app.run()
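
A note on the Saver in evaluate(): variables_to_restore() maps each weight to its shadow-average name, so restore() loads the exponential-moving-average values (which usually generalize a little better) into the live variables. A tiny standalone illustration (my own sketch, not part of the original files):

import tensorflow as tf
tf.compat.v1.disable_eager_execution()

v = tf.compat.v1.get_variable("v", initializer=tf.constant(0.0))
ema = tf.train.ExponentialMovingAverage(0.99)
# Prints a map from the shadow name to the live variable, e.g.
# {'v/ExponentialMovingAverage': <tf.Variable 'v:0' ...>}
print(ema.variables_to_restore())

Note also that import mnist_train executes that module's top-level data preparation a second time, which is harmless but explains why the shape printouts appear twice when running mnist_eval.py.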
