(TensorFlow Learning) MNIST Handwritten Digit Recognition: Implementing the LeNet-5 Convolutional Neural Network Model

What is deep learning?

A family of algorithms that model highly complex data through multiple layers of nonlinear transformations.

 

In the previous post I used a fully connected network with a single hidden layer to recognize the MNIST handwritten digits, and it only reached about 98% accuracy on the test set.

A convolutional neural network can do noticeably better.

A convolutional neural network mainly adds convolutional and pooling layers (the number of each can vary) in front of the fully connected layers.

The biggest problem with a fully connected network is that its fully connected layers have far too many parameters. Convolutional layers cut the parameter count dramatically, and they also capture image features better; the quick comparison below makes the difference concrete.
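To see the savings, here is a rough back-of-the-envelope count using this post's layer sizes (the arithmetic is mine, purely illustrative):

# Fully connecting a 28*28 image to 512 hidden units vs. the first conv layer below
fc_params = 28 * 28 * 512 + 512    # weights + biases = 401920 parameters
conv_params = 5 * 5 * 1 * 32 + 32  # 32 filters of 5x5 on 1 channel, + biases = 832 parameters
print(fc_params, conv_params)      # 401920 832

The convolutional layer also shares those 832 parameters across every spatial position, which is exactly the weight sharing that makes it good at picking up local image features.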

[Figure: a fully connected network vs. a convolutional neural network]

Here I implement the LeNet-5 network in TensorFlow. MNIST images are 28*28 while the original LeNet-5 takes 32*32 inputs, so the network is modified slightly:

Layer 1: convolutional layer, 32 filters of size 5*5

Layer 2: pooling layer, 2*2 max pooling

Layer 3: convolutional layer, 64 filters of size 5*5

Layer 4: pooling layer, 2*2 max pooling

Layer 5: fully connected, 3136 -> 512

Layer 6: fully connected, 512 -> 10
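As a sanity check on those sizes: with SAME padding and stride 1 the convolutions keep the spatial size, and each 2*2 max pool with stride 2 halves it, so the shapes run 28*28*1 (input) -> 28*28*32 (conv1) -> 14*14*32 (pool1) -> 14*14*64 (conv2) -> 7*7*64 (pool2). Flattening pool2 gives 7*7*64 = 3136, which is where the 3136 -> 512 fully connected layer comes from.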

# LeNet-5 inference graph (saved as mnist_inference.py; the training and test scripts below import it)

import tensorflow as tf

INPUT_NODE=784
OUTPUT_NODE=10

IMAGE_SIZE=28
NUM_CHANNELS=1
NUM_LABELS=10

CONV1_DEEP=32
CONV1_SIZE=5

CONV2_DEEP=64
CONV2_SIZE=5

FC_SIZE=512

def inference(input_tensor,train,regularizer):
    with tf.variable_scope('layer1-conv1'):
        conv1_weights=tf.get_variable("weight",[CONV1_SIZE,CONV1_SIZE,NUM_CHANNELS,CONV1_DEEP],
                                      initializer=tf.truncated_normal_initializer(stddev=0.1))
        conv1_biases=tf.get_variable("bias",[CONV1_DEEP],initializer=tf.constant_initializer(0.0))

        # 5x5 convolution with stride 1; SAME padding keeps the 28x28 spatial size.
        conv1=tf.nn.conv2d(input_tensor,conv1_weights,strides=[1,1,1,1],padding='SAME')
        relu1=tf.nn.relu(tf.nn.bias_add(conv1,conv1_biases))

    with tf.name_scope('layer2-pool1'):
        pool1=tf.nn.max_pool(relu1,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME')

    with tf.variable_scope('layer3-conv2'):
        conv2_weights=tf.get_variable("weight",[CONV2_SIZE,CONV2_SIZE,CONV1_DEEP,CONV2_DEEP],
                                      initializer=tf.truncated_normal_initializer(stddev=0.1))
        conv2_biases=tf.get_variable("bias",[CONV2_DEEP],initializer=tf.constant_initializer(0.0))

        conv2=tf.nn.conv2d(pool1,conv2_weights,strides=[1,1,1,1],padding='SAME')
        relu2=tf.nn.relu(tf.nn.bias_add(conv2,conv2_biases))

    with tf.name_scope('layer4-pool2'):
        pool2=tf.nn.max_pool(relu2,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME')

    # Flatten pool2 ([batch, 7, 7, 64]) into a [batch, 3136] matrix for the fully connected layers.
    pool_shape=pool2.get_shape().as_list()
    nodes=pool_shape[1]*pool_shape[2]*pool_shape[3]
    reshaped=tf.reshape(pool2,[pool_shape[0],nodes])

    with tf.variable_scope('layer5-fc1'):
        fc1_weights=tf.get_variable("weight",[nodes,FC_SIZE],
                                    initializer=tf.truncated_normal_initializer(stddev=0.1))
        # Only the fully connected weights get L2 regularization.
        if regularizer is not None:
            tf.add_to_collection("losses",regularizer(fc1_weights))
        fc1_biases=tf.get_variable("bias",[FC_SIZE],initializer=tf.constant_initializer(0.1))
        fc1=tf.nn.relu(tf.matmul(reshaped,fc1_weights)+fc1_biases)
        if train:
            fc1=tf.nn.dropout(fc1,0.5)  # dropout only at training time

    with tf.variable_scope('layer6-fc2'):
        fc2_weights=tf.get_variable("weight",[FC_SIZE,NUM_LABELS],
                                    initializer=tf.truncated_normal_initializer(stddev=0.1))
        if regularizer is not None:
            tf.add_to_collection('losses',regularizer(fc2_weights))
        fc2_biases=tf.get_variable("bias",[NUM_LABELS],initializer=tf.constant_initializer(0.1))

    logit=tf.matmul(fc1,fc2_weights)+fc2_biases

    return logit
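A minimal sketch of how this function gets wired up (it assumes the code above is saved as mnist_inference.py, which the training and test scripts below also assume):

import tensorflow as tf
import mnist_inference

# A batch of 100 single-channel 28x28 images in, 10 logits per image out.
x = tf.placeholder(tf.float32, [100, 28, 28, 1], name='x-input')
logits = mnist_inference.inference(x, False, None)  # no dropout, no regularization
print(logits.get_shape())  # (100, 10)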

Training program

import tensorflow as tf
import mnist_inference
from tensorflow.examples.tutorials.mnist import input_data
import numpy as np


BATCH_SIZE=100
REGULARIZATION_RATE=0.0001
TRAINING_STEPS=30000

def train(mnist):
    # The CNN takes a 4-D input: [batch, height, width, channels].
    x=tf.placeholder(tf.float32,[BATCH_SIZE,mnist_inference.IMAGE_SIZE,mnist_inference.IMAGE_SIZE,mnist_inference.NUM_CHANNELS],name='x-input')
    y_=tf.placeholder(tf.float32,[None,mnist_inference.OUTPUT_NODE],name='y-input')
    regularizer=tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)
    y=mnist_inference.inference(x,True,regularizer)

    # Labels are one-hot, so argmax recovers the class indices that the sparse loss expects.
    cross_entropy=tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y,labels=tf.argmax(y_,1))
    cross_entropy_mean=tf.reduce_mean(cross_entropy)
    # Total loss = cross entropy + the L2 terms collected inside inference().
    loss=cross_entropy_mean+tf.add_n(tf.get_collection('losses'))
    train_step=tf.train.GradientDescentOptimizer(0.1).minimize(loss)

    saver=tf.train.Saver()
    with tf.Session() as sess:
        tf.global_variables_initializer().run()

        for i in range(TRAINING_STEPS):
            xs,ys=mnist.train.next_batch(BATCH_SIZE)
            reshaped_xs=np.reshape(xs,(BATCH_SIZE,mnist_inference.IMAGE_SIZE,mnist_inference.IMAGE_SIZE,mnist_inference.NUM_CHANNELS))
            _,loss_value=sess.run([train_step,loss],feed_dict={x:reshaped_xs,y_:ys})
            if i%1000==0:
                print("After %d training steps,loss on training batch is %g" % (i,loss_value))

        saver.save(sess,"D:\myPython\MNIST_CNN\model.ckpt")

mnist=input_data.read_data_sets("D:/mnist",one_hot=True)
train(mnist)
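The loss above is the cross entropy plus the L2 terms that inference() stashed in the 'losses' collection. A stripped-down sketch of that collection pattern on its own (made-up shapes, just to show the mechanism):

import tensorflow as tf

regularizer = tf.contrib.layers.l2_regularizer(0.0001)
w = tf.Variable(tf.truncated_normal([784, 512], stddev=0.1))
tf.add_to_collection('losses', regularizer(w))   # each layer contributes its own L2 term
l2_loss = tf.add_n(tf.get_collection('losses'))  # summed into a single scalar at the end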

The network is more complex now, so training is fairly slow, but the loss keeps decreasing.

Test program

import tensorflow as tf
import mnist_inference
from tensorflow.examples.tutorials.mnist import input_data
import numpy as np

BATCH_SIZE=5000

def evaluate(mnist):
    x = tf.placeholder(tf.float32, [BATCH_SIZE, mnist_inference.IMAGE_SIZE, mnist_inference.IMAGE_SIZE, mnist_inference.NUM_CHANNELS], name='x-input')
    y_ = tf.placeholder(tf.float32, [None, mnist_inference.OUTPUT_NODE], name='y-input')
    # train=False (no dropout) and no regularizer at evaluation time.
    y = mnist_inference.inference(x, False, None)

    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    saver = tf.train.Saver()
    with tf.Session() as sess:
        # Restore the weights saved by the training script.
        saver.restore(sess, "D:/myPython/MNIST_CNN/model.ckpt")
        for i in range(5):
            xs, ys = mnist.test.next_batch(BATCH_SIZE)
            reshaped_xs = np.reshape(xs, (BATCH_SIZE, mnist_inference.IMAGE_SIZE, mnist_inference.IMAGE_SIZE, mnist_inference.NUM_CHANNELS))
            test_feed = {x: reshaped_xs, y_: ys}
            accuracy_score = sess.run(accuracy, feed_dict=test_feed)
            print("test accuracy is %g" % (accuracy_score))

mnist=input_data.read_data_sets("D:/mnist",one_hot=True)
evaluate(mnist)
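All five test batches have the same size, so the overall test-set accuracy is just the plain mean of the five printed values. A small tweak to the loop above reports it directly (a sketch reusing sess, accuracy, x, y_ and the constants from the script, not tested code):

accuracy_scores = []
for i in range(5):
    xs, ys = mnist.test.next_batch(BATCH_SIZE)
    reshaped_xs = np.reshape(xs, (BATCH_SIZE, mnist_inference.IMAGE_SIZE, mnist_inference.IMAGE_SIZE, mnist_inference.NUM_CHANNELS))
    accuracy_scores.append(sess.run(accuracy, feed_dict={x: reshaped_xs, y_: ys}))
print("overall test accuracy is %g" % (sum(accuracy_scores) / len(accuracy_scores)))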

The MNIST training set has 55,000 examples and the test set has 10,000. Here the test set is evaluated in batches of 5,000, and the accuracy comes out above 99%.

OK
