MNIST Recognition with the LeNet-5 Architecture (TensorFlow)

1. Forward propagation: mnist_inference.py

# -*- coding: utf-8 -*-

import tensorflow as tf  

# Input and output layers
INPUT_NODE = 784  
OUTPUT_NODE = 10  
IMAGE_SIZE = 28
NUM_CHANNELS = 1
NUM_LABELS = 10

# Size and depth of the first convolutional layer
CONV1_DEEP = 32
CONV1_SIZE = 5

# Size and depth of the second convolutional layer
CONV2_DEEP = 64
CONV2_SIZE = 5

# Number of nodes in the fully connected layer
FC_SIZE = 512
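
# Shape walk-through (my annotation): with SAME padding and 2x2/stride-2
# pooling, a 28x28x1 input becomes 28x28x32 after conv1, 14x14x32 after
# pool1, 14x14x64 after conv2 and 7x7x64 after pool2, so the flattened
# input to the first fully connected layer has 7*7*64 = 3136 nodes.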

# Forward-propagation function. `train` enables training-only behaviour
# (e.g. dropout); `regularizer`, if given, adds L2 terms to the 'losses' collection.
def inference(input_tensor, train, regularizer):
    # Layer 1: convolution (5x5 kernel, 32 output channels, SAME padding)
    with tf.variable_scope('layer1-conv1'):
        conv1_weights = tf.get_variable("weight", [CONV1_SIZE, CONV1_SIZE, NUM_CHANNELS, CONV1_DEEP], initializer=tf.truncated_normal_initializer(stddev=0.1))
        conv1_biases = tf.get_variable("bias", [CONV1_DEEP], initializer=tf.constant_initializer(0.0))
        conv1 = tf.nn.conv2d(input_tensor, conv1_weights, strides=[1,1,1,1], padding='SAME')
        relu1 = tf.nn.relu(tf.nn.bias_add(conv1, conv1_biases))

    # Layer 2: max pooling (2x2 window, stride 2)
    with tf.name_scope('layer2-pool1'):
        pool1 = tf.nn.max_pool(relu1, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')

    # Layer 3: convolution (5x5 kernel, 64 output channels, SAME padding)
    with tf.variable_scope('layer3-conv2'):
        conv2_weights = tf.get_variable('weight', [CONV2_SIZE, CONV2_SIZE, CONV1_DEEP, CONV2_DEEP], initializer=tf.truncated_normal_initializer(stddev=0.1))
        conv2_biases = tf.get_variable("bias", [CONV2_DEEP], initializer=tf.constant_initializer(0.0))
        conv2 = tf.nn.conv2d(pool1, conv2_weights, strides=[1,1,1,1], padding='SAME')
        relu2 = tf.nn.relu(tf.nn.bias_add(conv2, conv2_biases))

    # Layer 4: max pooling (2x2 window, stride 2)
    with tf.name_scope('layer4-pool2'):
        pool2 = tf.nn.max_pool(relu2, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')

    # Layer 5: fully connected. Flatten the pooled feature maps first;
    # pool_shape is [batch, height, width, channels].
    pool_shape = pool2.get_shape().as_list()
    nodes = pool_shape[1] * pool_shape[2] * pool_shape[3]
    # -1 keeps the batch dimension dynamic instead of hard-coding pool_shape[0]
    reshaped = tf.reshape(pool2, [-1, nodes])

    with tf.variable_scope('layer5-fc1'):
        fc1_weights = tf.get_variable("weight", [nodes, FC_SIZE], initializer=tf.truncated_normal_initializer(stddev=0.1))
        # Only the fully connected weights are L2-regularized
        if regularizer is not None:
            tf.add_to_collection('losses', regularizer(fc1_weights))
        fc1_biases = tf.get_variable('bias', [FC_SIZE], initializer=tf.constant_initializer(0.1))
        fc1 = tf.nn.relu(tf.matmul(reshaped, fc1_weights) + fc1_biases)
        # Optional dropout, applied during training only; kept disabled here
        # if train: fc1 = tf.nn.dropout(fc1, 0.5)

    with tf.variable_scope('layer6-fc2'):
        fc2_weights = tf.get_variable("weight", [FC_SIZE, NUM_LABELS], initializer=tf.truncated_normal_initializer(stddev=0.1))
        if regularizer is not None:
            tf.add_to_collection('losses', regularizer(fc2_weights))
        fc2_biases = tf.get_variable('bias', [NUM_LABELS], initializer=tf.constant_initializer(0.1))
        logit = tf.matmul(fc1, fc2_weights) + fc2_biases

    return logit
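
A quick sanity check for the forward pass (my own sketch, not part of the original code) is to push a dummy batch through inference() and confirm that the logits come out with shape [batch, 10]. It assumes the file above is saved as mnist_inference.py in the same directory:

# -*- coding: utf-8 -*-
# Smoke test for mnist_inference.inference()
import numpy as np
import tensorflow as tf

import mnist_inference

with tf.Graph().as_default():
    x = tf.placeholder(tf.float32, [8, mnist_inference.IMAGE_SIZE,
                                    mnist_inference.IMAGE_SIZE,
                                    mnist_inference.NUM_CHANNELS])
    logits = mnist_inference.inference(x, False, None)  # no dropout, no regularizer
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        out = sess.run(logits, feed_dict={x: np.zeros((8, 28, 28, 1), dtype=np.float32)})
        print(out.shape)  # expected: (8, 10)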

2. Training code: mnist_train.py

# -*- coding: utf-8 -*-

import os

import numpy as np

import tensorflow as tf  
from tensorflow.examples.tutorials.mnist import input_data  

import mnist_inference  

# Batch size
BATCH_SIZE = 100  
  
# Training hyperparameters
LEARNING_RATE_BASE = 0.02  
LEARNING_RATE_DECAY = 0.99   
REGULARIZATION_RATE= 0.0001   
TRAINING_STEPS = 80000    
MOVING_AVERAGE_DECAY = 0.99   

# Model checkpoint path and file name
MODEL_SAVE_PATH = "./model/"
MODEL_NAME = "model.ckpt"

  
def train(mnist):
    # Placeholders: LeNet-5 consumes 4-D image batches
    # [batch, height, width, channels] rather than flat 784-dim vectors.
    x = tf.placeholder(tf.float32, [BATCH_SIZE, mnist_inference.IMAGE_SIZE, mnist_inference.IMAGE_SIZE, mnist_inference.NUM_CHANNELS], name='x-input')
    y_ = tf.placeholder(tf.float32, [None, mnist_inference.OUTPUT_NODE], name='y-input')

    # Forward pass producing the logits y
    regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)
    y = mnist_inference.inference(x, True, regularizer)
    global_step = tf.Variable(0, trainable=False)

    # Exponential moving averages over all trainable variables
    variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
    variables_averages_op = variable_averages.apply(tf.trainable_variables())

    # Cross-entropy (labels are one-hot) plus the L2 penalties collected
    # in 'losses' --> the loss to minimize. Without the tf.add_n term the
    # regularizer passed to inference() would have no effect.
    cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y)
    cross_entropy_mean = tf.reduce_mean(cross_entropy)
    loss = cross_entropy_mean + tf.add_n(tf.get_collection('losses'))
    # Exponentially decaying learning rate
    learning_rate = tf.train.exponential_decay(LEARNING_RATE_BASE, global_step, mnist.train.num_examples / BATCH_SIZE, LEARNING_RATE_DECAY)
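    # My annotation: with the default staircase=False and the standard
    # 55,000-image MNIST training split this evaluates to
    #   learning_rate = 0.02 * 0.99 ** (global_step / 550)
    # i.e. the rate decays by a factor of 0.99 every 550 steps.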
    # One gradient-descent step; minimize() also increments global_step
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)
    # Bundle the gradient step and the moving-average update into one op
    with tf.control_dependencies([train_step, variables_averages_op]):
        train_op = tf.no_op(name='train')

    # Checkpointing
    saver = tf.train.Saver()
  
    with tf.Session() as sess:
        tf.global_variables_initializer().run()
  
        for i in range(TRAINING_STEPS):
            xs, ys = mnist.train.next_batch(BATCH_SIZE)
            # Reshape the flat 784-dim vectors into 28x28x1 image batches
            reshaped_xs = np.reshape(xs, (BATCH_SIZE, mnist_inference.IMAGE_SIZE, mnist_inference.IMAGE_SIZE, mnist_inference.NUM_CHANNELS))
            _, loss_value, step = sess.run([train_op, loss, global_step], feed_dict={x: reshaped_xs, y_: ys})
            # Report and checkpoint every 1000 steps
            if i % 1000 == 0:
                print("After %d training step(s), loss on training batch is %g" % (step, loss_value))
                saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME), global_step=global_step)
  
  
def main(argv=None):  
    mnist = input_data.read_data_sets("mnist_data/", one_hot=True)  
    train(mnist)  
  
if __name__ == '__main__':
    tf.app.run()
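
As a follow-up, here is a hedged sketch of a companion evaluation script, mnist_eval.py (my own addition, not part of the original post). It assumes the two files above are saved as mnist_inference.py and mnist_train.py, restores the newest checkpoint from MODEL_SAVE_PATH with a plain tf.train.Saver, and reports accuracy on the MNIST validation split:

# -*- coding: utf-8 -*-
# mnist_eval.py (sketch): restore the latest checkpoint and evaluate
import numpy as np
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

import mnist_inference
import mnist_train

def evaluate(mnist):
    n = mnist.validation.num_examples
    x = tf.placeholder(tf.float32, [n, mnist_inference.IMAGE_SIZE,
                                    mnist_inference.IMAGE_SIZE,
                                    mnist_inference.NUM_CHANNELS], name='x-input')
    y_ = tf.placeholder(tf.float32, [None, mnist_inference.OUTPUT_NODE], name='y-input')
    y = mnist_inference.inference(x, False, None)

    # Fraction of samples whose arg-max prediction matches the label
    correct = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))

    # Validation images arrive flat; reshape them into image batches
    xs = np.reshape(mnist.validation.images,
                    (n, mnist_inference.IMAGE_SIZE,
                     mnist_inference.IMAGE_SIZE, mnist_inference.NUM_CHANNELS))

    saver = tf.train.Saver()
    with tf.Session() as sess:
        ckpt = tf.train.get_checkpoint_state(mnist_train.MODEL_SAVE_PATH)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
            acc = sess.run(accuracy, feed_dict={x: xs, y_: mnist.validation.labels})
            print("validation accuracy = %g" % acc)
        else:
            print("No checkpoint found in %s" % mnist_train.MODEL_SAVE_PATH)

def main(argv=None):
    mnist = input_data.read_data_sets("mnist_data/", one_hot=True)
    evaluate(mnist)

if __name__ == '__main__':
    tf.app.run()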
