Simple Fully-Connected Neural Network -- MNIST

Handwritten-digit recognition with a fully-connected neural network. It performs worse than a CNN; this is just an exercise.

1. mnist_inference.py

#coding:utf-8

import tensorflow as tf

# Parameters describing the network structure
INPUT_NODE = 784    # 28x28 pixels flattened into one input vector
OUTPUT_NODE = 10    # one logit per digit class 0-9
LAYER1_NODE = 500   # size of the single hidden layer

def get_weight_variable(shape, regularizer):
    weights = tf.get_variable("weight", shape, initializer=tf.truncated_normal_initializer(stddev=0.1))

    # If a regularizer is given, add the regularization loss of these weights
    # to the "losses" collection; train.py sums that collection into the loss
    if regularizer is not None:
        tf.add_to_collection("losses", regularizer(weights))

    return weights


def inference(input_tensor, regularizer):
    # Hidden layer: fully connected, ReLU activation
    with tf.variable_scope("layer1"):
        weights = get_weight_variable([INPUT_NODE, LAYER1_NODE], regularizer)
        biases = tf.get_variable("biases", [LAYER1_NODE], initializer=tf.constant_initializer(0.0))
        layer1 = tf.nn.relu(tf.matmul(input_tensor, weights) + biases)

    # Output layer: raw logits; softmax is applied inside the loss in train.py
    with tf.variable_scope("layer2"):
        weights = get_weight_variable([LAYER1_NODE, OUTPUT_NODE], regularizer)
        biases = tf.get_variable("biases", [OUTPUT_NODE], initializer=tf.constant_initializer(0.0))
        layer2 = tf.matmul(layer1, weights) + biases

    return layer2
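
As a quick sanity check (not part of the original post), the graph can be built standalone to confirm the logits shape; this assumes mnist_inference.py sits in the same directory:

#coding:utf-8
# Hypothetical smoke test for mnist_inference.py -- a sketch, not from the original post
import tensorflow as tf
import mnist_inference

x = tf.placeholder(tf.float32, [None, mnist_inference.INPUT_NODE], name="x-input")
y = mnist_inference.inference(x, None)  # no regularizer needed just to check shapes
print(y.shape)  # expected: (?, 10), one logit per digit class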

2. train.py

#coding:utf-8

import os

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

# Load the forward-propagation graph defined in mnist_inference.py
import mnist_inference


# Hyper-parameters
BATCH_SIZE = 100
LEARNING_RATE_BASE = 0.8        # base learning rate for exponential decay
LEARNING_RATE_DECAY = 0.99      # learning-rate decay rate
REGULARIZATION_RATE = 0.0001    # weight of the L2 regularization term
TRAIN_STEP = 30000              # number of training steps
MOVING_AVERAGE_DECAY = 0.99     # moving-average decay rate

MODEL_SAVE_PATH = "./model"
MODEL_NAME = "model.ckpt"

def train(mnist):
    x = tf.placeholder(tf.float32, [None, mnist_inference.INPUT_NODE], name="x-input")
    y_ = tf.placeholder(tf.float32, [None, mnist_inference.OUTPUT_NODE], name="y-input")

    regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)

    y = mnist_inference.inference(x, regularizer)

    # global_step is marked not trainable so gradient descent never updates it;
    # minimize() below increments it by 1 on every training step instead
    global_step = tf.Variable(0, trainable=False)

    # Maintain a moving average (a "shadow" copy) of every trainable variable;
    # passing global_step lets the effective decay ramp up early in training
    variable_average = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
    variable_average_op = variable_average.apply(tf.trainable_variables())

    # Loss: cross entropy averaged over the batch ...
    cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y)
    cross_entropy_mean = tf.reduce_mean(cross_entropy)

    # ... plus the sum of the "losses" collection, where get_weight_variable
    # stored the L2 regularization terms of the weights
    loss = cross_entropy_mean + tf.add_n(tf.get_collection("losses"))

    # Exponentially decay the learning rate, roughly once per epoch
    learning_rate = tf.train.exponential_decay(LEARNING_RATE_BASE, global_step,
                                               mnist.train.num_examples // BATCH_SIZE,
                                               LEARNING_RATE_DECAY)

    # Define the training step; global_step increases by 1 on each run
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)

    # Group the two updates: a single sess.run both applies the gradients via
    # backpropagation and refreshes the shadow (moving-average) copies
    with tf.control_dependencies([train_step, variable_average_op]):
        train_op = tf.no_op(name="train")

    saver = tf.train.Saver()

    # saver.save fails if the target directory does not exist
    if not os.path.exists(MODEL_SAVE_PATH):
        os.makedirs(MODEL_SAVE_PATH)

    with tf.Session() as sess:
        tf.global_variables_initializer().run()

        for i in range(TRAIN_STEP):
            xs, ys = mnist.train.next_batch(BATCH_SIZE)
            _, loss_value, step = sess.run([train_op, loss, global_step], feed_dict={x: xs, y_: ys})

            # Print progress and save a checkpoint every 1000 steps
            if i % 1000 == 0:
                print("step %d, loss %g" % (step, loss_value))
                saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME), global_step=global_step)


def main(argv=None):
    mnist = input_data.read_data_sets("/tmp/data", one_hot=True)
    train(mnist)

if __name__ == "__main__":
    tf.app.run()
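
Note that train.py maintains the moving averages but never reads them; the shadow values are meant to be loaded at evaluation time. A minimal evaluation sketch along those lines (not part of the original post; the file name mnist_eval.py and the checkpoint path are assumptions carried over from train.py):

#coding:utf-8
# Hypothetical mnist_eval.py -- a sketch, not part of the original post
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import mnist_inference

MODEL_SAVE_PATH = "./model"     # assumes the path used in train.py
MOVING_AVERAGE_DECAY = 0.99     # must match train.py

def evaluate(mnist):
    x = tf.placeholder(tf.float32, [None, mnist_inference.INPUT_NODE], name="x-input")
    y_ = tf.placeholder(tf.float32, [None, mnist_inference.OUTPUT_NODE], name="y-input")
    y = mnist_inference.inference(x, None)  # no regularization at eval time

    # Accuracy: fraction of samples whose argmax logit matches the label
    correct = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))

    # Map each variable to its moving-average shadow, so the smoothed
    # parameters from training are the ones actually evaluated
    variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY)
    saver = tf.train.Saver(variable_averages.variables_to_restore())

    with tf.Session() as sess:
        ckpt = tf.train.get_checkpoint_state(MODEL_SAVE_PATH)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
            acc = sess.run(accuracy, feed_dict={x: mnist.validation.images,
                                                y_: mnist.validation.labels})
            print("validation accuracy = %g" % acc)

if __name__ == "__main__":
    evaluate(input_data.read_data_sets("/tmp/data", one_hot=True))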

