TensorFlow with Batch Normalization

This code shows how to implement an MNIST handwritten-digit recognition model with Batch Normalization in TensorFlow. The model consists of two fully connected hidden layers, each followed by Batch Normalization and a ReLU activation, plus a softmax output layer. It is trained with a gradient-descent optimizer, prints the loss periodically during training, and finally computes accuracy on the test set.
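For reference, batch normalization (Ioffe & Szegedy, 2015) normalizes each activation with the statistics of the current mini-batch, then applies a learnable scale and shift, which the code below calls alpha and beta:

$$\hat{x}_i = \frac{x_i - \mu_B}{\sqrt{\sigma_B^2 + \epsilon}}, \qquad y_i = \gamma\,\hat{x}_i + \beta$$

Here $\mu_B$ and $\sigma_B^2$ are the mini-batch mean and variance, and $\epsilon$ (1e-3 in this code) keeps the division numerically stable.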
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data


class BNMnist:
    def __init__(self):
        print("initialize......")
        # Note: each "epoch" below is really one mini-batch training step.
        self.epoches = 1000
        self.batch_size = 100
        self.mnist = input_data.read_data_sets('MNIST_DATA/', one_hot=True)
        self.learning_rate = 0.001
        self.regularizer_rate = 0.00001
    def Train(self):
        print("Train......")
        # Weights for two hidden layers (784->512->256) and the output layer.
        weights = {
            "W1": tf.Variable(tf.random_normal(shape=[784, 512], dtype=tf.float32, stddev=0.01)),
            "W2": tf.Variable(tf.random_normal(shape=[512, 256], dtype=tf.float32, stddev=0.01)),
            "Output": tf.Variable(tf.random_normal(shape=[256, 10], dtype=tf.float32, stddev=0.01))
        }
        bias = {
            'b1': tf.Variable(tf.zeros(shape=[512], dtype=tf.float32)),
            'b2': tf.Variable(tf.zeros(shape=[256], dtype=tf.float32)),
            'Output': tf.Variable(tf.zeros(shape=[10], dtype=tf.float32)),
        }
        # Placeholders for flattened 28x28 images and one-hot labels.
        x = tf.placeholder(dtype=tf.float32, shape=[None, 784])
        y = tf.placeholder(dtype=tf.float32, shape=[None, 10])
        
        with tf.variable_scope("BN1") as scope:
            alpha1 = tf.Variable(tf.ones(shape=[512],dtype= tf.float32))
            beta1 = tf.Variable(tf.zeros(shape=[512],dtype=tf.float32))
        with tf.variable_scope("BN2"):
            alpha2 = tf.Variable(tf.ones(shape=[256],dtype= tf.float32))
            beta2 = tf.Variable(tf.zeros(shape=[256],dtype=tf.float32))

        with tf.name_scope("layer1") as scope:
            Output1 = tf.nn.bias_add(tf.matmul(x, weights['W1']),bias['b1'])
            batch_mean1,batch_var1 = tf.nn.moments(Output1,[0])
            Output1_BN = tf.multiply((Output1-batch_mean1)/tf.sqrt(batch_var1+1e-3),alpha1) + beta1
            Output1_relu = tf.nn.relu(Output1_BN)


        with tf.name_scope("layer2") as scope:
            Output2 = tf.nn.bias_add(tf.matmul(Output1_relu,weights['W2']),bias['b2'])
            batch_mean2,batch_var2=tf.nn.moments(Output2,[0])
            
            Output2_BN = tf.multiply((Output2-batch_mean2)/tf.sqrt(batch_var2+1e-3),alpha2) + beta2
            Output2_relu = tf.nn.relu(Output2_BN)
            
        with tf.name_scope("Output") as scope:
            Output = tf.nn.bias_add(tf.matmul(Output2_relu,weights["Output"]),bias['Output'])
            probs = tf.nn.softmax(Output)
            
        regularizer = tf.contrib.layers.l2_regularizer(self.regularizer_rate);
        with tf.name_scope("Loss") as scope:
            loss = tf.reduce_sum(-tf.multiply(tf.log(probs),y)) + regularizer(weights['W1']) + regularizer(weights['W2']);
        
        with tf.name_scope("Accuracy") as scope:
            accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(probs,1),tf.argmax(y,1)),tf.float32))

        optimizer_op = tf.train.GradientDescentOptimizer(self.learning_rate).minimize(loss)
        init_op = tf.global_variables_initializer()
        loss_scalar = tf.summary.scalar("loss",loss)
        accuracy_scalar = tf.summary.scalar('accuracy',accuracy)

        with tf.Session() as sess:
            sess.run(init_op)
            summary_op = tf.summary.merge_all()
            summary_writer = tf.summary.FileWriter("./log", sess.graph)

            for i in range(self.epoches):
                batch_x, batch_y = self.mnist.train.next_batch(self.batch_size)

                train_feed = {x: batch_x, y: batch_y}
                sess.run(optimizer_op, feed_dict=train_feed)

                if i % 100 == 0:
                    print("loss:", loss.eval(feed_dict=train_feed), "; step:", i)
                summary_output = sess.run(summary_op, feed_dict=train_feed)
                summary_writer.add_summary(summary_output, i)

            # Note: BN here still normalizes with the statistics of the fed
            # batch (the whole test set), not with averages collected during
            # training; see the sketch after the listing.
            test_feed = {x: self.mnist.test.images, y: self.mnist.test.labels}
            print("accuracy:", accuracy.eval(feed_dict=test_feed))

            summary_writer.close()
if __name__ == "__main__":
    bnMnist = BNMnist()
    bnMnist.Train()
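A caveat about the listing above: normalization always uses the statistics of whatever batch is fed in, so at test time the model is normalized by the test set's own statistics rather than by anything learned during training. The usual fix is to accumulate moving averages of the batch mean and variance during training and use those at inference. Below is a minimal sketch of that pattern, assuming a boolean is_training placeholder; the helper name batch_norm and its arguments are illustrative, not part of the original post.

import tensorflow as tf

def batch_norm(inputs, alpha, beta, is_training, decay=0.99, eps=1e-3):
    # Non-trainable running statistics, updated only while training.
    dim = inputs.get_shape().as_list()[-1]
    moving_mean = tf.Variable(tf.zeros([dim]), trainable=False)
    moving_var = tf.Variable(tf.ones([dim]), trainable=False)

    def train_branch():
        # Normalize with the current batch and update the running averages.
        batch_mean, batch_var = tf.nn.moments(inputs, [0])
        update_mean = tf.assign(moving_mean, decay * moving_mean + (1 - decay) * batch_mean)
        update_var = tf.assign(moving_var, decay * moving_var + (1 - decay) * batch_var)
        with tf.control_dependencies([update_mean, update_var]):
            return tf.nn.batch_normalization(inputs, batch_mean, batch_var, beta, alpha, eps)

    def test_branch():
        # Normalize with the statistics accumulated during training.
        return tf.nn.batch_normalization(inputs, moving_mean, moving_var, beta, alpha, eps)

    return tf.cond(is_training, train_branch, test_branch)

In practice the built-in tf.layers.batch_normalization(inputs, training=is_training) handles all of this for you, as long as the update ops collected in tf.GraphKeys.UPDATE_OPS are run together with the train step.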
