TensorFlow in practice: MNIST (2). Demonstrates three optimizations (moving averages, an exponentially decaying learning rate, and regularization); test-set accuracy exceeds 98.4%.

What beginners need most is code that actually runs. The code below is the example from section 5.2.1 of《Tensorflow 实战Google深度学习框架》, with a few changes made for a different environment so that it runs as-is. The comments from the original code are omitted; see the book for the detailed commentary.

Program output: the screenshot from the original post is omitted here; as the title states, the final test-set accuracy comes out above 98.4%.

The complete code:

import tensorflow as tf

INPUT_NODE=784
OUTPUT_NODE=10

LAYER1_NODE=500
BATCH_SIZE=100

LEARNING_RATE_BASE=0.8
LEARNING_RATE_DECAY=0.99

REGULARIZATION_RATE=0.0001
TRAINING_STEPS=30000
MOVING_AVERAGE_DECAY=0.99
dataset_size = 60000

# The original code targets TF1's graph/session API; disable eager execution so it runs under TF2.
tf.compat.v1.disable_eager_execution()
# Load the data; the file mnist.npz is downloaded automatically, usually to C:/user/<your username>/.keras/datasets
mnist = tf.keras.datasets.mnist
(x_tr, y_tr), (x_te, y_te) = mnist.load_data()
# Convert the integer label vectors to one-hot float matrices
y_tr = tf.keras.utils.to_categorical(y_tr,10,dtype='float32')
y_te = tf.keras.utils.to_categorical(y_te,10,dtype='float32')
# Placeholders for the raw image batch and the number of samples in it
x_f_ph=tf.compat.v1.placeholder(tf.float32)
x_n_ph=tf.compat.v1.placeholder(tf.int32)
# Flatten the handwritten-digit bitmaps into float vectors and divide by 255 for simple normalization
x_ftr_op = tf.cast(x_tr, tf.float32)
x_vtr_op = tf.reshape(x_f_ph / 255.0, [x_n_ph, -1])
x_fte_op = tf.cast(x_te, tf.float32)
x_vte_op = tf.reshape(x_f_ph / 255.0, [x_n_ph, -1])

with tf.compat.v1.Session() as sess:
    tf.compat.v1.global_variables_initializer().run()
    x_ftr = sess.run(x_ftr_op)
    x_vtr = sess.run(x_vtr_op, feed_dict={x_f_ph:x_ftr, x_n_ph:x_ftr.shape[0]})
    x_fte = sess.run(x_fte_op)
    x_vte = sess.run(x_vte_op, feed_dict={x_f_ph:x_fte, x_n_ph:x_fte.shape[0]})
    # print(x_ftr[0:1,1:10, 2:8])
    print(x_fte.shape)
    # print(x_vtr[0:1,0:784])
    print(x_vte.shape)

# Display one bitmap; program execution pauses while the image is shown and continues after it is closed
import matplotlib.pyplot as plt
plt.imshow(x_fte[0],cmap="gray")
plt.show()
print(x_fte[0:1,1:10])
print("y_te[0]: ", y_te[0])

# Inference: forward pass of the network; when avg_class is given, evaluate with the
# moving-average (shadow) values of the weights and biases instead of their current values
def inference(input_tensor, avg_class, weights1, biases1, weights2, biases2):
    if avg_class is None:
        layer1 = tf.nn.relu(tf.matmul(input_tensor, weights1) + biases1)
        return tf.matmul(layer1, weights2) + biases2
    else:
        layer1 = tf.nn.relu(tf.matmul(input_tensor, avg_class.average(weights1)) +
                avg_class.average(biases1))
        return tf.matmul(layer1, avg_class.average(weights2)) + avg_class.average(biases2)

def train():
    x = tf.compat.v1.placeholder(tf.float32, [None, INPUT_NODE], name='x-input')
    y_ = tf.compat.v1.placeholder(tf.float32,[None,OUTPUT_NODE], name='y-input')

    weights1 = tf.Variable(tf.random.truncated_normal([INPUT_NODE, LAYER1_NODE], stddev=0.1))
    biases1 = tf.Variable(tf.constant(0.1,shape=[LAYER1_NODE]))

    weights2 = tf.Variable(tf.random.truncated_normal([LAYER1_NODE, OUTPUT_NODE], stddev=0.1))
    biases2 = tf.Variable(tf.constant(0.1,shape=[OUTPUT_NODE]))

    y = inference(x, None, weights1, biases1, weights2, biases2)

    global_step = tf.Variable(0, trainable=False)
    variable_averages=tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY,global_step)
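    # Each shadow variable is updated as shadow = decay * shadow + (1 - decay) * variable;
    # passing global_step caps the decay at min(MOVING_AVERAGE_DECAY, (1 + step) / (10 + step)).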

    variable_averages_op = variable_averages.apply(tf.compat.v1.trainable_variables())

    average_y = inference(x, variable_averages, weights1, biases1, weights2, biases2)

    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=tf.math.argmax(y_, 1))
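    # y_ is one-hot, so argmax recovers the integer class indices that the sparse cross-entropy op expects.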

    cross_entropy_mean = tf.compat.v1.reduce_mean(cross_entropy)

    regularizer = tf.keras.regularizers.L2(REGULARIZATION_RATE)
    regularization = regularizer(weights1) + regularizer(weights2)
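    # The L2 penalty is REGULARIZATION_RATE * sum(w ** 2); only the weights, not the biases, are regularized.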
    loss = cross_entropy_mean + regularization
    # loss = cross_entropy_mean

    learning_rate = tf.compat.v1.train.exponential_decay(LEARNING_RATE_BASE,
                      global_step, dataset_size / BATCH_SIZE, LEARNING_RATE_DECAY)
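    # Decay schedule: LEARNING_RATE_BASE * LEARNING_RATE_DECAY ** (global_step / 600),
    # where 600 = dataset_size / BATCH_SIZE steps make up one epoch.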

    train_step=tf.compat.v1.train.GradientDescentOptimizer(learning_rate)\
                   .minimize(loss, global_step=global_step)

    # Run the gradient step and the moving-average update together as a single op.
    with tf.control_dependencies([train_step, variable_averages_op]):
        train_op = tf.no_op(name='train')

    correct_prediction = tf.math.equal(tf.math.argmax(average_y, 1), tf.math.argmax(y_, 1))
    accuracy = tf.compat.v1.reduce_mean(tf.cast(correct_prediction, tf.float32))

    with tf.compat.v1.Session() as sess:
        tf.compat.v1.global_variables_initializer().run()
    
        print(dataset_size)

        validate_feed = {x:x_vtr, y_: y_tr}
        test_feed = {x:x_vte, y_:y_te}
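        # Note: keras.datasets provides no separate validation split, so the full
        # training set doubles as the validation feed here.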

        for i in range(TRAINING_STEPS):
            if i % 1000 == 0:
                validate_acc = sess.run(accuracy, feed_dict=validate_feed)
                print("After %d training step(s), validation accuracy " "using average model is %g" \
                        %(i, validate_acc))

            start=(i*BATCH_SIZE)%dataset_size
            end=min(start+BATCH_SIZE,dataset_size)
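            # Slide a BATCH_SIZE window over the training set, wrapping around after each epoch.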
            xs_tr = x_vtr[start:end]
            ys_tr = y_tr[start:end]

            sess.run(train_op, feed_dict={x:xs_tr, y_:ys_tr})

        test_acc = sess.run(accuracy, feed_dict=test_feed)
        print("After %d training step(s), test accuracy using average " "model is %g" \
                    %(TRAINING_STEPS, test_acc))

def main(argv=None):
    train()

if __name__=='__main__':
    tf.compat.v1.app.run()
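To see what the three optimizations actually compute without running the full script, here is a minimal plain-Python sketch of the three formulas (my own illustration under the hyperparameters above, not code from the book):

# Sanity check of the three formulas used above (illustration only, no TensorFlow needed).
LEARNING_RATE_BASE = 0.8
LEARNING_RATE_DECAY = 0.99
MOVING_AVERAGE_DECAY = 0.99
REGULARIZATION_RATE = 0.0001

# 1. Exponentially decaying learning rate (staircase=False):
#    lr = base * decay ** (global_step / decay_steps), with decay_steps = 60000 / 100 = 600.
def decayed_lr(step, decay_steps=600):
    return LEARNING_RATE_BASE * LEARNING_RATE_DECAY ** (step / decay_steps)

# 2. One moving-average update; with global_step supplied, the effective decay is
#    min(MOVING_AVERAGE_DECAY, (1 + step) / (10 + step)).
def ema_update(shadow, variable, step):
    decay = min(MOVING_AVERAGE_DECAY, (1 + step) / (10 + step))
    return decay * shadow + (1 - decay) * variable

# 3. L2 penalty as computed by tf.keras.regularizers.L2: rate * sum(w ** 2).
def l2_penalty(weights):
    return REGULARIZATION_RATE * sum(w * w for w in weights)

print(decayed_lr(0))                 # 0.8
print(decayed_lr(30000))             # ~0.484 at the end of training
print(ema_update(0.0, 1.0, step=0))  # decay = min(0.99, 0.1) = 0.1, so 0.1*0.0 + 0.9*1.0 = 0.9
print(l2_penalty([0.5, -0.5]))       # 0.0001 * 0.5 = 5e-05

By step 30000 the learning rate has decayed from 0.8 to roughly 0.48, which matches applying LEARNING_RATE_DECAY once per epoch for 30000 / 600 = 50 epochs.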
