(MNIST_data_nn) MNIST Handwritten Digit Recognition with a Single-Hidden-Layer Neural Network: Code

Import the libraries:

import tensorflow as tf
# Copy input_data.py from tensorflow\examples\tutorials\mnist into the current directory
import input_data
from time import time

print(tf.__version__)
# tensorflow 2.2.0

Load the data:

mnist = input_data.read_data_sets('MNIST_data/', one_hot=True)
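The script depends on the legacy input_data.py helper from the old TF1 tutorials. If that file is unavailable, roughly equivalent arrays can be built from tf.keras.datasets.mnist. The sketch below is an assumed substitute, not part of the original post, and it skips the 55,000/5,000 train/validation split that read_data_sets performs:

import numpy as np

# Hypothetical fallback loader (not in the original post): build flat,
# normalized, one-hot MNIST arrays from the Keras dataset bundle.
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
x_train = x_train.reshape(-1, 784).astype('float32') / 255.0   # 28x28 -> 784, scaled to [0, 1]
x_test = x_test.reshape(-1, 784).astype('float32') / 255.0
y_train = np.eye(10, dtype='float32')[y_train]                 # one-hot, like one_hot=True above
y_test = np.eye(10, dtype='float32')[y_test]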

Hyperparameter settings:

# Training parameters
train_epochs = 20
batch_size = 50     # mini-batch size
total_batch = int(mnist.train.num_examples / batch_size)    # number of batches per epoch
display_step = 1
learning_rate = 0.01

# Learning-rate decay parameters
lr_decay_switch = True  # toggle for learning-rate decay
decay_rate = 0.5   # decay rate
global_step = 20   # step value fed to exponential_decay below
decay_step = 5     # decay interval
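With staircase=True, exponential_decay computes decayed_lr = learning_rate * decay_rate ** (global_step // decay_steps). With the values above, each application multiplies the rate by 0.5 ** (20 // 5) = 1/16, which is why the first printed learning rate in the results below is 0.000625:

# Quick check of the staircase decay used later:
# decayed_lr = learning_rate * decay_rate ** (global_step // decay_steps)
print(0.01 * 0.5 ** (20 // 5))   # 0.000625, matching the epoch-1 log line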

Build the model:

# The script uses the TF1-style graph API, so disable TF2 eager execution
tf.compat.v1.disable_eager_execution()
x = tf.compat.v1.placeholder(tf.float32, [None, 784], name='x')
y = tf.compat.v1.placeholder(tf.float32, [None, 10], name='y')

# number of neurons in the hidden layer
H1_NN = 256

# Hidden layer: weights drawn from a normal distribution, zero-initialized biases
W1 = tf.Variable(tf.compat.v1.random_normal([784, H1_NN]))
B1 = tf.Variable(tf.zeros([H1_NN]))

Y1 = tf.nn.relu(tf.matmul(x, W1) + B1)

# Output layer: 10 logits, one per digit class
W2 = tf.Variable(tf.compat.v1.random_normal([H1_NN, 10]))
B2 = tf.Variable(tf.zeros([10]))

forward = tf.matmul(Y1, W2) + B2
pred = tf.nn.softmax(forward)


correct_prediction = tf.equal(tf.argmax(y, axis=1), tf.argmax(pred, axis=1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, dtype=tf.float32))    # accuracy


# Cross-entropy loss is computed from the raw logits (forward), not pred,
# because softmax_cross_entropy_with_logits applies softmax internally
loss_function = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=forward,
                                                                       labels=y))

# Adam optimizer, built once with the initial learning_rate float
optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate).minimize(loss_function)
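One caveat: AdamOptimizer captures learning_rate as a plain Python float at graph-construction time, so the exponential_decay applied inside the training loop below only changes the number that gets printed; Adam keeps training at 0.01 throughout, which is why accuracy keeps improving even after the printed rate reaches zero. If the decayed rate should actually drive training, one option is to feed it through a placeholder. A minimal sketch, with lr_ph as an illustrative name not in the original code:

# Hypothetical rewiring (not the original code): feed the learning rate in
# at each step so a decayed value actually reaches the optimizer.
lr_ph = tf.compat.v1.placeholder(tf.float32, [], name='lr')
optimizer = tf.compat.v1.train.AdamOptimizer(lr_ph).minimize(loss_function)
# ...and inside the training loop:
# sess.run(optimizer, feed_dict={x: xs, y: ys, lr_ph: current_lr})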

Train the model:


# record the training start time
start_time = time()

sess = tf.compat.v1.Session()
sess.run(tf.compat.v1.global_variables_initializer())

for epoch in range(train_epochs):
    for batch in range(total_batch):
        xs, ys = mnist.train.next_batch(batch_size)
        sess.run(optimizer, feed_dict={x: xs,
                                       y: ys})

    # after the epoch's total_batch mini-batches, compute loss and accuracy on the validation set
    loss, acc = sess.run([loss_function, accuracy], feed_dict={x: mnist.validation.images,
                                                               y: mnist.validation.labels})

    if (epoch+1) % display_step == 0:
        print("Train Epoch:", "%02d" % (epoch+1),
              "Loss=", "{:.9f}".format(loss),
              "Accuary=", "{:.4}".format(acc))

    # learning-rate decay (see the caveat after the optimizer above: this
    # re-wraps the tensor in a new decay op each epoch and only affects the
    # printed value, not the rate Adam uses)
    if lr_decay_switch:
        learning_rate = tf.compat.v1.train.exponential_decay(learning_rate=learning_rate,
                                                             global_step=global_step,
                                                             decay_steps=decay_step,
                                                             decay_rate=decay_rate,
                                                             staircase=True)
        print("learning rate:{:.16f}".format(sess.run(learning_rate)))

# print the total elapsed time
duration = time() - start_time
print("Train Finished takes:", "{:.2f}".format(duration), "s")

Results:

Train Epoch: 01 Loss= 1.205354691 Accuracy= 0.941
learning rate:0.0006249999860302
Train Epoch: 02 Loss= 0.653367460 Accuracy= 0.9466
learning rate:0.0000390624991269
Train Epoch: 03 Loss= 0.509876728 Accuracy= 0.957
learning rate:0.0000024414061954
Train Epoch: 04 Loss= 0.397028536 Accuracy= 0.9586
learning rate:0.0000001525878872
Train Epoch: 05 Loss= 0.347795546 Accuracy= 0.9632
learning rate:0.0000000095367430
Train Epoch: 06 Loss= 0.353705555 Accuracy= 0.9616
learning rate:0.0000000005960464
Train Epoch: 07 Loss= 0.357088804 Accuracy= 0.9614
learning rate:0.0000000000372529
Train Epoch: 08 Loss= 0.333000481 Accuracy= 0.9632
learning rate:0.0000000000023283
Train Epoch: 09 Loss= 0.358141184 Accuracy= 0.964
learning rate:0.0000000000001455
Train Epoch: 10 Loss= 0.403971523 Accuracy= 0.9664
learning rate:0.0000000000000091
Train Epoch: 11 Loss= 0.305930674 Accuracy= 0.9698
learning rate:0.0000000000000006
Train Epoch: 12 Loss= 0.390651315 Accuracy= 0.9658
learning rate:0.0000000000000000
Train Epoch: 13 Loss= 0.418165475 Accuracy= 0.9656
learning rate:0.0000000000000000
Train Epoch: 14 Loss= 0.392932385 Accuracy= 0.9672
learning rate:0.0000000000000000
Train Epoch: 15 Loss= 0.416402251 Accuracy= 0.9684
learning rate:0.0000000000000000
Train Epoch: 16 Loss= 0.421397567 Accuracy= 0.9658
learning rate:0.0000000000000000
Train Epoch: 17 Loss= 0.370880157 Accuracy= 0.9708
learning rate:0.0000000000000000
Train Epoch: 18 Loss= 0.444905162 Accuracy= 0.9706
learning rate:0.0000000000000000
Train Epoch: 19 Loss= 0.424002051 Accuracy= 0.9728
learning rate:0.0000000000000000
Train Epoch: 20 Loss= 0.433843017 Accuracy= 0.971
learning rate:0.0000000000000000
Train Finished takes: 21.73 s

Process finished with exit code 0
