# 深度学习框架tensorflow学习与应用7（改变模型和优化器提升准确率）

#训练


# coding: utf-8

# In[ ]:

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

# Load the MNIST dataset (downloaded to ./MNIST_data on first run).
# one_hot=True encodes each label as a 10-dim one-hot vector.
# NOTE: the original snippet used `mnist` below without ever loading it.
mnist = input_data.read_data_sets("MNIST_data", one_hot=True)

# Size of each mini-batch.
batch_size = 100
# Number of batches per epoch (integer division drops the remainder).
n_batch = mnist.train.num_examples // batch_size

# Placeholders: flattened 28x28 images (784 floats) and one-hot labels (10 classes).
x = tf.placeholder(tf.float32, [None, 784])
y = tf.placeholder(tf.float32, [None, 10])

# A single-layer softmax classifier: logits = x*W + b.
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
logits = tf.matmul(x, W) + b
prediction = tf.nn.softmax(logits)

# Quadratic cost, kept for comparison:
# loss = tf.reduce_mean(tf.square(y-prediction))
# Cross-entropy loss. NOTE: softmax_cross_entropy_with_logits applies
# softmax internally, so it must be fed the RAW logits — the original
# passed the already-softmaxed `prediction`, applying softmax twice.
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=logits))
# Alternative optimizers tried in the tutorial:
# train_step_MomentumOptimizer = tf.train.MomentumOptimizer(1e-3, 0.9).minimize(loss)
# train_step_RMSPropOptimizer = tf.train.RMSPropOptimizer(0.003, 0.9).minimize(loss)
# Initializer for all variables created so far.
init = tf.global_variables_initializer()

# Boolean vector: True where the predicted class index equals the label index.
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(prediction, 1))  # argmax returns the index of the max entry
# Accuracy = mean of the booleans cast to float.
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

# The published snippet never defined or ran a training op, so the loop
# only evaluated the untrained network; define plain gradient descent here
# and run it on every mini-batch. (GradientDescentOptimizer creates no
# slot variables, so the earlier `init` op still covers everything.)
train_step = tf.train.GradientDescentOptimizer(0.2).minimize(loss)

with tf.Session() as sess:
    sess.run(init)
    for epoch in range(51):
        for batch in range(n_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            sess.run(train_step, feed_dict={x: batch_xs, y: batch_ys})

        # Evaluate on the full test set once per epoch.
        acc = sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels})
        print("Iter " + str(epoch) + ",Testing Accuracy " + str(acc))



# Re-create the single-layer network so the Adam run starts from fresh
# (zero-initialized) weights.
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
logits = tf.matmul(x, W) + b
prediction = tf.nn.softmax(logits)
# Rebuild the loss from the NEW logits — the original minimized the loss
# tensor built from the previous W/b, so Adam never touched these fresh
# variables (and it fed a softmax output where raw logits are required).
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=logits))
# Adam with a 1e-3 learning rate.
train_step_AdamOptimizer = tf.train.AdamOptimizer(1e-3).minimize(loss)
# Adam creates slot variables (m, v) per parameter; re-create the init op
# AFTER minimize() so they are covered by initialization.
init = tf.global_variables_initializer()

Iter 0,Testing Accuracy 0.9014
Iter 1,Testing Accuracy 0.912
Iter 2,Testing Accuracy 0.9163
Iter 3,Testing Accuracy 0.9191
Iter 4,Testing Accuracy 0.921
Iter 5,Testing Accuracy 0.9257
Iter 6,Testing Accuracy 0.9249
Iter 7,Testing Accuracy 0.9273
Iter 8,Testing Accuracy 0.9285
Iter 9,Testing Accuracy 0.9298
Iter 10,Testing Accuracy 0.9278
Iter 11,Testing Accuracy 0.9303
Iter 12,Testing Accuracy 0.9298
Iter 13,Testing Accuracy 0.9303
Iter 14,Testing Accuracy 0.9309
Iter 15,Testing Accuracy 0.931
Iter 16,Testing Accuracy 0.931
Iter 17,Testing Accuracy 0.931
Iter 18,Testing Accuracy 0.9315
Iter 19,Testing Accuracy 0.932
Iter 20,Testing Accuracy 0.9318
Iter 21,Testing Accuracy 0.932
Iter 22,Testing Accuracy 0.931
Iter 23,Testing Accuracy 0.9326
Iter 24,Testing Accuracy 0.9322
Iter 25,Testing Accuracy 0.9324
Iter 26,Testing Accuracy 0.9328
Iter 27,Testing Accuracy 0.9317
Iter 28,Testing Accuracy 0.9332
Iter 29,Testing Accuracy 0.9332
Iter 30,Testing Accuracy 0.9333
Iter 31,Testing Accuracy 0.9339
Iter 32,Testing Accuracy 0.9332
Iter 33,Testing Accuracy 0.9335
Iter 34,Testing Accuracy 0.9341
Iter 35,Testing Accuracy 0.9337
Iter 36,Testing Accuracy 0.9339
Iter 37,Testing Accuracy 0.9342
Iter 38,Testing Accuracy 0.9334
Iter 39,Testing Accuracy 0.9344
Iter 40,Testing Accuracy 0.9338
Iter 41,Testing Accuracy 0.9343
Iter 42,Testing Accuracy 0.9341
Iter 43,Testing Accuracy 0.9338
Iter 44,Testing Accuracy 0.9338
Iter 45,Testing Accuracy 0.9336
Iter 46,Testing Accuracy 0.9337
Iter 47,Testing Accuracy 0.9344
Iter 48,Testing Accuracy 0.9339
Iter 49,Testing Accuracy 0.9337
Iter 50,Testing Accuracy 0.934

• 创建两个隐藏层
# Network with two hidden layers (784 -> 500 -> 300 -> 10).
# Dropout keep-probability, fed at run time (1.0 = dropout disabled).
# NOTE(review): keep_prob was never defined anywhere in this file even
# though the training loop below feeds it — it must be a placeholder.
keep_prob = tf.placeholder(tf.float32)

# Hidden layer 1: 784 -> 500, tanh activation, dropout.
W1 = tf.Variable(tf.truncated_normal([784, 500], stddev=0.1))
b1 = tf.Variable(tf.zeros([500]) + 0.1)
L1 = tf.nn.tanh(tf.matmul(x, W1) + b1)
L1_drop = tf.nn.dropout(L1, keep_prob)

# Hidden layer 2: 500 -> 300, tanh activation, dropout.
W2 = tf.Variable(tf.truncated_normal([500, 300], stddev=0.1))
b2 = tf.Variable(tf.zeros([300]) + 0.1)
L2 = tf.nn.tanh(tf.matmul(L1_drop, W2) + b2)
L2_drop = tf.nn.dropout(L2, keep_prob)

# Output layer: 300 -> 10, softmax over the class scores.
W3 = tf.Variable(tf.truncated_normal([300, 10], stddev=0.1))
b3 = tf.Variable(tf.zeros([10]) + 0.1)
prediction = tf.nn.softmax(tf.matmul(L2_drop, W3) + b3)


• 变化的学习率：为什么要这样使用呢？因为固定的大学习率会使参数在最低点附近来回震荡而无法收敛。所以开始时学习率可以大一些，越到后面、越接近最优解时学习率应该越小。为此定义一个变量 lr = tf.Variable(0.001, dtype=tf.float32)

每个 epoch 开始时通过 assign 更新 lr：sess.run(tf.assign(lr, 0.001 * (0.95 ** epoch)))

# Training with a decaying learning rate: start relatively large so the
# optimizer moves quickly, then shrink it each epoch so the updates settle
# near the optimum instead of oscillating around it.
lr = tf.Variable(0.001, dtype=tf.float32)

# Rebuild loss/accuracy from the CURRENT `prediction` tensor so training
# actually updates the network defined above (the original minimized a
# loss built from an earlier, unrelated prediction).
# TODO(review): `prediction` is a softmax output, but this op expects raw
# logits (softmax gets applied twice) — kept to mirror the tutorial.
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=prediction))
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(prediction, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

train_step = tf.train.AdamOptimizer(lr).minimize(loss)

with tf.Session() as sess:
    # Fresh initializer: Adam's slot variables and `lr` were created after
    # any earlier `init` op was built, so `init` would miss them.
    sess.run(tf.global_variables_initializer())
    for epoch in range(51):
        # lr = 0.001 * 0.95^epoch — exponential decay, once per epoch.
        sess.run(tf.assign(lr, 0.001 * (0.95 ** epoch)))
        for batch in range(n_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            # keep_prob=1.0 disables dropout for this run.
            # NOTE(review): keep_prob must be the placeholder defined with
            # the network above.
            sess.run(train_step, feed_dict={x: batch_xs, y: batch_ys, keep_prob: 1.0})

        learning_rate = sess.run(lr)
        acc = sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels, keep_prob: 1.0})
        print("Iter " + str(epoch) + ", Testing Accuracy= " + str(acc) + ", Learning Rate= " + str(learning_rate))

Iter 0, Testing Accuracy= 0.9517, Learning Rate= 0.001
Iter 1, Testing Accuracy= 0.9597, Learning Rate= 0.00095
Iter 2, Testing Accuracy= 0.9673, Learning Rate= 0.0009025
Iter 3, Testing Accuracy= 0.9732, Learning Rate= 0.000857375
Iter 4, Testing Accuracy= 0.9725, Learning Rate= 0.00081450626
Iter 5, Testing Accuracy= 0.9737, Learning Rate= 0.0007737809
Iter 6, Testing Accuracy= 0.9745, Learning Rate= 0.0007350919
Iter 7, Testing Accuracy= 0.9755, Learning Rate= 0.0006983373
Iter 8, Testing Accuracy= 0.9761, Learning Rate= 0.0006634204
Iter 9, Testing Accuracy= 0.9787, Learning Rate= 0.0006302494
Iter 10, Testing Accuracy= 0.978, Learning Rate= 0.0005987369
Iter 11, Testing Accuracy= 0.9763, Learning Rate= 0.0005688001
Iter 12, Testing Accuracy= 0.9799, Learning Rate= 0.0005403601
Iter 13, Testing Accuracy= 0.9782, Learning Rate= 0.0005133421
Iter 14, Testing Accuracy= 0.9806, Learning Rate= 0.000487675
Iter 15, Testing Accuracy= 0.9794, Learning Rate= 0.00046329122
Iter 16, Testing Accuracy= 0.9807, Learning Rate= 0.00044012666
Iter 17, Testing Accuracy= 0.9809, Learning Rate= 0.00041812033
Iter 18, Testing Accuracy= 0.9811, Learning Rate= 0.00039721432
Iter 19, Testing Accuracy= 0.9786, Learning Rate= 0.0003773536
Iter 20, Testing Accuracy= 0.9808, Learning Rate= 0.00035848594
Iter 21, Testing Accuracy= 0.9817, Learning Rate= 0.00034056162
Iter 22, Testing Accuracy= 0.9823, Learning Rate= 0.00032353355
Iter 23, Testing Accuracy= 0.9821, Learning Rate= 0.00030735688
Iter 24, Testing Accuracy= 0.9807, Learning Rate= 0.000291989
Iter 25, Testing Accuracy= 0.9805, Learning Rate= 0.00027738957
Iter 26, Testing Accuracy= 0.9813, Learning Rate= 0.0002635201
Iter 27, Testing Accuracy= 0.9822, Learning Rate= 0.00025034408
Iter 28, Testing Accuracy= 0.9817, Learning Rate= 0.00023782688
Iter 29, Testing Accuracy= 0.9814, Learning Rate= 0.00022593554
Iter 30, Testing Accuracy= 0.9814, Learning Rate= 0.00021463877
Iter 31, Testing Accuracy= 0.9809, Learning Rate= 0.00020390682
Iter 32, Testing Accuracy= 0.9819, Learning Rate= 0.00019371149
Iter 33, Testing Accuracy= 0.9824, Learning Rate= 0.0001840259
Iter 34, Testing Accuracy= 0.9815, Learning Rate= 0.00017482461
Iter 35, Testing Accuracy= 0.9807, Learning Rate= 0.00016608338
Iter 36, Testing Accuracy= 0.9821, Learning Rate= 0.00015777921
Iter 37, Testing Accuracy= 0.982, Learning Rate= 0.00014989026
Iter 38, Testing Accuracy= 0.9819, Learning Rate= 0.00014239574
Iter 39, Testing Accuracy= 0.9816, Learning Rate= 0.00013527596
Iter 40, Testing Accuracy= 0.9819, Learning Rate= 0.00012851215
Iter 41, Testing Accuracy= 0.9821, Learning Rate= 0.00012208655
Iter 42, Testing Accuracy= 0.9822, Learning Rate= 0.00011598222
Iter 43, Testing Accuracy= 0.9821, Learning Rate= 0.00011018311
Iter 44, Testing Accuracy= 0.9814, Learning Rate= 0.000104673956
Iter 45, Testing Accuracy= 0.982, Learning Rate= 9.944026e-05
Iter 46, Testing Accuracy= 0.9813, Learning Rate= 9.446825e-05
Iter 47, Testing Accuracy= 0.9821, Learning Rate= 8.974483e-05
Iter 48, Testing Accuracy= 0.9822, Learning Rate= 8.525759e-05
Iter 49, Testing Accuracy= 0.9821, Learning Rate= 8.099471e-05
Iter 50, Testing Accuracy= 0.9823, Learning Rate= 7.6944976e-05