# Demo: exponentially decaying learning rate driven by global_step.
# NOTE(review): this chunk assumes `import tensorflow as tf` (TF 1.x) and the
# constants LEARNING_RATE_BASE / LEARNING_RATE_STEP / LEARNING_RATE_DECAY are
# defined earlier in the file. From the printed output they appear to be
# 0.1 / 1 / 0.99 with staircase decay -- confirm against the full script.

# Counter of how many batches have been run; starts at 0 and is marked
# trainable=False so the optimizer does not update it via gradients
# (it is incremented by minimize() instead, see below).
global_step = tf.Variable(0, trainable=False)

# Exponentially decaying learning rate:
# lr = BASE * DECAY ^ (global_step / STEP), floored to integer steps
# because staircase=True.
learning_rate = tf.train.exponential_decay(
    LEARNING_RATE_BASE,
    global_step,
    LEARNING_RATE_STEP,
    LEARNING_RATE_DECAY,
    staircase=True,
)

# Parameter to optimize, initialized to 10.
w = tf.Variable(tf.constant(10, dtype=tf.float32))

# Loss function: (w + 1)^2, which is minimized at w = -1.
loss = tf.square(w + 1)

# Back-propagation step. Passing global_step here makes the optimizer
# increment it on every training step, which in turn drives the decay above.
train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(
    loss, global_step=global_step
)

# Create a session and train for 40 rounds.
with tf.Session() as sess:
    init_op = tf.global_variables_initializer()
    sess.run(init_op)
    for i in range(40):
        sess.run(train_step)
        # Each value is fetched in its own run() call, after the update,
        # matching the original script's behavior. global_step must also be
        # evaluated in the session to observe its incremented value.
        learning_rate_val = sess.run(learning_rate)
        global_step_val = sess.run(global_step)
        w_val = sess.run(w)
        loss_val = sess.run(loss)
        # Fixed: the original used curly "smart quotes" around this string,
        # which is a SyntaxError in Python.
        print("After %s steps: global_step is %f, w is %f, "
              "learning rate is %f, loss is %f"
              % (i, global_step_val, w_val, learning_rate_val, loss_val))
Running the script prints the following output:
After 0 steps: global_step is 1.000000, w is 7.800000, learning rate is 0.099000, loss is 77.440002
After 1 steps: global_step is 2.000000, w is 6.057600, learning rate is 0.098010, loss is 49.809719
After 2 steps: global_step is 3.000000, w is 4.674170, learning rate is 0.097030, loss is 32.196201
After 3 steps: global_step is 4.000000, w is 3.573041, learning rate is 0.096060, loss is 20.912704
After 4 steps: global_step is 5.000000, w is 2.694472, learning rate is 0.095099, loss is 13.649124
After 5 steps: global_step is 6.000000, w is 1.991791, learning rate is 0.094148, loss is 8.950812
After 6 steps: global_step is 7.000000, w is 1.428448, learning rate is 0.093207, loss is 5.897362
After 7 steps: global_step is 8.000000, w is 0.975754, learning rate is 0.092274, loss is 3.903603
After 8 steps: global_step is 9.000000, w is 0.611131, learning rate is 0.091352, loss is 2.595742
After 9 steps: global_step is 10.000000, w is 0.316771, learning rate is 0.090438, loss is 1.733887
After 10 steps: global_step is 11.000000, w is 0.078598, learning rate is 0.089534, loss is 1.163375
After 11 steps: global_step is 12.000000, w is -0.114544, learning rate is 0.088638, loss is 0.784033
After 12 steps: global_step is 13.000000, w is -0.271515, learning rate is 0.087752, loss is 0.530691
After 13 steps: global_step is 14.000000, w is -0.399367, learning rate is 0.086875, loss is 0.360760
After 14 steps: global_step is 15.000000, w is -0.503726, learning rate is 0.086006, loss is 0.246287
After 15 steps: global_step is 16.000000, w is -0.589091, learning rate is 0.085146, loss is 0.168846
After 16 steps: global_step is 17.000000, w is -0.659066, learning rate is 0.084294, loss is 0.116236
After 17 steps: global_step is 18.000000, w is -0.716543, learning rate is 0.083451, loss is 0.080348
After 18 steps: global_step is 19.000000, w is -0.763853, learning rate is 0.082617, loss is 0.055765
After 19 steps: global_step is 20.000000, w is -0.802872, learning rate is 0.081791, loss is 0.038859
After 20 steps: global_step is 21.000000, w is -0.835119, learning rate is 0.080973, loss is 0.027186
After 21 steps: global_step is 22.000000, w is -0.861821, learning rate is 0.080163, loss is 0.019094
After 22 steps: global_step is 23.000000, w is -0.883974, learning rate is 0.079361, loss is 0.013462
After 23 steps: global_step is 24.000000, w is -0.902390, learning rate is 0.078568, loss is 0.009528
After 24 steps: global_step is 25.000000, w is -0.917728, learning rate is 0.077782, loss is 0.006769
After 25 steps: global_step is 26.000000, w is -0.930527, learning rate is 0.077004, loss is 0.004827
After 26 steps: global_step is 27.000000, w is -0.941226, learning rate is 0.076234, loss is 0.003454
After 27 steps: global_step is 28.000000, w is -0.950187, learning rate is 0.075472, loss is 0.002481
After 28 steps: global_step is 29.000000, w is -0.957706, learning rate is 0.074717, loss is 0.001789
After 29 steps: global_step is 30.000000, w is -0.964026, learning rate is 0.073970, loss is 0.001294
After 30 steps: global_step is 31.000000, w is -0.969348, learning rate is 0.073230, loss is 0.000940
After 31 steps: global_step is 32.000000, w is -0.973838, learning rate is 0.072498, loss is 0.000684
After 32 steps: global_step is 33.000000, w is -0.977631, learning rate is 0.071773, loss is 0.000500
After 33 steps: global_step is 34.000000, w is -0.980842, learning rate is 0.071055, loss is 0.000367
After 34 steps: global_step is 35.000000, w is -0.983565, learning rate is 0.070345, loss is 0.000270
After 35 steps: global_step is 36.000000, w is -0.985877, learning rate is 0.069641, loss is 0.000199
After 36 steps: global_step is 37.000000, w is -0.987844, learning rate is 0.068945, loss is 0.000148
After 37 steps: global_step is 38.000000, w is -0.989520, learning rate is 0.068255, loss is 0.000110
After 38 steps: global_step is 39.000000, w is -0.990951, learning rate is 0.067573, loss is 0.000082
After 39 steps: global_step is 40.000000, w is -0.992174, learning rate is 0.066897, loss is 0.000061
current global_step: 0
current w1 [0.0, 0.0]
current global_step: 0
current w1 [1.0, 0.9]
current global_step: 100
current w1: [10.0, 1.6445453]
current global_step: 100
current w1: [10.0, 2.3281732]
current global_step: 100
current w1: [10.0, 2.955868]
current global_step: 100
current w1: [10.0, 3.532206]
current global_step: 100
current w1: [10.0, 4.061389]
current global_step: 100
current w1: [10.0, 4.547275]
current global_step: 100
current w1: [10.0, 4.9934072]