# Set the learning rate as a non-trainable Variable, so it becomes an op node in the graph rather than a plain scalar
self.decay_learning_rate = tf.Variable(float(self.learning_rate), trainable=False, dtype=tf.float32)
# The optimizer obtains the learning rate by first evaluating this node's current value
self.optimizer = tf.train.AdagradOptimizer(learning_rate=self.decay_learning_rate, initial_accumulator_value=1e-8).minimize(self.loss)
# Define a decay op in the graph
self.decay_ops = self.decay_learning_rate.assign(self.decay_learning_rate*0.5)
# When the trigger condition is met, run the decay op
res = self.sess.run(self.decay_ops)
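
To make the pattern concrete, here is a minimal self-contained sketch (assuming the TensorFlow 1.x graph-mode API; the toy model, feed values, and halving trigger are hypothetical, not from the original):

import tensorflow as tf

# Hypothetical toy model: fit y = w * x with a single trainable weight
x = tf.placeholder(tf.float32, shape=[None])
y = tf.placeholder(tf.float32, shape=[None])
w = tf.Variable(0.0)
loss = tf.reduce_mean(tf.square(w * x - y))

# Learning rate as a non-trainable Variable, plus the halving op
decay_learning_rate = tf.Variable(0.1, trainable=False, dtype=tf.float32)
decay_op = decay_learning_rate.assign(decay_learning_rate * 0.5)
train_op = tf.train.AdagradOptimizer(learning_rate=decay_learning_rate).minimize(loss)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for step in range(90):
        sess.run(train_op, feed_dict={x: [1., 2., 3.], y: [2., 4., 6.]})
        if step % 30 == 29:  # hypothetical trigger; any condition works
            print('learning rate halved to', sess.run(decay_op))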
Alternatively:
# Define the learning rate as a placeholder
learning_rate = tf.placeholder(tf.float32, shape=[])
train_step = tf.train.GradientDescentOptimizer(
    learning_rate=learning_rate).minimize(mse)
# Feed a different learning rate on each run
sess.run(train_step, feed_dict={learning_rate: 0.1})
sess.run(train_step, feed_dict={learning_rate: 0.01})
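
With the placeholder approach the schedule lives entirely on the Python side. A minimal sketch under the same assumptions as above (the toy mse and the staircase exponential schedule are illustrative choices, not from the original):

import tensorflow as tf

# Hypothetical toy regression providing the mse used above
x = tf.placeholder(tf.float32, shape=[None])
y = tf.placeholder(tf.float32, shape=[None])
w = tf.Variable(0.0)
mse = tf.reduce_mean(tf.square(w * x - y))

learning_rate = tf.placeholder(tf.float32, shape=[])
train_step = tf.train.GradientDescentOptimizer(
    learning_rate=learning_rate).minimize(mse)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for step in range(100):
        lr = 0.1 * 0.96 ** (step // 10)  # any Python-side schedule works
        sess.run(train_step,
                 feed_dict={x: [1., 2., 3.], y: [2., 4., 6.], learning_rate: lr})

For standard schedules, TF 1.x also ships in-graph helpers such as tf.train.exponential_decay, whose output tensor can be passed directly as the optimizer's learning_rate.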