TF: tf.train.Optimizer

The 7 optimization methods in tf.train

optimizer = tf.train.AdadeltaOptimizer(rate)
optimizer = tf.train.AdagradOptimizer(rate)
optimizer = tf.train.AdamOptimizer(rate)
optimizer = tf.train.FtrlOptimizer(rate)
optimizer = tf.train.GradientDescentOptimizer(rate)
optimizer = tf.train.MomentumOptimizer(rate)
optimizer = tf.train.RMSPropOptimizer(rate)
train = optimizer.minimize(loss, global_step=step)

Usage:

opt = tf.train.XXXOptimizer(learning_rate)  # each optimizer also has a few parameters of its own
opt_op = opt.minimize(loss,
                      global_step=None,
                      var_list=None,
                      gate_gradients=1,
                      name=None)
'''
Args:
    loss: the loss tensor to minimize
    global_step: see below
    var_list: variables to update when minimizing loss; defaults to the variables in GraphKeys.TRAINABLE_VARIABLES
    gate_gradients: one of GATE_NONE, GATE_OP, GATE_GRAPH
    name: name for the returned op
'''
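
As the comment above notes, each optimizer also takes its own hyperparameters beyond the learning rate. A quick sketch of some of the constructor-specific arguments (the values shown here are illustrative, not the library defaults):

opt = tf.train.MomentumOptimizer(rate, momentum=0.9, use_nesterov=True)
opt = tf.train.AdamOptimizer(rate, beta1=0.9, beta2=0.999, epsilon=1e-8)
opt = tf.train.RMSPropOptimizer(rate, decay=0.9, momentum=0.0, epsilon=1e-10)
opt = tf.train.AdagradOptimizer(rate, initial_accumulator_value=0.1)
opt = tf.train.AdadeltaOptimizer(rate, rho=0.95, epsilon=1e-8)
opt = tf.train.FtrlOptimizer(rate, l1_regularization_strength=0.0, l2_regularization_strength=0.0)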

Usage of global_step

# 3 ways to define it
global_steps = tf.Variable(0, trainable=False)
global_steps = tf.train.get_or_create_global_step()
global_steps = tf.contrib.framework.get_or_create_global_step()

opt_op = opt.minimize(loss, global_step = global_steps)
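
For context, minimize() is a shorthand for compute_gradients() followed by apply_gradients(), and it is the apply_gradients() step that actually increments global_step. A minimal sketch of the equivalent two-step form (reusing opt, loss and global_steps from above):

grads_and_vars = opt.compute_gradients(loss)  # list of (gradient, variable) pairs
opt_op = opt.apply_gradients(grads_and_vars, global_step=global_steps)  # increments global_steps by 1 per run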

Only when the global_step=global_steps argument is passed will global_steps (the variable, not the argument name) be incremented by 1 automatically on each training step, which in turn lets the learning rate and other schedules actually advance:

import tensorflow as tf
import numpy as np

x = tf.placeholder(tf.float32, shape=[None, 1], name='x')
y = tf.placeholder(tf.float32, shape=[None, 1], name='y')
w = tf.Variable(tf.constant(0.0))

#global_steps = tf.Variable(0, trainable=False)
#global_steps = tf.train.get_or_create_global_step()
global_steps = tf.contrib.framework.get_or_create_global_step()

# rate starts at 0.1 and is multiplied by 2 every 10 steps (smoothly, since staircase=False)
learning_rate = tf.train.exponential_decay(0.1, global_steps, 10, 2, staircase=False)
loss = tf.pow(w*x - y, 2)

# passing global_step=global_steps makes each run of train_step increment global_steps by 1
train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_steps)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for i in range(10):
        sess.run(train_step, feed_dict={x: np.linspace(1, 2, 10).reshape([10, 1]),
                                        y: np.linspace(1, 2, 10).reshape([10, 1])})
        print("--Step: %2d, learn_rate: %f." % (sess.run(global_steps), sess.run(learning_rate)))

If global_step=global_steps is omitted, learning_rate stays at 0.1 forever. [reference]
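
For reference, tf.train.exponential_decay computes decayed_rate = initial_rate * decay_rate ** (global_step / decay_steps) (the exponent is floored when staircase=True), so the rate above grows by a factor of 2 every 10 steps. A small sketch reproducing the printed rates outside the graph, assuming the loop ran steps 1 through 10:

initial_rate, decay_steps, decay_rate = 0.1, 10, 2.0
for step in range(1, 11):
    print("--Step: %2d, learn_rate: %f." % (step, initial_rate * decay_rate ** (step / float(decay_steps))))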
