# Optimizer selection cheat-sheet (TensorFlow 1.x ``tf.train`` API).
# Each ``optimizer = ...`` line below is an alternative — keep exactly one.

# SGD
optimizer = tf.train.GradientDescentOptimizer(learning_rate=self.learning_rate)
# Momentum (momentum coefficient 0.9)
# NOTE(review): uses bare ``lr`` while the other lines use self.learning_rate —
# confirm which name is actually in scope here.
optimizer = tf.train.MomentumOptimizer(lr, 0.9)
# AdaGrad — fixed: the class is ``AdagradOptimizer``;
# ``AdagradientOptimizer`` does not exist and raises AttributeError.
optimizer = tf.train.AdagradOptimizer(learning_rate=self.learning_rate)
# RMSProp (learning_rate=0.001, decay=0.9)
optimizer = tf.train.RMSPropOptimizer(0.001, 0.9)
# ADAM
optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate, epsilon=1e-08)
# For the remaining hyper-parameters, consult the official TensorFlow documentation.

# Option 1: optimize directly (compute + apply gradients in one call).
train_op = optimizer.minimize(loss)

# Option 2: fetch the gradients explicitly so they can be clipped
# (or otherwise processed) before being applied.
gradients, v = zip(*optimizer.compute_gradients(loss))
gradients, _ = tf.clip_by_global_norm(gradients, self.max_gradient_norm)
train_op = optimizer.apply_gradients(zip(gradients, v), global_step=self.global_step)