TensorFlow: Learning Rate Decay

Because none of the examples below actually define a training op with minimize (the single line of code shown next), global_step is never incremented automatically. The examples therefore drive global_step with the variable of the for loop that controls the number of training iterations.

tf.train.Optimizer.minimize(loss, global_step=global_step)
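
For reference, a minimal sketch (TF 1.x; the parameter and loss here are made-up placeholders) of the usual pattern, in which passing global_step to minimize makes every training step increment it and thereby advance the decay schedule:

import tensorflow as tf

global_step = tf.Variable(0, trainable=False, name='global_step')
learning_rate = tf.train.exponential_decay(
    learning_rate=0.1, global_step=global_step,
    decay_steps=100, decay_rate=0.96, staircase=True)

w = tf.Variable(1.0)   # hypothetical model parameter
loss = tf.square(w)    # hypothetical loss
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
# Passing global_step makes minimize() increment it by 1 after every
# update, which in turn advances the decay schedule automatically.
train_op = optimizer.minimize(loss, global_step=global_step)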
Figure 1: Decayed learning rate
1/5. Cosine decay: cosine_decay

The cosine-decayed learning rate is computed as:
global_step = min(global_step, decay_steps)
cosine_decay = 0.5 * (1 + cos(pi * global_step / decay_steps))
decayed = (1 - alpha) * cosine_decay + alpha
decayed_learning_rate = learning_rate * decayed
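
As a cross-check, the formula can be evaluated directly in plain Python (a minimal sketch; in tf.train.cosine_decay, alpha defaults to 0.0):

import math

def cosine_decay(learning_rate, global_step, decay_steps, alpha=0.0):
    # Clamp the step so the rate stays at its floor once decay_steps is reached.
    step = min(global_step, decay_steps)
    cosine = 0.5 * (1 + math.cos(math.pi * step / decay_steps))
    return learning_rate * ((1 - alpha) * cosine + alpha)

# Halfway through the schedule the rate is half the initial value:
# cosine_decay(0.1, 25, 50) == 0.05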

# coding:utf-8
import matplotlib.pyplot as plt
import tensorflow as tf
 
y1 = []
y2 = []
y3 = []
N = 200
 
with tf.Session() as sess:
	for global_step in range(N):
		# Cosine decay
		learning_rate1 = tf.train.cosine_decay(
			learning_rate=0.1, global_step=global_step, decay_steps=50)
		# Linear cosine decay
		learning_rate2 = tf.train.linear_cosine_decay(
			learning_rate=0.1, global_step=global_step, decay_steps=50,
			num_periods=0.2, alpha=0.5, beta=0.2)
		# Noisy linear cosine decay
		learning_rate3 = tf.train.noisy_linear_cosine_decay(
			learning_rate=0.1, global_step=global_step, decay_steps=50,
			initial_variance=0.01, variance_decay=0.1,
			num_periods=0.2, alpha=0.5, beta=0.2)

		y1.append(sess.run(learning_rate1))
		y2.append(sess.run(learning_rate2))
		y3.append(sess.run(learning_rate3))
 
x = range(N)
fig = plt.figure()
ax = fig.add_subplot(111)
plt.plot(x, y1, 'b-', linewidth=2)
plt.plot(x, y2, 'r-', linewidth=2)
plt.plot(x, y3, 'g-', linewidth=2)
plt.title('cosine_decay')
ax.set_xlabel('step')
ax.set_ylabel('learning rate')
plt.savefig("1.png")

2/5. Exponential decay: exponential_decay

The exponentially decayed learning rate is computed as:
decayed_learning_rate = learning_rate * decay_rate ^ (global_step / decay_steps)
The natural-exponential variant (natural_exp_decay) is computed as:
decayed_learning_rate = learning_rate * exp(-decay_rate * global_step / decay_steps)
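
Both formulas, including the staircase variant that floors global_step / decay_steps so the rate drops in discrete steps, can be sketched in plain Python:

import math

def exponential_decay(lr, global_step, decay_steps, decay_rate, staircase=False):
    p = global_step / decay_steps
    if staircase:
        p = math.floor(p)  # drop the rate only once every decay_steps steps
    return lr * decay_rate ** p

def natural_exp_decay(lr, global_step, decay_steps, decay_rate, staircase=False):
    p = global_step / decay_steps
    if staircase:
        p = math.floor(p)
    return lr * math.exp(-decay_rate * p)

# e.g. exponential_decay(0.5, 20, 10, 0.9) == 0.5 * 0.9 ** 2 == 0.405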

# coding:utf-8
import matplotlib.pyplot as plt
import tensorflow as tf

y1 = []
y2 = []
y3 = []
y4 = []
N = 200

with tf.Session() as sess:
    for global_step in range(N):
        # Staircase natural exponential decay
        learning_rate1 = tf.train.natural_exp_decay(
            learning_rate=0.5, global_step=global_step, decay_steps=10, decay_rate=0.9, staircase=True)
        # Continuous natural exponential decay
        learning_rate2 = tf.train.natural_exp_decay(
            learning_rate=0.5, global_step=global_step, decay_steps=10, decay_rate=0.9, staircase=False)
        # Staircase exponential decay
        learning_rate3 = tf.train.exponential_decay(
            learning_rate=0.5, global_step=global_step, decay_steps=10, decay_rate=0.9, staircase=True)
        # Continuous exponential decay
        learning_rate4 = tf.train.exponential_decay(
            learning_rate=0.5, global_step=global_step, decay_steps=10, decay_rate=0.9, staircase=False)

        y1.append(sess.run(learning_rate1))
        y2.append(sess.run(learning_rate2))
        y3.append(sess.run(learning_rate3))
        y4.append(sess.run(learning_rate4))

x = range(N)
fig = plt.figure()
ax = fig.add_subplot(111)

plt.plot(x, y1, 'r-', linewidth=2)
plt.plot(x, y2, 'g--', linewidth=2)
plt.plot(x, y3, 'b-', linewidth=2)
plt.plot(x, y4, 'y-', linewidth=2)

plt.title('exp_decay')
ax.set_xlabel('step')
ax.set_ylabel('learning rate')
plt.savefig("1.png")

3/5. Inverse time decay: inverse_time_decay

The inverse-time decayed learning rate is computed as:
decayed_learning_rate = learning_rate / (1 + decay_rate * global_step / decay_steps)
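
A plain-Python sketch of the same formula, with the staircase variant flooring the step ratio:

import math

def inverse_time_decay(lr, global_step, decay_steps, decay_rate, staircase=False):
    p = global_step / decay_steps
    if staircase:
        p = math.floor(p)
    return lr / (1 + decay_rate * p)

# e.g. inverse_time_decay(0.1, 100, 20, 0.2) == 0.1 / (1 + 0.2 * 5) == 0.05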

# coding:utf-8
import matplotlib.pyplot as plt
import tensorflow as tf

y1 = []
y2 = []
N = 200

with tf.Session() as sess:
    for global_step in range(N):
        # Staircase inverse time decay
        learning_rate1 = tf.train.inverse_time_decay(
            learning_rate=0.1, global_step=global_step, decay_steps=20, decay_rate=0.2, staircase=True)
        # Continuous inverse time decay
        learning_rate2 = tf.train.inverse_time_decay(
            learning_rate=0.1, global_step=global_step, decay_steps=20, decay_rate=0.2, staircase=False)

        y1.append(sess.run(learning_rate1))
        y2.append(sess.run(learning_rate2))

x = range(N)
fig = plt.figure()
ax = fig.add_subplot(111)
plt.plot(x, y1, 'r-', linewidth=2)
plt.plot(x, y2, 'g-', linewidth=2)
plt.title('inverse_time_decay')
ax.set_xlabel('step')
ax.set_ylabel('learning rate')
plt.savefig("1.png")

4/5. Piecewise constant decay: piecewise_constant
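
piecewise_constant has no decay formula: values[i] is used while global_step <= boundaries[i], and values[-1] applies beyond the last boundary, so with the settings below the rate is 0.1 for steps 0-10, 0.07 for steps 11-20, 0.025 for steps 21-30, and 0.0125 afterwards. A plain-Python sketch of that lookup:

import bisect

def piecewise_constant(global_step, boundaries, values):
    # values[i] applies while global_step <= boundaries[i];
    # values[-1] applies beyond the last boundary.
    return values[bisect.bisect_left(boundaries, global_step)]

# piecewise_constant(10, [10, 20, 30], [0.1, 0.07, 0.025, 0.0125]) == 0.1
# piecewise_constant(11, [10, 20, 30], [0.1, 0.07, 0.025, 0.0125]) == 0.07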

#coding:utf-8
import matplotlib.pyplot as plt
import tensorflow as tf

boundaries = [10, 20, 30]
values = [0.1, 0.07, 0.025, 0.0125]

y = []
N = 40

with tf.Session() as sess:
	for global_step in range(N):
		learning_rate = tf.train.piecewise_constant(
			global_step,
			boundaries=boundaries,
			values=values)
		y.append(sess.run(learning_rate))

x = range(N)
plt.plot(x, y, 'r-', linewidth=2)
plt.title('piecewise_constant')
plt.savefig("1.png")

5/5. Polynomial decay: polynomial_decay

The polynomial-decayed learning rate is computed as:
decayed_learning_rate = (learning_rate - end_learning_rate) *
                        (1 - global_step / decay_steps) ^ power +
                        end_learning_rate
When cycle is False, the step is clamped:
global_step = min(global_step, decay_steps)
When cycle is True, decay_steps is stretched to the next multiple past the current step:
decay_steps = decay_steps * ceil(global_step / decay_steps)
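
A plain-Python sketch of both modes (the max(1, ...) guard mirrors how TensorFlow treats step 0 as the first cycle to avoid a zero denominator):

import math

def polynomial_decay(lr, global_step, decay_steps,
                     end_lr=0.0001, power=1.0, cycle=False):
    if cycle:
        # Restart the schedule each time decay_steps is exhausted;
        # treat step 0 as the first cycle to avoid dividing by zero.
        decay_steps = decay_steps * max(1, math.ceil(global_step / decay_steps))
    else:
        global_step = min(global_step, decay_steps)
    frac = 1 - global_step / decay_steps
    return (lr - end_lr) * frac ** power + end_lr

# polynomial_decay(0.1, 25, 50, end_lr=0.01, power=0.5) ~= 0.0736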

#coding:utf-8
import matplotlib.pyplot as plt
import tensorflow as tf

y1 = []
y2 = []
N = 200

with tf.Session() as sess:
	for global_step in range(N):
		# cycle=False
		learning_rate1 = tf.train.polynomial_decay(
			learning_rate=0.1,
			global_step=global_step,
			decay_steps=50,
			end_learning_rate=0.01,
			power=0.5)
		# cycle=True
		learning_rate2 = tf.train.polynomial_decay(
			learning_rate=0.1,
			global_step=global_step,
			decay_steps=50,
			end_learning_rate=0.01,
			power=0.5,
			cycle=True)

		y1.append(sess.run(learning_rate1))
		y2.append(sess.run(learning_rate2))

x = range(N)
fig = plt.figure()
ax = fig.add_subplot(111)
plt.plot(x, y1, 'r-', linewidth=2)
plt.plot(x, y2, 'g--', linewidth=2)
plt.title('polynomial_decay')
ax.set_xlabel('step')
ax.set_ylabel('learning rate')
plt.show()
