NNDL Assignment 12: 2D Visualization of Optimization Algorithms

Briefly introduce the optimization algorithms in the figure, implement them in code, and visualize them in 2D.

1. Function to be optimized: f(x) = x^2

# coding: utf-8
import numpy as np
import matplotlib.pyplot as plt
from collections import OrderedDict
 
 
class SGD:
    """随机梯度下降法(Stochastic Gradient Descent)"""
 
    def __init__(self, lr=0.01):
        self.lr = lr
 
    def update(self, params, grads):
        for key in params.keys():
            params[key] -= self.lr * grads[key]
 
 
class Momentum:
    """Momentum SGD"""
 
    def __init__(self, lr=0.01, momentum=0.9):
        self.lr = lr
        self.momentum = momentum
        self.v = None
 
    def update(self, params, grads):
        if self.v is None:
            self.v = {}
            for key, val in params.items():
                self.v[key] = np.zeros_like(val)
 
        for key in params.keys():
            self.v[key] = self.momentum * self.v[key] - self.lr * grads[key]
            params[key] += self.v[key]
 
 
class Nesterov:
    """Nesterov's Accelerated Gradient (http://arxiv.org/abs/1212.0901)"""
 
    def __init__(self, lr=0.01, momentum=0.9):
        self.lr = lr
        self.momentum = momentum
        self.v = None
 
    def update(self, params, grads):
        if self.v is None:
            self.v = {}
            for key, val in params.items():
                self.v[key] = np.zeros_like(val)
 
        for key in params.keys():
            self.v[key] *= self.momentum
            self.v[key] -= self.lr * grads[key]
            params[key] += self.momentum * self.momentum * self.v[key]
            params[key] -= (1 + self.momentum) * self.lr * grads[key]
 
 
class AdaGrad:
    """AdaGrad"""
 
    def __init__(self, lr=0.01):
        self.lr = lr
        self.h = None
 
    def update(self, params, grads):
        if self.h is None:
            self.h = {}
            for key, val in params.items():
                self.h[key] = np.zeros_like(val)
 
        for key in params.keys():
            self.h[key] += grads[key] * grads[key]
            params[key] -= self.lr * grads[key] / (np.sqrt(self.h[key]) + 1e-7)
 
 
class RMSprop:
    """RMSprop"""
 
    def __init__(self, lr=0.01, decay_rate=0.99):
        self.lr = lr
        self.decay_rate = decay_rate
        self.h = None
 
    def update(self, params, grads):
        if self.h is None:
            self.h = {}
            for key, val in params.items():
                self.h[key] = np.zeros_like(val)
 
        for key in params.keys():
            self.h[key] *= self.decay_rate
            self.h[key] += (1 - self.decay_rate) * grads[key] * grads[key]
            params[key] -= self.lr * grads[key] / (np.sqrt(self.h[key]) + 1e-7)
 
 
class Adam:
    """Adam (http://arxiv.org/abs/1412.6980v8)"""
 
    def __init__(self, lr=0.001, beta1=0.9, beta2=0.999):
        self.lr = lr
        self.beta1 = beta1
        self.beta2 = beta2
        self.iter = 0
        self.m = None
        self.v = None
 
    def update(self, params, grads):
        if self.m is None:
            self.m, self.v = {}, {}
            for key, val in params.items():
                self.m[key] = np.zeros_like(val)
                self.v[key] = np.zeros_like(val)
 
        self.iter += 1
        lr_t = self.lr * np.sqrt(1.0 - self.beta2 ** self.iter) / (1.0 - self.beta1 ** self.iter)
 
        for key in params.keys():
            self.m[key] += (1 - self.beta1) * (grads[key] - self.m[key])
            self.v[key] += (1 - self.beta2) * (grads[key] ** 2 - self.v[key])
 
            params[key] -= lr_t * self.m[key] / (np.sqrt(self.v[key]) + 1e-7)
 
 
# def f(x, y):
#     return x ** 2 / 20.0 + y ** 2
def f(x, _):
    return x ** 2

# def df(x, y):
#     return x / 10.0, 2.0 * y
def df(x, _):
    return 2 * x, 0

# init_pos = (-7.0, 2.0)
init_pos = (-7.0,)
params = {}
# params['x'], params['y'] = init_pos[0], init_pos[1]
params['x'] = init_pos[0]
grads = {}
# grads['x'], grads['y'] = 0, 0
grads['x'] = 0
learningrate = [0.9, 0.3, 0.3, 0.6, 0.3, 0.6, 0.6]
optimizers = OrderedDict()
optimizers["SGD"] = SGD(lr=learningrate[0])
optimizers["Momentum"] = Momentum(lr=learningrate[1])
optimizers["Nesterov"] = Nesterov(lr=learningrate[2])
optimizers["AdaGrad"] = AdaGrad(lr=learningrate[3])
optimizers["RMSprop"] = RMSprop(lr=learningrate[4])
optimizers["Adam"] = Adam(lr=learningrate[5])
 
idx = 1
id_lr = 0
 
for key in optimizers:
    optimizer = optimizers[key]
    lr = learningrate[id_lr]
    id_lr = id_lr + 1
    x_history = []
    y_history = []
    # params['x'], params['y'] = init_pos[0], init_pos[1]
    params['x'] = init_pos[0]
    for i in range(30):
        x_history.append(params['x'])
        # y_history.append(params['y'])
        y_history.append(0)

        # grads['x'], grads['y'] = df(params['x'], params['y'])
        grads['x'], _ = df(params['x'], 0)
        optimizer.update(params, grads)
 
    x = np.arange(-10, 10, 0.01)
    y = np.arange(-5, 5, 0.01)
 
    X, Y = np.meshgrid(x, y)
    Z = f(X, Y)
    # for simple contour line
    mask = Z > 7
    Z[mask] = 0
 
    # plot
    plt.subplot(2, 3, idx)
    idx += 1
    plt.plot(x_history, y_history, 'o-', color="r")
    # plt.contour(X, Y, Z)  # contour lines with the default colormap
    plt.contour(X, Y, Z, cmap='gray')  # contour lines drawn with a gray colormap
    plt.ylim(-10, 10)
    plt.xlim(-10, 10)
    plt.plot(0, 0, '+')
    # plt.axis('off')
    # plt.title(key+'\nlr='+str(lr), fontstyle='italic')
    plt.text(0, 10, key + '\nlr=' + str(lr), fontsize=20, color="b",
             verticalalignment='top', horizontalalignment='center', fontstyle='italic')
    plt.xlabel("x")
    plt.ylabel("y")
 
plt.subplots_adjust(wspace=0, hspace=0)  # adjust spacing between subplots
plt.show()


2. Function to be optimized: f(x, y) = x^2 / 20 + y^2

# coding: utf-8
import numpy as np
import matplotlib.pyplot as plt
from collections import OrderedDict
 
 
class SGD:
    """随机梯度下降法(Stochastic Gradient Descent)"""
 
    def __init__(self, lr=0.01):
        self.lr = lr
 
    def update(self, params, grads):
        for key in params.keys():
            params[key] -= self.lr * grads[key]
 
 
class Momentum:
    """Momentum SGD"""
 
    def __init__(self, lr=0.01, momentum=0.9):
        self.lr = lr
        self.momentum = momentum   # momentum coefficient: how strongly the previous velocity influences the current update
        self.v = None
 
    def update(self, params, grads):
        if self.v is None:
            self.v = {}
            for key, val in params.items():
                self.v[key] = np.zeros_like(val)
 
        for key in params.keys():
            self.v[key] = self.momentum * self.v[key] - self.lr * grads[key]
            params[key] += self.v[key]
 
 
class Nesterov:
    """Nesterov's Accelerated Gradient (http://arxiv.org/abs/1212.0901)"""
 
    def __init__(self, lr=0.01, momentum=0.9):
        self.lr = lr
        self.momentum = momentum
        self.v = None
 
    def update(self, params, grads):
        if self.v is None:
            self.v = {}
            for key, val in params.items():
                self.v[key] = np.zeros_like(val)
 
        for key in params.keys():
            self.v[key] *= self.momentum
            self.v[key] -= self.lr * grads[key]  # update the velocity with the current gradient
            params[key] += self.momentum * self.momentum * self.v[key]
            params[key] -= (1 + self.momentum) * self.lr * grads[key]  # look-ahead parameter update
 
 
class AdaGrad:
    """AdaGrad"""
 
    def __init__(self, lr=0.01):
        self.lr = lr
        self.h = None
 
    def update(self, params, grads):
        if self.h is None:
            self.h = {}
            for key, val in params.items():
                self.h[key] = np.zeros_like(val)
 
        for key in params.keys():
            self.h[key] += grads[key] * grads[key]  # accumulate the sum of squared gradients
            params[key] -= self.lr * grads[key] / (np.sqrt(self.h[key]) + 1e-7)  # the denominator adapts the effective learning rate per parameter
 
 
class RMSprop:
    """RMSprop"""
 
    def __init__(self, lr=0.01, decay_rate=0.9):
        self.lr = lr
        self.decay_rate = decay_rate    # decay rate of the exponentially weighted moving average, commonly set to 0.9
        self.h = None
 
    def update(self, params, grads):
        if self.h is None:
            self.h = {}
            for key, val in params.items():
                self.h[key] = np.zeros_like(val)
 
        for key in params.keys():
            self.h[key] *= self.decay_rate
            self.h[key] += (1 - self.decay_rate) * grads[key] * grads[key]  # exponentially weighted moving average of the squared gradients
            params[key] -= self.lr * grads[key] / (np.sqrt(self.h[key]) + 1e-7)  # update; 1e-7 is a small constant added for numerical stability
 
 
class Adam:
    """Adam (http://arxiv.org/abs/1412.6980v8)"""
 
    def __init__(self, lr=0.001, beta1=0.9, beta2=0.999):
        self.lr = lr
        self.beta1 = beta1
        self.beta2 = beta2
        self.iter = 0
        self.m = None
        self.v = None
 
    def update(self, params, grads):
        if self.m is None:
            self.m, self.v = {}, {}
            for key, val in params.items():
                self.m[key] = np.zeros_like(val)
                self.v[key] = np.zeros_like(val)
 
        self.iter += 1
        lr_t = self.lr * np.sqrt(1.0 - self.beta2 ** self.iter) / (1.0 - self.beta1 ** self.iter)
 
        for key in params.keys():
            self.m[key] += (1 - self.beta1) * (grads[key] - self.m[key])
            self.v[key] += (1 - self.beta2) * (grads[key] ** 2 - self.v[key])
 
            params[key] -= lr_t * self.m[key] / (np.sqrt(self.v[key]) + 1e-7)
 
 
def f(x, y):
    return x ** 2 / 20.0 + y ** 2
# def f(x,_):
#     return x**2
 
def df(x, y):
    return x / 10.0, 2.0 * y
# def df(x,_):
#     return 2*x,0
 
init_pos = (-7.0, 2.0)
# init_pos=(-7.0,)
params = {}
params['x'], params['y'] = init_pos[0], init_pos[1]
# params['x']=init_pos[0]
grads = {}
grads['x'], grads['y'] = 0, 0
# grads['x']=0
learningrate = [0.9, 0.1, 0.2, 0.6, 0.6, 0.3, 0.6]
optimizers = OrderedDict()
optimizers["SGD"] = SGD(lr=learningrate[0])
optimizers["Momentum"] = Momentum(lr=learningrate[1])
optimizers["Nesterov"] = Nesterov(lr=learningrate[2])
optimizers["AdaGrad"] = AdaGrad(lr=learningrate[3])
optimizers["RMSprop"] = RMSprop(lr=learningrate[4])
optimizers["Adam"] = Adam(lr=learningrate[5])
 
idx = 1
id_lr = 0
 
for key in optimizers:
    optimizer = optimizers[key]
    lr = learningrate[id_lr]
    id_lr = id_lr + 1
    x_history = []
    y_history = []
    params['x'], params['y'] = init_pos[0], init_pos[1]
    # params['x']=init_pos[0]
    for i in range(30):
        x_history.append(params['x'])
        y_history.append(params['y'])
        # y_history.append(0)
 
        grads['x'], grads['y'] = df(params['x'], params['y'])
        # grads['x'],_=df(params['x'],0)
        optimizer.update(params, grads)
 
    x = np.arange(-10, 10, 0.01)
    y = np.arange(-5, 5, 0.01)
 
    X, Y = np.meshgrid(x, y)
    Z = f(X, Y)
    # for simple contour line
    mask = Z > 7
    Z[mask] = 0
 
    # plot
    plt.subplot(2, 3, idx)
    idx += 1
    plt.plot(x_history, y_history, 'o-', color="r")
    # plt.contour(X, Y, Z)  # contour lines with the default colormap
    plt.contour(X, Y, Z, cmap='gray')  # contour lines drawn with a gray colormap
    plt.ylim(-10, 10)
    plt.xlim(-10, 10)
    plt.plot(0, 0, '+')
    # plt.axis('off')
    # plt.title(key+'\nlr='+str(lr), fontstyle='italic')
    plt.text(0, 10, key + '\nlr=' + str(lr), fontsize=20, color="b",
             verticalalignment='top', horizontalalignment='center', fontstyle='italic')
    plt.xlabel("x")
    plt.ylabel("y")
 
plt.subplots_adjust(wspace=0, hspace=0)  # adjust spacing between subplots
plt.show()


3. Why the different trajectories form

SGD: SGD is the most basic optimization algorithm; it updates the parameters by stepping along the negative gradient with a fixed learning rate (in the stochastic setting the gradient is computed from individual samples, which adds noise). Because each step follows only the local gradient direction, the trajectory oscillates strongly on this elongated bowl: the gradient along y is much larger than along x, so the iterates do not head straight for the minimum but zigzag across the valley while creeping along the x axis.
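A minimal numeric sketch (not part of the assignment script) that makes the zigzag concrete on f(x, y) = x^2 / 20 + y^2, starting from the same init_pos and with the same lr = 0.9 as the SGD subplot:

# Hand-rolled SGD steps on f(x, y) = x**2 / 20 + y**2.
x, y, lr = -7.0, 2.0, 0.9
for step in range(5):
    gx, gy = x / 10.0, 2.0 * y           # analytic gradient of f
    x, y = x - lr * gx, y - lr * gy      # plain SGD update
    print(f"step {step + 1}: x = {x:+.3f}, y = {y:+.3f}")
# Each step multiplies y by (1 - 2 * lr) = -0.8, so y flips sign while shrinking,
# whereas x is only multiplied by (1 - lr / 10) = 0.91 and decays slowly; this is
# exactly the vertical zigzag seen in the SGD panel.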

Momentum: the momentum method adds a velocity term to the update, which acts like inertia along the descent direction. When consecutive gradients agree, the velocity accumulates and convergence speeds up; when the gradient flips sign, the accumulated velocity damps the oscillation. The resulting trajectory is therefore a smoother, more continuous path than SGD's.

Nesterov: Nesterov's accelerated gradient is a refinement of momentum. Conceptually it first moves ahead along the current velocity and then evaluates the gradient at that look-ahead position to correct the step. This gives a better estimate of where the parameters are heading and, in many cases, converges faster than standard momentum.
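For reference, the textbook form of Nesterov's update evaluates the gradient at a look-ahead point (standard background, not taken from the script above):

v_{t+1} = \mu v_t - \eta \nabla f(\theta_t + \mu v_t), \qquad \theta_{t+1} = \theta_t + v_{t+1}

The Nesterov class above uses a rewritten update (following the arXiv reference in its docstring) in which the gradient is evaluated at the current parameters; it is a close variant of the rule above rather than a line-for-line transcription.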

AdaGrad: AdaGrad gives every parameter its own effective learning rate, scaled by the accumulated squared gradients of that parameter. This works well for sparse data and for features on very different scales. Because the accumulator only grows, the effective learning rate shrinks monotonically, so the trajectory flattens out more and more as the iterations proceed.

RMSprop: RMSprop is also an adaptive-learning-rate method, but it replaces AdaGrad's ever-growing sum with an exponentially weighted moving average of the squared gradients. This lets it handle non-stationary objectives and keeps the effective learning rate from decaying to zero, so its trajectory stays smooth without stalling.
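Written out from the two update() methods above, the only difference between AdaGrad and RMSprop is how the squared gradient is accumulated (h is the accumulator, g_t the gradient, \rho the decay_rate, \epsilon = 1e-7):

\text{AdaGrad:}\quad h_t = h_{t-1} + g_t \odot g_t
\text{RMSprop:}\quad h_t = \rho\, h_{t-1} + (1 - \rho)\, g_t \odot g_t
\text{both:}\quad \theta_{t+1} = \theta_t - \frac{\eta}{\sqrt{h_t} + \epsilon}\, g_t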

Adam: Adam combines momentum with an adaptive learning rate. It maintains an estimate of the first moment of the gradient (the momentum term) and of the second moment (for per-parameter scaling), so it achieves both fast convergence and stable updates. Its trajectory is usually smooth and reaches the minimum quickly.
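In the standard statement of Adam (from the cited paper, not read off the script above), the moment estimates are bias-corrected before the update:

m_t = \beta_1 m_{t-1} + (1 - \beta_1) g_t, \qquad v_t = \beta_2 v_{t-1} + (1 - \beta_2) g_t^2
\hat{m}_t = m_t / (1 - \beta_1^t), \qquad \hat{v}_t = v_t / (1 - \beta_2^t)
\theta_{t+1} = \theta_t - \eta\, \hat{m}_t / (\sqrt{\hat{v}_t} + \epsilon)

The Adam class above folds the two correction factors into lr_t = lr * sqrt(1 - beta2**t) / (1 - beta1**t) and applies it to the uncorrected m and v, a common shortcut that is approximately equivalent when \epsilon is small.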


Analysis of the advantages and disadvantages of each algorithm

SGD

Pros: simple to compute; small memory footprint.

Cons: converges slowly and can get stuck in local minima; the update direction is unstable, which causes oscillation.

Momentum

Pros: accelerates convergence, especially on flat or noisy surfaces; damps oscillation and improves stability.

Cons: an overly large accumulated velocity can increase the risk of overshooting; the momentum coefficient is an extra hyperparameter to tune.

Nesterov

Pros: faster convergence on convex problems; the look-ahead gradient helps avoid misleading steps on non-convex problems.

Cons: requires slightly more computation than standard momentum.

AdaGrad

Pros: per-parameter adaptive learning rate; well suited to sparse data and features on different scales.

Cons: the accumulated squared gradients only grow, so the effective learning rate decays monotonically; large early gradients dominate the accumulator and later updates become too small, making it hard to converge in the later stages.

RMSprop

Pros: adaptive learning rate; by replacing AdaGrad's cumulative sum with a moving average it keeps the step size from vanishing, which makes the algorithm more stable.

Cons: on some problems the effective learning rate can still end up too small or too large.

Adam

Pros: combines momentum with an adaptive learning rate, giving fast convergence and stable parameter updates; works well for most problems.

Cons: more hyperparameters to tune; may overfit on small datasets.
