Implementations of Gradient Descent and Accelerated Gradient Descent
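Both routines below solve the linear least-squares problem

    min_x f(x) = 1/2 * ||Ax - b||^2,    with gradient  grad f(x) = A^T (A x - b).

GradientDescent takes plain steps x_{k+1} = x_k - gamma * grad f(x_k); AccelerateGradientDescent is Nesterov's accelerated variant, which mixes each gradient step with the previous one using a precomputed momentum schedule.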

import numpy as np
import numpy.linalg as nl

def GradientDescent(A, b):
    # Plain gradient descent for min_x 1/2 * ||Ax - b||^2.
    num = 0
    xOld = np.zeros((A.shape[1], 1))   # start from the origin
    gamma = 1e-8                       # fixed step size
    while num < 1000:
        # Gradient of the objective is A^T (A x - b); computing it as
        # A.T @ (A @ xOld - b) avoids forming the n x n matrix A^T A
        # on every iteration.
        xNew = xOld - gamma * (A.T @ (A @ xOld - b))
        if nl.norm(xNew - xOld) < 1e-4:   # stop once the iterates stall
            break
        xOld = xNew
        num = num + 1
    return xNew
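The fixed step gamma = 1e-8 is tuned to the random 1000 x 1000 test problem in the main block below. A minimal sketch (not part of the original code; safe_step_size is a hypothetical helper) of deriving a safe step from the spectral norm of A instead:

def safe_step_size(A):
    # The gradient of 1/2 * ||Ax - b||^2 is Lipschitz with constant
    # L = ||A||_2^2, the largest eigenvalue of A^T A; any fixed step
    # in (0, 1/L] is guaranteed to converge for this convex objective.
    L = nl.norm(A, 2) ** 2
    return 1.0 / L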
        
def AccelerateGradientDescent(A, b):
    # Nesterov's accelerated gradient descent for min_x 1/2 * ||Ax - b||^2.
    n = A.shape[1]
    maxIter = 1000
    xOld = np.zeros((n, 1))
    yOld = np.zeros((n, 1))
    beta = 1e-8                        # fixed step size, as in GradientDescent
    # Momentum schedule: lamda[0] = 0,
    # lamda[k] = (1 + sqrt(1 + 4 * lamda[k-1]^2)) / 2, and mixing weights
    # gamma[k] = (1 - lamda[k]) / lamda[k+1].
    lamda = np.zeros(maxIter + 1)
    gamma = np.zeros(maxIter)
    for i in range(1, maxIter + 1):
        lamda[i] = (1 + np.sqrt(1 + 4 * np.square(lamda[i - 1]))) / 2
    for i in range(0, maxIter):
        gamma[i] = (1 - lamda[i]) / lamda[i + 1]
    num = 0
    while num < maxIter:
        # Gradient step from the current iterate ...
        yNew = xOld - beta * (A.T @ (A @ xOld - b))
        # ... then mix it with the previous gradient step (momentum).
        xNew = (1 - gamma[num]) * yNew + gamma[num] * yOld
        # gamma[0] = 1 makes the first update a no-op (xNew stays at zero),
        # so only test for convergence once num > 1.
        if nl.norm(xNew - xOld) < 1e-5 and num > 1:
            break
        xOld = xNew
        yOld = yNew
        num = num + 1
    return xNew
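Written out, the two updates in the loop are the standard Nesterov recursion: with lamda_0 = 0 and lamda_k = (1 + sqrt(1 + 4 * lamda_{k-1}^2)) / 2,

    y_{k+1} = x_k - beta * grad f(x_k)
    x_{k+1} = (1 - gamma_k) * y_{k+1} + gamma_k * y_k,    gamma_k = (1 - lamda_k) / lamda_{k+1},

which is equivalent to x_{k+1} = y_{k+1} + ((lamda_k - 1) / lamda_{k+1}) * (y_{k+1} - y_k). On this smooth convex objective, plain gradient descent converges at rate O(1/k) in function value, while the accelerated scheme achieves O(1/k^2).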
    
if __name__ == "__main__":
    # Random test problem: A has integer entries in {1, ..., 9} and the
    # right-hand side is built from the known solution xStar = all-ones.
    A = np.random.randint(1, 10, size=(1000, 1000))
    xStar = np.ones((1000, 1))
    b = A @ xStar
    result1 = GradientDescent(A, b)
    print(result1)
    result2 = AccelerateGradientDescent(A, b)
    print(result2)
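    # Sanity check (a sketch, not in the original post): distance of each
    # result from the known solution xStar.
    print("GD error:", nl.norm(result1 - xStar))
    print("AGD error:", nl.norm(result2 - xStar))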