算法最优化之最速下降法(Python 实现)

适应范围:无约束非线性规划问题

例子:

$$\min f(x)=2x_1^2+x_2^2$$

初始化:

$$x^{(1)}=(1,1)^T,\qquad \varepsilon=\frac{1}{10}=0.1$$

第一次迭代:

梯度为

$$\nabla f(x)=\begin{bmatrix}4x_1\\2x_2\end{bmatrix}$$

取负梯度方向

$$d^{(1)}=-\nabla f(x^{(1)})=\begin{bmatrix}-4\\-2\end{bmatrix}$$

$$\|d^{(1)}\|=\sqrt{16+4}=2\sqrt{5}>\frac{1}{10}=\varepsilon$$

从 $x^{(1)}=(1,1)^T$ 出发,沿方向 $d^{(1)}$ 进行一维搜索,求步长 $\lambda_1$,即

$$\min_{\lambda\ge 0}\varphi(\lambda)=f(x^{(1)}+\lambda d^{(1)})$$

$$x^{(1)}+\lambda d^{(1)}=\begin{bmatrix}1\\1\end{bmatrix}+\lambda\begin{bmatrix}-4\\-2\end{bmatrix}=\begin{bmatrix}1-4\lambda\\1-2\lambda\end{bmatrix}$$

$$\varphi(\lambda)=2(1-4\lambda)^2+(1-2\lambda)^2$$

令 $\varphi'(\lambda)=-16(1-4\lambda)-4(1-2\lambda)=0$,解得

$$\lambda_1=\frac{5}{18}$$

得到该方向上的极小点

$$x^{(2)}=x^{(1)}+\lambda_1 d^{(1)}=\begin{bmatrix}-\frac{1}{9}\\[4pt]\frac{4}{9}\end{bmatrix}$$

第二次迭代:

$$d^{(2)}=-\nabla f(x^{(2)})=\begin{bmatrix}\frac{4}{9}\\[4pt]-\frac{8}{9}\end{bmatrix}$$

$$\|d^{(2)}\|=\sqrt{\frac{16}{81}+\frac{64}{81}}=\frac{4\sqrt{5}}{9}>\frac{1}{10}=\varepsilon$$

从 $x^{(2)}=(-\frac{1}{9},\frac{4}{9})^T$ 出发,沿方向 $d^{(2)}$ 进行一维搜索,求步长 $\lambda_2$,即

$$\min_{\lambda\ge 0}\varphi(\lambda)=f(x^{(2)}+\lambda d^{(2)})$$

$$x^{(2)}+\lambda d^{(2)}=\begin{bmatrix}-\frac{1}{9}\\[4pt]\frac{4}{9}\end{bmatrix}+\lambda\begin{bmatrix}\frac{4}{9}\\[4pt]-\frac{8}{9}\end{bmatrix}=\begin{bmatrix}-\frac{1}{9}+\frac{4}{9}\lambda\\[4pt]\frac{4}{9}-\frac{8}{9}\lambda\end{bmatrix}$$

$$\varphi(\lambda)=2\Big(-\frac{1}{9}+\frac{4}{9}\lambda\Big)^2+\Big(\frac{4}{9}-\frac{8}{9}\lambda\Big)^2$$

令 $\varphi'(\lambda)=\frac{16}{9}\Big(-\frac{1}{9}+\frac{4}{9}\lambda\Big)-\frac{16}{9}\Big(\frac{4}{9}-\frac{8}{9}\lambda\Big)=0$,解得

$$\lambda_2=\frac{5}{12}$$

$$x^{(3)}=x^{(2)}+\lambda_2 d^{(2)}=\begin{bmatrix}\frac{2}{27}\\[4pt]\frac{2}{27}\end{bmatrix}$$

第三次迭代:

$$d^{(3)}=-\nabla f(x^{(3)})=\begin{bmatrix}-\frac{8}{27}\\[4pt]-\frac{4}{27}\end{bmatrix}$$

$$\|d^{(3)}\|=\frac{4\sqrt{5}}{27}\approx 0.331>\frac{1}{10}=\varepsilon$$

仍未达到精度要求,继续迭代。

最终结果展示:下面用 Python 实现上述迭代过程并展示结果。

实现代码展示:

import random

import numpy as np

import matplotlib.pyplot as plt

"""
最速下降法 (steepest descent) 演示。

目标函数 f(x) = 2*x(1)^2 + x(2)^2

梯度 g(x) = (4*x(1), 2*x(2))^T

注:代码中的函数名沿用了 rosenbrock,但实际实现的是上面的二次函数,
并非经典的 Rosenbrock 函数。
"""

def goldsteinsearch(f, df, d, x, alpham, rho, t):
    """Goldstein inexact line search.

    Finds a step length ``alpha`` along direction ``d`` from point ``x``
    satisfying both Goldstein conditions for phi(alpha) = f(x + alpha*d):

        phi(alpha) - phi(0) <= rho     * alpha * phi'(0)   (sufficient decrease)
        phi(alpha) - phi(0) >= (1-rho) * alpha * phi'(0)   (step not too small)

    Parameters
    ----------
    f : callable
        Objective function mapping a point to a scalar.
    df : callable
        Gradient of ``f``, mapping a point to an ndarray.
    d : ndarray
        Search direction; expected to be a descent direction so that the
        directional derivative ``df(x) . d`` is negative.
    x : ndarray
        Current iterate.
    alpham : float
        Initial upper bound of the step-length bracket.
    rho : float
        Goldstein parameter, 0 < rho < 0.5.
    t : float
        Expansion factor (> 1) applied while the bracket has no tightened
        upper end.

    Returns
    -------
    float
        A step length satisfying both Goldstein conditions.
    """
    a = 0                        # lower end of the bracket [a, b]
    b = alpham                   # upper end of the bracket
    phi0 = f(x)                  # phi(0) = f(x)
    dphi0 = np.dot(df(x), d)     # phi'(0) = grad(x) . d, should be < 0

    # Random trial step in (0, alpham); kept from the original algorithm.
    alpha = b * random.uniform(0, 1)

    while True:
        phi = f(x + alpha * d)
        if (phi - phi0) <= (rho * alpha * dphi0):
            # Sufficient decrease holds.
            if (phi - phi0) >= ((1 - rho) * alpha * dphi0):
                # Step is also not too small: both conditions met.
                return alpha
            # Step too small: raise the lower end.
            a = alpha
            if b < alpham:
                # Upper end already tightened: bisect the bracket.
                alpha = (a + b) / 2
            else:
                # No tightened upper end yet: expand the trial step.
                alpha = t * alpha
        else:
            # Not enough decrease: shrink the upper end and bisect.
            b = alpha
            alpha = (a + b) / 2

def rosenbrock(x):
    """Evaluate the objective f(x) = 2*x1^2 + x2^2 at point x = (x1, x2).

    NOTE(review): despite the name, this is a plain convex quadratic,
    not the classic Rosenbrock banana function.
    """
    x1, x2 = x[0], x[1]
    return 2 * x1 ** 2 + x2 ** 2

def jacobian(x):
    """Gradient of f(x) = 2*x1^2 + x2^2, i.e. g(x) = (4*x1, 2*x2)^T."""
    x1, x2 = x[0], x[1]
    return np.array([4 * x1, 2 * x2])

def steepest(x0):
    """Steepest-descent minimisation of ``rosenbrock`` starting from ``x0``.

    Each iteration moves along the negative gradient with a step length
    chosen by ``goldsteinsearch``, until the squared gradient norm drops
    below 1e-5 or ``imax`` iterations are reached.  The per-iteration
    squared gradient norm is also written to the log file "最速.txt" in
    the working directory.

    Parameters
    ----------
    x0 : ndarray
        Starting point of shape (2,).

    Returns
    -------
    list
        ``[W, epo]`` where ``W`` is a (2, k) array of visited iterates
        (k = number of iterations performed + 1) and ``epo`` is a
        (2, imax) array whose column i holds (i, squared gradient norm
        before iteration i).
    """
    print('初始点为:')
    print(x0, '\n')
    imax = 20000                      # iteration cap
    W = np.zeros((2, imax))           # iterate history, one column per point
    epo = np.zeros((2, imax))         # (iteration index, error) log
    W[:, 0] = x0
    i = 1
    x = x0
    grad = jacobian(x)
    delta = sum(grad ** 2)            # squared gradient norm = initial error
    # `with` guarantees the log file is closed even if an iteration raises
    # (the original left the handle open).
    with open("最速.txt", 'w') as f:
        while i < imax and delta > 10 ** (-5):
            p = -jacobian(x)          # steepest-descent direction
            x0 = x
            alpha = goldsteinsearch(rosenbrock, jacobian, p, x, 1, 0.1, 2)
            x = x + alpha * p
            W[:, i] = x
            epo[:, i] = np.array((i, delta))
            f.write(str(i) + " " + str(delta) + "\n")
            print(i, np.array((i, delta)))
            grad = jacobian(x)
            delta = sum(grad ** 2)    # error measure for the next test
            i = i + 1
    print("迭代次数为:", i)
    print("近似最优解为:")
    print(x, '\n')
    W = W[:, 0:i]                     # keep only the filled columns
    return [W, epo]

if __name__ == "__main__":
    # Evaluation grid for the contour plot of f(x) = 2*x1^2 + x2^2.
    X1 = np.arange(-1.5, 1.5 + 0.05, 0.05)
    X2 = np.arange(-3.5, 4 + 0.05, 0.05)
    grid_x, grid_y = np.meshgrid(X1, X2)
    levels = 2 * grid_x ** 2 + grid_y ** 2
    # Draw 20 contour lines of the objective.
    plt.contour(grid_x, grid_y, levels, 20)

    # Run steepest descent from the initial point (1, 1).
    start = np.array([1, 1])
    result = steepest(start)
    W = result[0]
    epo = result[1]

    # Overlay the trajectory of the iterates on the contours.
    plt.plot(W[0, :], W[1, :], 'g*-')
    plt.show()

参考文献:最速下降法(梯度下降法)python实现 —— 码神岛(msd.misuland.com)

  • 3
    点赞
  • 15
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值