用共轭梯度法求解下列函数的极小点:$f(x, y) = 4x^2 + 4y^2 - 4xy - 12y$
取初值点 $(x_0, y_0) = (-0.5,\ 1)$
共轭梯度法算法如下:
FR公式:$\beta_k = \dfrac{g_{k+1}^{T} g_{k+1}}{g_k^{T} g_k}$
DM公式:$\beta_k = -\dfrac{g_{k+1}^{T} g_{k+1}}{d_k^{T} g_k}$
PRP公式:$\beta_k = \dfrac{g_{k+1}^{T} (g_{k+1} - g_k)}{g_k^{T} g_k}$
代码部分:
import numpy as np
def fun(x, y):
    """Objective function f(x, y) = 4x^2 + 4y^2 - 4xy - 12y."""
    # Keep the same term order as the formula so float rounding is unchanged.
    quadratic = 4 * x ** 2 + 4 * y ** 2 - 4 * x * y
    return quadratic - 12 * y
def gfun(x, y):
    """Numerical gradient of fun at (x, y).

    Uses central differences with step 1e-6 in each coordinate and
    returns np.array([df/dx, df/dy]).
    """
    h = 1e-6
    dfdx = (fun(x + h, y) - fun(x - h, y)) / (2.0 * h)
    dfdy = (fun(x, y + h) - fun(x, y - h)) / (2.0 * h)
    return np.array([dfdx, dfdy])
def getstep(array_xy, array_d):
    """Exact line search along array_d starting from array_xy.

    Minimizes phi(a) = fun(array_xy + a*array_d) with Newton's method,
    estimating phi' and phi'' by central finite differences (step 1e-6).
    Iterates from a = 1.0 until |phi'(a)| < 1e-6 and returns the step a.
    """
    alpha = 1.0
    tol = 1e-6
    h = 1e-6
    while True:
        mid = array_xy + alpha * array_d
        lo = array_xy + (alpha - h) * array_d
        hi = array_xy + (alpha + h) * array_d
        # phi'(alpha) via central difference.
        dphi = (fun(hi[0], hi[1]) - fun(lo[0], lo[1])) / (2.0 * h)
        if np.abs(dphi) < tol:
            return alpha
        # phi''(alpha) via three-point second-difference stencil.
        ddphi = (fun(hi[0], hi[1]) + fun(lo[0], lo[1]) - 2.0 * fun(mid[0], mid[1])) / (h * h)
        alpha -= dphi / ddphi  # Newton update on phi'.
def FR(grad_xy_new, grad_xy):
    """Fletcher-Reeves coefficient: ||g_new||^2 / ||g_old||^2."""
    numerator = np.dot(grad_xy_new, grad_xy_new)
    denominator = np.dot(grad_xy, grad_xy)
    return numerator / denominator
def PRP(grad_xy_new, grad_xy):
    """Polak-Ribiere-Polyak coefficient: g_new.(g_new - g_old) / ||g_old||^2."""
    diff = grad_xy_new - grad_xy
    return np.dot(grad_xy_new, diff) / np.dot(grad_xy, grad_xy)
def DM(grad_xy_new, grad_xy):
    """DM coefficient: -||g_new||^2 / (d . g_old), taking d = -g_old here.

    NOTE(review): with d substituted by -g_old this reduces algebraically
    to the FR coefficient; the textbook DM formula uses the current search
    direction d_k, which this signature does not receive — confirm intended.
    """
    numerator = -np.dot(grad_xy_new, grad_xy_new)
    denominator = np.dot(-1.0 * grad_xy, grad_xy)
    return numerator / denominator
def mainCG(A):
    """Conjugate-gradient minimization of fun from the fixed start (-0.5, 1).

    A is the beta-coefficient formula (FR, PRP or DM).  The loop stops when
    every gradient component satisfies |g_i| < 1e-6 and returns the iterate
    history as an ndarray of shape (iterations, 2).
    """
    history = []                     # iterate history, one (x, y) per step
    xy = np.array([-0.5, 1])         # starting point
    d = -1.0 * gfun(xy[0], xy[1])    # initial direction: steepest descent
    tol = 1e-6                       # convergence tolerance on the gradient
    while True:
        history.append(xy)
        grad = gfun(xy[0], xy[1])
        if (np.abs(grad) < tol).all():
            break
        alpha = getstep(xy, d)       # Newton line search along d
        xy_next = xy + alpha * d
        grad_next = gfun(xy_next[0], xy_next[1])
        beta = A(grad_next, grad)
        d = beta * d - grad_next     # conjugate direction update
        xy = xy_next
    return np.array(history)
# Run the conjugate-gradient solver with each beta formula and print the
# iterate history; output is identical to three separate print calls.
for label, formula in (("FR", FR), ("DM", DM), ("PRP", PRP)):
    print(f"{label}共轭梯度法:\n{mainCG(formula)}")