# PS: I wrote this program based on my own understanding; the code is rough. Apologies, and thanks for your patience!
# Fletcher-Reeves (FR) conjugate gradient method
'''
Minimize f(x) = 2*x1^2 + x2^2, i.e. f(x) = 0.5 * x^T A x with
A = [[4, 0], [0, 2]]
'''
# Objective function; x is a column vector.
import numpy as np
def func(x,A):
    """Quadratic objective f(x) = 0.5 * x^T A x for a column vector x."""
    quadratic_form = x.T @ A @ x
    return 0.5 * quadratic_form
def func_der(x,A):
    """Gradient of the quadratic objective: grad f(x) = A x."""
    gradient = A @ x
    return gradient
def func_beta(beta1,beta2):
    """Fletcher-Reeves coefficient ||beta2||^2 / ||beta1||^2.

    Returns 0 when beta1 is the zero vector (used on the first
    iteration, where there is no previous gradient yet).
    """
    denom = beta1.T @ beta1
    if denom == 0:
        return 0
    return (beta2.T @ beta2) / denom
def conjugate_gradient(x0,A):
    """Minimize f(x) = 0.5 * x^T A x with the Fletcher-Reeves conjugate
    gradient method, starting from column vector x0.

    Parameters:
        x0: (n, 1) starting point (column vector).
        A:  (n, n) symmetric positive-definite matrix of the quadratic.

    Prints the approximate minimizer and returns it (gradient norm < eps).
    """
    eps = 1e-1                            # convergence tolerance on ||grad||
    g_prev = np.zeros((x0.shape[0], 1))   # gradient from the previous iteration
    d_prev = np.zeros((x0.shape[0], 1))   # search direction from the previous iteration
    while True:
        g = func_der(x0, A)
        # Stop BEFORE taking another step: once g ~ 0 the exact-line-search
        # step size divides by d^T A d ~ 0 and would produce NaNs.
        if np.linalg.norm(g) < eps:
            break
        beta = func_beta(g_prev, g)         # FR coefficient; 0 on the first pass
        d = -g + beta * d_prev              # FR search direction
        step = -(g.T @ d) / (d.T @ A @ d)   # exact line search for a quadratic
        x0 = x0 + step * d
        g_prev = g
        d_prev = d
    print('The value is {}'.format(x0))
    return x0
# Problem setup: coefficient matrix of the quadratic and the starting point.
A = np.array([[4, 0],
              [0, 2]])
x0 = np.array([[2],
               [2]])
conjugate_gradient(x0, A)