The conjugate direction is determined as follows:
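(Not in the original post: the formula is restated here to match what the code below actually computes. The search direction follows the Fletcher-Reeves update, and the step size beta^mk is the first power of beta to satisfy the Armijo condition.)

    d_{k+1} = -g_{k+1} + \beta_k d_k, \qquad \beta_k = \frac{\|g_{k+1}\|^2}{\|g_k\|^2}

    f(x_k + \beta^{m_k} d_k) < f(x_k) + \delta\,\beta^{m_k}\, g_k^{\top} d_k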
Code repository: https://github.com/taifyang/optimization-method
Python implementation:
import sympy
import numpy as np

# f: function to minimize; x0: initial position; max_iter: maximum number of
# iterations; epsilon: convergence tolerance on the change between iterations
def conjugate_gradient_x(f, x0, max_iter, epsilon):
    i = 0                        # iteration counter
    x0 = float(x0)               # floating-point arithmetic is faster
    df = sympy.diff(f, x)        # first derivative
    alpha = 0.001                # fixed step size (used by the commented-out variant)
    beta = 0.5                   # backtracking factor, in (0, 1)
    delta = 0.25                 # Armijo parameter, in (0, 0.5)
    while i < max_iter:
        gk = df.subs(x, x0)      # gradient at the current point
        dk = -gk                 # initial search direction: steepest descent
        #xnew = x0 + alpha*dk
        mk = 0                   # Armijo backtracking: find the smallest mk
        while mk < 10:
            if f.subs(x, x0+beta**mk*dk) < f.subs(x, x0) + delta*beta**mk*gk*dk:
                break
            mk += 1
        xnew = x0 + beta**mk*dk
        gknew = df.subs(x, xnew)
        betak = gknew**2/gk**2   # Fletcher-Reeves coefficient
        dknew = -gknew + betak*dk
        #xnew += alpha*dknew
        xnew += beta**mk*dknew   # second step, along the conjugate direction
        i += 1
        print('iteration %d: %.5f' % (i, xnew))
        if abs(df.subs(x, xnew) - df.subs(x, x0)) < epsilon:
            break
        x0 = xnew
    return xnew
# f: function to minimize; X0: initial position; max_iter: maximum number of
# iterations; epsilon: convergence tolerance on the change between iterations
def conjugate_gradient_x0x1(f, X0, max_iter, epsilon):
    i = 0                        # iteration counter
    X0[0], X0[1] = float(X0[0]), float(X0[1])    # floating-point arithmetic is faster
    df0 = sympy.diff(f, x0)      # first-order partial derivatives
    df1 = sympy.diff(f, x1)
    alpha = 0.001                # fixed step size (used by the commented-out variant)
    beta = 0.5                   # backtracking factor, in (0, 1)
    delta = 0.25                 # Armijo parameter, in (0, 0.5)
    while i < max_iter:
        # gradient at the current point, as a column vector
        gk = np.mat([float(df0.subs([(x0, X0[0]), (x1, X0[1])])), float(df1.subs([(x0, X0[0]), (x1, X0[1])]))]).T
        dk = -gk                 # initial search direction: steepest descent
        #Xnew = [X0[0] + alpha*dk[0,0], X0[1] + alpha*dk[1,0]]
        mk = 0                   # Armijo backtracking: find the smallest mk
        while mk < 10:
            if f.subs([(x0, X0[0]+beta**mk*dk[0,0]), (x1, X0[1]+beta**mk*dk[1,0])]) < f.subs([(x0, X0[0]), (x1, X0[1])]) + delta*beta**mk*float(gk.T*dk):
                break
            mk += 1
        Xnew = [X0[0] + beta**mk*dk[0,0], X0[1] + beta**mk*dk[1,0]]
        # gradient at the new point (substitute both variables into each partial)
        gknew = np.mat([float(df0.subs([(x0, Xnew[0]), (x1, Xnew[1])])), float(df1.subs([(x0, Xnew[0]), (x1, Xnew[1])]))]).T
        betak = float(gknew.T*gknew) / float(gk.T*gk)    # Fletcher-Reeves coefficient
        dknew = -gknew + betak*dk
        #Xnew = [Xnew[0] + alpha*dknew[0,0], Xnew[1] + alpha*dknew[1,0]]
        Xnew = [Xnew[0] + beta**mk*dknew[0,0], Xnew[1] + beta**mk*dknew[1,0]]    # second step, along the conjugate direction
        i += 1
        print('iteration %d: [%.5f, %.5f]' % (i, Xnew[0], Xnew[1]))
        if abs(f.subs([(x0, Xnew[0]), (x1, Xnew[1])]) - f.subs([(x0, X0[0]), (x1, X0[1])])) < epsilon:
            break
        X0 = Xnew
    return Xnew
if __name__ == '__main__':
    x = sympy.symbols("x")
    x0 = sympy.symbols("x0")
    x1 = sympy.symbols("x1")
    result = conjugate_gradient_x(x**4-4*x, 10, 50000, 1e-5)
    print('best position found: %.5f' % result)
    result = conjugate_gradient_x0x1((x0-1)**2+(x1-1)**4, [10, 10], 50000, 1e-5)
    print('best position found: [%.5f, %.5f]' % (result[0], result[1]))
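For comparison, here is a minimal NumPy-only sketch of the same Fletcher-Reeves iteration, assuming analytic gradients are available; the helper fr_cg and its default parameters are illustrative and not part of the repository:

import numpy as np

def fr_cg(f, grad, x, max_iter=1000, eps=1e-5, beta=0.5, delta=0.25):
    g = grad(x)
    d = -g                                   # start with steepest descent
    for _ in range(max_iter):
        t = 1.0                              # Armijo backtracking line search
        while f(x + t*d) > f(x) + delta*t*g.dot(d) and t > 1e-10:
            t *= beta
        x_new = x + t*d
        g_new = grad(x_new)
        if np.linalg.norm(g_new) < eps:      # stop once the gradient vanishes
            return x_new
        betak = g_new.dot(g_new) / g.dot(g)  # Fletcher-Reeves coefficient
        d = -g_new + betak*d                 # conjugate direction update
        x, g = x_new, g_new
    return x

f = lambda v: (v[0]-1)**2 + (v[1]-1)**4
grad = lambda v: np.array([2*(v[0]-1), 4*(v[1]-1)**3])
print(fr_cg(f, grad, np.array([10.0, 10.0])))  # converges to about [1, 1]

Both test problems have known minimizers, so the printed results are easy to check: f(x) = x**4 - 4*x satisfies f'(x) = 4*x**3 - 4 = 0 at x = 1, and (x0-1)**2 + (x1-1)**4 is minimized at (1, 1).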
C++ implementation:
#include <iostream>
#include <vector>
#include <cmath>
#include <cstdlib>
#include <Eigen/Dense>

const double dx = 1e-3;    // forward-difference step size

double f(double x)
{
    return pow(x, 4) - 4 * x;
}

double df(double x)
{
    //return 4 * pow(x, 3) - 4;    // analytic derivative
    return (f(x + dx) - f(x)) / dx;    // forward-difference approximation
}

double f(std::vector<double> X)
{
    return pow(X[0] - 1, 2) + pow(X[1] - 1, 4);
}

double df0(std::vector<double> X)
{
    //return 2 * (X[0] - 1);    // analytic partial derivative
    return (f({ X[0] + dx, X[1] }) - f(X)) / dx;
}

double df1(std::vector<double> X)
{
    //return 4 * pow(X[1] - 1, 3);    // analytic partial derivative
    return (f({ X[0], X[1] + dx }) - f(X)) / dx;
}
double conjugate_gradient_x(double x0, int max_iter, double epsilon)
{
    int i = 0;
    double alpha = 0.001;    // fixed step size (used by the commented-out variant)
    double beta = 0.5;       // backtracking factor, in (0, 1)
    double delta = 0.25;     // Armijo parameter, in (0, 0.5)
    double xnew = x0;
    while (i < max_iter)
    {
        double gk = df(x0);    // gradient at the current point
        double dk = -gk;       // initial search direction: steepest descent
        //xnew = x0 + alpha*dk;
        int mk = 0;            // Armijo backtracking: find the smallest mk
        while (mk < 10)
        {
            if (f(x0 + pow(beta, mk)*dk) < f(x0) + delta*pow(beta, mk)*gk*dk)
                break;
            ++mk;
        }
        xnew = x0 + pow(beta, mk)*dk;
        double gknew = df(xnew);
        double betak = pow(gknew, 2) / pow(gk, 2);    // Fletcher-Reeves coefficient
        double dknew = -gknew + betak*dk;
        //xnew += alpha*dknew;
        xnew += pow(beta, mk)*dknew;    // second step, along the conjugate direction
        ++i;
        std::cout << "iteration " << i << ": " << xnew << std::endl;
        if (std::abs(f(xnew) - f(x0)) < epsilon)
            break;
        x0 = xnew;
    }
    return xnew;
}
std::vector<double> conjugate_gradient_x0x1(std::vector<double> X0, int max_iter, double epsilon)
{
    int i = 0;
    double alpha = 0.001;    // fixed step size (used by the commented-out variant)
    double beta = 0.5;       // backtracking factor, in (0, 1)
    double delta = 0.25;     // Armijo parameter, in (0, 0.5)
    std::vector<double> Xnew = X0;
    while (i < max_iter)
    {
        Eigen::Vector2d gk = { df0(X0), df1(X0) };    // gradient at the current point
        Eigen::Vector2d dk = -gk;                     // initial search direction
        //Xnew = { X0[0] + alpha*dk(0), X0[1] + alpha*dk(1) };
        int mk = 0;    // Armijo backtracking: find the smallest mk
        while (mk < 10)
        {
            Xnew = { X0[0] + pow(beta, mk)*dk(0), X0[1] + pow(beta, mk)*dk(1) };
            if (f(Xnew) < f(X0) + delta*pow(beta, mk)*gk.dot(dk))
                break;
            ++mk;
        }
        Xnew = { X0[0] + pow(beta, mk)*dk(0), X0[1] + pow(beta, mk)*dk(1) };
        Eigen::Vector2d gknew = { df0(Xnew), df1(Xnew) };    // gradient at the new point
        double betak = gknew.squaredNorm() / gk.squaredNorm();    // Fletcher-Reeves coefficient
        Eigen::Vector2d dknew = -gknew + betak*dk;
        //Xnew = { Xnew[0] + alpha*dknew(0), Xnew[1] + alpha*dknew(1) };
        Xnew = { Xnew[0] + pow(beta, mk)*dknew(0), Xnew[1] + pow(beta, mk)*dknew(1) };    // second step
        ++i;
        std::cout << "iteration " << i << ": " << Xnew[0] << " " << Xnew[1] << std::endl;
        if (std::abs(f(Xnew) - f(X0)) < epsilon)
            break;
        X0 = Xnew;
    }
    return Xnew;
}
int main(int argc, char* argv[])
{
    double result = conjugate_gradient_x(10, 50000, 1e-5);
    std::cout << "best position found: " << result << std::endl;
    std::vector<double> results = conjugate_gradient_x0x1({ 10, 10 }, 50000, 1e-5);
    std::cout << "best position found: " << results[0] << " " << results[1] << std::endl;
    system("pause");
    return EXIT_SUCCESS;
}
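A build note (the Eigen include path below is an assumption; adjust it to the local installation):

    g++ main.cpp -I /usr/include/eigen3 -o conjugate_gradient

The system("pause") call is a Windows console convenience and can be removed on other platforms.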