Newton's Method

Code repository: https://github.com/taifyang/optimization-method
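
Newton's method searches for a stationary point of the objective using second-order information. In one dimension the update is x_{k+1} = x_k - f'(x_k) / f''(x_k); in several dimensions the derivative becomes the gradient gk and the second derivative becomes the Hessian Gk, giving X_{k+1} = X_k - Gk^{-1} * gk. As a worked first step for f(x) = x^4 - 4x starting from x_0 = 10: f'(10) = 4*10^3 - 4 = 3996 and f''(10) = 12*10^2 = 1200, so x_1 = 10 - 3996/1200 ≈ 6.67, which is what the first printed iteration of the Python run below produces. Both implementations repeat this update until the change between successive iterates, measured on f or its derivative, drops below the tolerance epsilon, or until max_iter iterations have been performed.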

Python implementation:

import sympy
import numpy as np

#f: function to minimize, x0: initial point, max_iter: maximum number of iterations,
#epsilon: tolerance on the change of f'(x) between two successive iterates
def newton_x(f, x0, max_iter, epsilon):
    i = 0 #iteration counter
    x0 = float(x0) #work in floating point for faster evaluation
    df = sympy.diff(f, x) #first derivative
    d2f = sympy.diff(f, x, 2) #second derivative
    while i < max_iter:
        gk = df.subs(x, x0)
        Gk = d2f.subs(x, x0)
        
        xnew = x0 - gk/Gk
        
        i += 1
        print('Iteration %d: %.5f' % (i, xnew))
        if abs(df.subs(x, xnew)-df.subs(x, x0)) < epsilon:
            break
        x0 = xnew
    return xnew

#f: function to minimize, X0: initial point, max_iter: maximum number of iterations,
#epsilon: tolerance on the change of f between two successive iterates
def newton_x0x1(f, X0, max_iter, epsilon):
    i = 0 #iteration counter
    X0[0], X0[1] = float(X0[0]), float(X0[1]) #work in floating point for faster evaluation
    df0 = sympy.diff(f, x0) #first-order partial derivatives
    df1 = sympy.diff(f, x1)
    d2f0 = sympy.diff(f, x0, 2) #second-order partial derivatives
    d2f1 = sympy.diff(f, x1, 2)
    df0df1 = sympy.diff(sympy.diff(f, x0), x1) #mixed partial derivative
    while i < max_iter:
        gk = np.array([float(df0.subs([(x0, X0[0]), (x1, X0[1])])), float(df1.subs([(x0, X0[0]), (x1, X0[1])]))]) #gradient vector
        Gk = np.array([[float(d2f0.subs([(x0, X0[0]), (x1, X0[1])])), float(df0df1.subs([(x0, X0[0]), (x1, X0[1])]))],
                       [float(df0df1.subs([(x0, X0[0]), (x1, X0[1])])), float(d2f1.subs([(x0, X0[0]), (x1, X0[1])]))]]) #Hessian matrix
        dk = -np.linalg.solve(Gk, gk) #Newton step: solve Gk*dk = -gk instead of inverting the Hessian
        
        Xnew = [X0[0] + dk[0], X0[1] + dk[1]]
        
        i += 1
        print('Iteration %d: [%.5f, %.5f]' % (i, Xnew[0], Xnew[1]))
        if abs(f.subs([(x0, Xnew[0]), (x1, Xnew[1])])-f.subs([(x0, X0[0]), (x1, X0[1])])) < epsilon:
            break
        X0 = Xnew
    return Xnew

if __name__ == '__main__':      
    x = sympy.symbols("x") 
    x0 = sympy.symbols("x0")
    x1 = sympy.symbols("x1")
    result = newton_x(x**4-4*x, 10, 50000, 1e-5)
    print('Best position found: %.5f' % result)
    result = newton_x0x1((x0-1)**2+(x1-1)**4, [10,10], 50000, 1e-5)
    print('Best position found: [%.5f, %.5f]' % (result[0], result[1]))
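
As a quick sanity check (a minimal, self-contained sketch, not part of the linked repository), the same minima can be found symbolically by solving the first-order conditions with sympy.solve and compared with the iterative results: the only real critical point of x**4 - 4*x is x = 1, and the minimum of (x0-1)**2 + (x1-1)**4 is at (1, 1).

import sympy

x = sympy.symbols("x")
x0, x1 = sympy.symbols("x0 x1")

# 1-D check: solve f'(x) = 0
f1 = x**4 - 4*x
print(sympy.solve(sympy.diff(f1, x), x))    # the only real root is x = 1

# 2-D check: solve grad f = 0
f2 = (x0 - 1)**2 + (x1 - 1)**4
print(sympy.solve([sympy.diff(f2, x0), sympy.diff(f2, x1)], [x0, x1]))    # [(1, 1)]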

C++ implementation:

#include <iostream>
#include <vector>
#include <cmath>    // std::pow, std::abs
#include <cstdlib>  // system
#include <Eigen/Dense>

const double dx = 1e-3;  // step size for the finite-difference derivative approximations

double f(double x)
{
	return pow(x, 4) - 4 * x;
}

double df(double x)
{
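	// f'(x) approximated by a forward difference; the commented-out line is the exact derivative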
	//return 4 * pow(x, 3) - 4;
	return (f(x + dx) - f(x)) / dx;
}

double d2f(double x)
{
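	// f''(x) approximated by a nested forward difference of df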
	//return 12 * pow(x, 2);
	return (df(x + dx) - df(x)) / dx;
}

double f(std::vector<double> X)
{
	return pow(X[0] - 1, 2) + pow(X[1] - 1, 4);
}

double df0(std::vector<double> X)
{
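	// partial derivative of f with respect to X[0] (forward difference)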
	//return 2 * (X[0] - 1);
	return (f({ X[0] + dx, X[1] }) - f(X)) / dx;
}

double df1(std::vector<double> X)
{
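	// partial derivative of f with respect to X[1] (forward difference)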
	//return 4 * pow(X[1] - 1, 3);
	return (f({ X[0], X[1] + dx }) - f(X)) / dx;
}

double d2f0(std::vector<double> X)
{
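	// second partial derivative of f with respect to X[0]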
	//return 2;
	return (df0({ X[0] + dx, X[1] }) - df0(X)) / dx;
}

double d2f1(std::vector<double> X)
{
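	// second partial derivative of f with respect to X[1]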
	//return 12 * pow(X[1] - 1, 2);
	return (df1({ X[0] , X[1] + dx }) - df1(X)) / dx;
}

double df0df1(std::vector<double> X)
{
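	// mixed second partial derivative of f with respect to X[0] and X[1]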
	//return 0;
	return (df1({ X[0] + dx, X[1] }) - df1(X)) / dx;
}

double newton_x(double x0, int max_iter, double epsilon)
{
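	// One-dimensional Newton iteration x_{k+1} = x_k - f'(x_k)/f''(x_k);
	// stops when |f(x_{k+1}) - f(x_k)| < epsilon or after max_iter iterations.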
	int i = 0;
	double xnew = x0;	// initialized so a value is returned even if the loop never runs
	while (i < max_iter)
	{
		double gk = df(x0);
		double Gk = d2f(x0);
		
		xnew = x0 - gk/Gk;

		++i;
		std::cout << "Iteration " << i << ": " << xnew << std::endl;
		if (std::abs(f(xnew) - f(x0)) < epsilon)
			break;
		x0 = xnew;
	}
	return xnew;
}

std::vector<double> newton_x0x1(std::vector<double> X0, int max_iter, double epsilon)
{
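	// Two-dimensional Newton iteration X_{k+1} = X_k - Gk^{-1}*gk, where gk is the gradient
	// and Gk the Hessian; stops when |f(X_{k+1}) - f(X_k)| < epsilon or after max_iter iterations.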
	int i = 0;
	std::vector<double> Xnew = X0;	// initialized so a value is returned even if the loop never runs
	while (i < max_iter)
	{
		Eigen::Vector2d gk;		// gradient
		gk << df0(X0), df1(X0);
		Eigen::Matrix2d Gk;		// Hessian
		Gk << d2f0(X0), df0df1(X0), df0df1(X0), d2f1(X0);
		Eigen::Vector2d dk = -Gk.inverse()*gk;	// Newton step
		
		Xnew = { X0[0] + dk(0), X0[1] + dk(1) };

		++i;
		std::cout << "Iteration " << i << ": [" << Xnew[0] << ", " << Xnew[1] << "]" << std::endl;
		if (std::abs(f(Xnew) - f(X0)) < epsilon)
			break;
		X0 = Xnew;
	}
	return Xnew;
}


int main(int argc, char* argv[])
{
	double result = newton_x(10, 50000, 1e-5);
	std::cout << "Best position found: " << result << std::endl;

	std::vector<double> results = newton_x0x1({ 10,10 }, 50000, 1e-5);
	std::cout << "Best position found: [" << results[0] << ", " << results[1] << "]" << std::endl;

	system("pause");
	return EXIT_SUCCESS;
}
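
Unlike the Python version, which differentiates symbolically with sympy, the C++ version approximates every derivative numerically with a forward difference of step dx = 1e-3, so its gradients and Hessians carry a truncation error on the order of dx. The short, self-contained Python sketch below (illustrative only, not part of the linked repository) compares that forward difference with the exact derivative 4*x**3 - 4 of the 1-D test function and with a central difference, which is usually more accurate at the same step size.

# compare derivative approximations for f(x) = x**4 - 4*x at x = 2 (exact f'(2) = 28)
dx = 1e-3

def f(x):
    return x**4 - 4*x

forward = (f(2 + dx) - f(2)) / dx              # forward difference, O(dx) error
central = (f(2 + dx) - f(2 - dx)) / (2 * dx)   # central difference, O(dx**2) error
print(forward, central)                        # both are close to 28; the central one is far closer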