Gauss-Newton Method Explained: Derivation and C++ Implementation


Theoretical Derivation

Gauss-Newton is the classic iterative method for nonlinear least-squares problems. Given a residual vector f(x) built from m observations and n parameters x, the objective is F(x) = ½‖f(x)‖². Instead of computing the exact Hessian as Newton's method does, Gauss-Newton linearizes the residual around the current estimate, f(x + Δx) ≈ f(x) + JΔx, where J is the m×n Jacobian of f. Minimizing the resulting quadratic in Δx gives the normal equations J^T J Δx = -J^T f; each iteration solves them for the step Δx, applies the update, and repeats until the objective stops decreasing. The update is written out compactly below.

Drawbacks: J^T J is only positive semi-definite, so it can be singular or ill-conditioned, in which case the step becomes unstable or undefined; and when the step is large the linearization is no longer a good approximation, so the iteration may diverge. Levenberg-Marquardt (damped least squares) addresses both problems by solving (J^T J + λI)Δx = -J^T f instead.
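A compact restatement of the update rule that the code below implements (standard Gauss-Newton):

```latex
% Objective: half the sum of squared residuals
F(x) = \tfrac{1}{2}\,\lVert f(x) \rVert^2 , \qquad f : \mathbb{R}^n \to \mathbb{R}^m

% Linearize the residual around the current estimate x_k (J_k is the m x n Jacobian of f at x_k):
f(x_k + \Delta x) \approx f(x_k) + J_k \Delta x

% Minimizing the quadratic model over \Delta x yields the normal equations and the update:
J_k^{\top} J_k \, \Delta x_k = - J_k^{\top} f(x_k), \qquad x_{k+1} = x_k + \Delta x_k
```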

Code Implementation

The program below fits the model y = A·sin(B·x) + C·cos(D·x) to 100 noisy samples using Eigen. It estimates the four parameters A, B, C, D by iterating the Gauss-Newton step, with the Jacobian computed numerically via central differences.

```c++
#include <Eigen/Dense>
#include <Eigen/Sparse>
#include <iostream>
#include <iomanip>
#include <math.h>

using namespace std;
using namespace Eigen;

const double DERIV_STEP = 1e-5;
const int MAX_ITER = 100;

#define max(a,b) (((a)>(b))?(a):(b))

double func(const VectorXd& input, const VectorXd& output, const VectorXd& params, int objIndex)
{
	// obj = A * sin(Bx) + C * cos(D*x) - F
	double x1 = params(0);
	double x2 = params(1);
	double x3 = params(2);
	double x4 = params(3);

	double t = input(objIndex);
	double f = output(objIndex);

	return x1 * sin(x2 * t) + x3 * cos(x4 * t) - f;
}

// return the residual vector: one func() value per observation
VectorXd objF(const VectorXd& input, const VectorXd& output, const VectorXd& params)
{
	VectorXd obj(input.rows());
	for (int i = 0; i < input.rows(); i++)
		obj(i) = func(input, output, params, i);

	return obj;
}

// F = (f^T * f) / 2
double Func(const VectorXd& obj)
{
	// half the sum of squared residuals
	return obj.squaredNorm() / 2;
}

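// numerical partial derivative of the i-th residual w.r.t. params(paraIndex),
// estimated with a central difference of half-width DERIV_STEP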
double Deriv(const VectorXd& input, const VectorXd& output, int objIndex, const VectorXd& params,
	int paraIndex)
{
	VectorXd para1 = params;
	VectorXd para2 = params;

	para1(paraIndex) -= DERIV_STEP;
	para2(paraIndex) += DERIV_STEP;

	double obj1 = func(input, output, para1, objIndex);
	double obj2 = func(input, output, para2, objIndex);

	return (obj2 - obj1) / (2 * DERIV_STEP);
}

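// numerical Jacobian of the residual vector: entry (i, j) = d f_i / d x_j,
// assembled element by element with Deriv()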
MatrixXd Jacobian(const VectorXd& input, const VectorXd& output, const VectorXd& params)
{
	int rowNum = input.rows();
	int colNum = params.rows();

	MatrixXd Jac(rowNum, colNum);

	for (int i = 0; i < rowNum; i++)
	{
		for (int j = 0; j < colNum; j++)
		{
			Jac(i, j) = Deriv(input, output, i, params, j);
		}
	}
	return Jac;
}

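// Gauss-Newton iterations: evaluate the residuals, solve the normal equations
// for an update step, apply it, and stop once the objective no longer decreases
// (or MAX_ITER is reached)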
void gaussNewton(const VectorXd& input, const VectorXd& output, VectorXd& params)
{
	int errNum = input.rows();      //error  num
	int paraNum = params.rows();    //parameter  num

	VectorXd obj(errNum);

	double last_sum = 0;

	int iterCnt = 0;
	while (iterCnt < MAX_ITER)
	{
		// compute the residual vector
		obj = objF(input, output, params);

		// current objective value: half the sum of squared residuals
		double sum = Func(obj);

		cout << "Iterator index: " << iterCnt << endl;
		cout << "parameter: " << endl << params << endl;
		cout << "error sum: " << endl << sum << endl << endl;

		// stop when the change in the objective between iterations falls below 1e-12
		if (fabs(sum - last_sum) <= 1e-12)
			break;
		// otherwise remember the current value and keep iterating
		last_sum = sum;

		MatrixXd Jac = Jacobian(input, output, params);

		// Gauss-Newton step: solve the normal equations (J^T * J) * delta = J^T * f
		// and update params -= delta (equivalent to x <- x + dx with J^T*J*dx = -J^T*f).
		// ldlt() is numerically safer than forming (J^T * J).inverse() explicitly.
		VectorXd delta(paraNum);
		delta = (Jac.transpose() * Jac).ldlt().solve(Jac.transpose() * obj);

		params -= delta;
		iterCnt++;
	}
}
int main(int argc, char* argv[])
{
	// model: y = A * sin(B*x) + C * cos(D*x)
	// there are 4 parameters: A, B, C, D.
	int num_params = 4;

	// generate random data using these parameters
	int total_data = 100;

	VectorXd input(total_data);
	VectorXd output(total_data);

	double A = 5, B = 1, C = 10, D = 2;
	// generate noisy observations of y = A*sin(B*x) + C*cos(D*x)
	for (int i = 0; i < total_data; i++)
	{
		// sample x roughly uniformly in [-10, 10)
		double x = 20.0 * ((rand() % 1000) / 1000.0) - 10.0;
		// additive noise in [0, 2)
		double deltaY = 2.0 * (rand() % 1000) / 1000.0;
		double y = A * sin(B*x) + C * cos(D*x) + deltaY;

		input(i) = x;
		output(i) = y;
	}

	// estimate the parameters with Gauss-Newton
	VectorXd params_gaussNewton(num_params);
	// initial guess
	params_gaussNewton << 1.8, 1.2, 8.2, 1.85;

	gaussNewton(input, output, params_gaussNewton);

	cout << "Gauss-Newton parameters: " << endl << params_gaussNewton << endl << endl << endl;
}
```
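To connect back to the drawback noted in the derivation: when J^T J is close to singular, the plain Gauss-Newton step can become very large or undefined. Below is a minimal sketch of how the update could be damped in the Levenberg-Marquardt style, reusing the Jacobian(), objF() and Func() helpers defined above; the fixed damping factor lambda and the simple accept/reject rule are illustrative assumptions, not a full LM implementation.

```c++
// Sketch: one damped (Levenberg-Marquardt style) update step, assuming the
// Jacobian(), objF() and Func() helpers and Eigen types from the listing above.
void dampedStep(const VectorXd& input, const VectorXd& output,
	VectorXd& params, double lambda /* e.g. 1e-3, kept fixed here for brevity */)
{
	VectorXd obj = objF(input, output, params);
	MatrixXd Jac = Jacobian(input, output, params);

	int paraNum = params.rows();
	MatrixXd H = Jac.transpose() * Jac;                    // Gauss-Newton approximation of the Hessian
	H += lambda * MatrixXd::Identity(paraNum, paraNum);    // damping keeps the system well conditioned

	// solve (J^T * J + lambda * I) * delta = J^T * f, then step as in gaussNewton()
	VectorXd delta = H.ldlt().solve(Jac.transpose() * obj);

	// accept the step only if it reduces the objective (deliberately simplified rule)
	VectorXd candidate = params - delta;
	if (Func(objF(input, output, candidate)) < Func(obj))
		params = candidate;
}
```

In a real Levenberg-Marquardt loop, lambda is increased whenever a step is rejected and decreased when it is accepted, which is what libraries such as Ceres (see the appendix below) automate.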

References

Thanks to the following references; feel free to @ me if you spot any problems.
https://blog.csdn.net/qq_42138662/article/details/109289129
https://blog.csdn.net/stihy/article/details/52737723

Appendix: Gauss-Newton and Damped Least Squares with Ceres

Below is an example of solving a least-squares problem with Ceres, which builds trust-region methods (Levenberg-Marquardt, i.e. damped least squares, and Dogleg) on top of the Gauss-Newton approximation. First, we define a residual functor and wrap it in ceres::AutoDiffCostFunction. The toy problem here minimizes f(x) = (x - 2)^2.

```c++
struct CostFunctor {
	template <typename T>
	bool operator()(const T* const x, T* residual) const {
		residual[0] = x[0] - T(2.0);
		return true;
	}
};
```

Next, we create a ceres::Problem and add the cost function to it as a residual block.

```c++
ceres::Problem problem;
double x = 0.5;
problem.AddResidualBlock(new ceres::AutoDiffCostFunction<CostFunctor, 1, 1>(new CostFunctor), nullptr, &x);
```

Then we configure a ceres::Solver::Options object and call ceres::Solve to run the optimization.

```c++
ceres::Solver::Options options;
options.minimizer_progress_to_stdout = true;
ceres::Solver::Summary summary;
ceres::Solve(options, &problem, &summary);
std::cout << summary.FullReport() << std::endl;
```

With the default options Ceres uses a trust-region minimizer with the Levenberg-Marquardt strategy; a specific algorithm can be selected through the fields of Solver::Options, as sketched after the complete listing below.

The complete example:

```c++
#include <iostream>
#include "ceres/ceres.h"

struct CostFunctor {
	template <typename T>
	bool operator()(const T* const x, T* residual) const {
		residual[0] = x[0] - T(2.0);
		return true;
	}
};

int main(int argc, char** argv) {
	google::InitGoogleLogging(argv[0]);

	double x = 0.5;
	ceres::Problem problem;
	problem.AddResidualBlock(new ceres::AutoDiffCostFunction<CostFunctor, 1, 1>(new CostFunctor), nullptr, &x);

	ceres::Solver::Options options;
	options.minimizer_progress_to_stdout = true;
	ceres::Solver::Summary summary;
	ceres::Solve(options, &problem, &summary);

	std::cout << summary.FullReport() << std::endl;
	return 0;
}
```
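For completeness, a short sketch of how the minimizer and trust-region strategy can be chosen explicitly via Solver::Options. The enum values shown (TRUST_REGION, LEVENBERG_MARQUARDT, DOGLEG) are part of the public Ceres API, and the choices below simply spell out the library defaults.

```c++
ceres::Solver::Options options;
// trust-region minimizer (the alternative is ceres::LINE_SEARCH)
options.minimizer_type = ceres::TRUST_REGION;
// Levenberg-Marquardt, i.e. damped least squares; ceres::DOGLEG is the other strategy
options.trust_region_strategy_type = ceres::LEVENBERG_MARQUARDT;
options.max_num_iterations = 100;
options.minimizer_progress_to_stdout = true;
```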
