来源:Ceres Solver 官方教程《Non-linear Least Squares》(官方文档永远是最权威的参考!)
1.构建最小二乘问题
struct CostFunctor { template <typename T> bool operator()(const T* const x, T* residual) const { residual[0] = 10.0 - x[0]; return true; } }; //仿函数 —— 模板化 int main(int argc, char** argv) { google::InitGoogleLogging(argv[0]); // 初始化 double initial_x = 5.0; double x = initial_x; // Build the problem. Problem problem; // Set up the only cost function (also known as residual). This uses // auto-differentiation to obtain the derivative (jacobian). CostFunction* cost_function = new AutoDiffCostFunction<CostFunctor, 1, 1>(new CostFunctor); problem.AddResidualBlock(cost_function, nullptr, &x); // Run the solver! Solver::Options options; options.linear_solver_type = ceres::DENSE_QR; options.minimizer_progress_to_stdout = true; Solver::Summary summary; Solve(options, &problem, &summary); std::cout << summary.BriefReport() << "\n"; std::cout << "x : " << initial_x << " -> " << x << "\n"; return 0; }
一些基础用法直接按官网例子写就完了。
Derivatives(导数)方法:
1、Analytic derivatives(解析导数):需要自己手动推导并实现残差对参数的雅可比矩阵,适用于导数闭式表达式比较简单、或对性能要求较高、值得手写求导的函数(并非只限于线性函数)
// Analytic (hand-derived) cost function for the NIST Rat43 model:
//   f(x; b) = b1 / (1 + exp(b2 - b3*x))^(1/b4)
// One residual, one parameter block of size 4. "Optimized" because the
// subexpressions t1..t4 are shared between the residual and the Jacobian.
class Rat43AnalyticOptimized : public SizedCostFunction<1, 4> {
 public:
  // (x, y): one observed data point the residual is evaluated against.
  Rat43AnalyticOptimized(const double x, const double y) : x_(x), y_(y) {}
  virtual ~Rat43AnalyticOptimized() {}

  virtual bool Evaluate(double const* const* parameters,
                        double* residuals,
                        double** jacobians) const {
    const double b1 = parameters[0][0];
    const double b2 = parameters[0][1];
    const double b3 = parameters[0][2];
    const double b4 = parameters[0][3];

    // Common subexpressions reused by both residual and Jacobian.
    const double t1 = exp(b2 - b3 * x_);
    const double t2 = 1 + t1;
    const double t3 = pow(t2, -1.0 / b4);
    residuals[0] = b1 * t3 - y_;

    // Ceres passes nullptr when only the residual is needed.
    if (!jacobians) return true;
    double* jacobian = jacobians[0];
    if (!jacobian) return true;

    const double t4 = pow(t2, -1.0 / b4 - 1);
    jacobian[0] = t3;                              // d r / d b1
    jacobian[1] = -b1 * t1 * t4 / b4;              // d r / d b2
    jacobian[2] = -x_ * jacobian[1];               // d r / d b3 (reuses jacobian[1])
    jacobian[3] = b1 * log(t2) * t3 / (b4 * b4);   // d r / d b4
    return true;
  }

 private:
  const double x_;
  const double y_;
};
2、Numeric derivatives(数值导数):利用微分形式的求导,如下
取一个很小的步长 $h$,可将导数近似为前向差分:$Df(x) \approx \frac{f(x+h)-f(x)}{h}$
由泰勒展开 $f(x+h)=f(x)+hf'(x)+O(h^2)$ 可知,前向差分的截断误差为 $O(h)$,即 $Df(x)=\frac{f(x+h)-f(x)}{h}+O(h)$(可用于近似不便解析求导的复杂函数):
struct Rat43CostFunctor { Rat43CostFunctor(const double x, const double y) : x_(x), y_(y) {} //定义函数 bool operator()(const double* parameters, double* residuals) const { const double b1 = parameters[0]; const double b2 = parameters[1]; const double b3 = parameters[2]; const double b4 = parameters[3]; residuals[0] = b1 * pow(1.0 + exp(b2 - b3 * x_), -1.0 / b4) - y_; return true; } const double x_; const double y_; } //使用NumericDiffCostFunction构造代价函数,并包装实例Rat43CostFunctor CostFunction* cost_function = new NumericDiffCostFunction<Rat43CostFunctor, FORWARD, 1, 4>( new Rat43CostFunctor(x, y));
//Implementation Details class Rat43NumericDiffForward : public SizedCostFunction<1,4> { public: Rat43NumericDiffForward(const Rat43Functor* functor) : functor_(functor) {} virtual ~Rat43NumericDiffForward() {} virtual bool Evaluate(double const* const* parameters, double* residuals, double** jacobians) const { functor_(parameters[0], residuals); if (!jacobians) return true; double* jacobian = jacobians[0]; if (!jacobian) return true; const double f = residuals[0]; double parameters_plus_h[4]; for (int i = 0; i < 4; ++i) { std::copy(parameters, parameters + 4, parameters_plus_h); const double kRelativeStepSize = 1e-6; const double h = std::abs(parameters[i]) * kRelativeStepSize; //h为步长,其是变化的 parameters_plus_h[i] += h; double f_plus; functor_(parameters_plus_h, &f_plus); jacobian[i] = (f_plus - f) / h; } return true; } private: std::unique_ptr<Rat43Functor> functor_; };
3. Automatic Derivatives(自动求导):Ceres 使用 C++ 模板和运算符重载自动计算解析导数。
待补充。。。。