Solving a nonlinear least-squares problem with Ceres using several methods

The problem is the same as the one in the referenced article.
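
The model being fitted, which can also be read off from the cost functors below, is $y = \exp(a x^2 + b x + c)$ with state $(a, b, c)$; given samples $(x_i, y_i)$, each residual and the overall objective are

$r_i(a, b, c) = y_i - \exp\left(a x_i^2 + b x_i + c\right), \qquad \min_{a,b,c} \frac{1}{2} \sum_{i=1}^{N} r_i^2$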

1 Automatic differentiation

1.1 Single data point

// Residual for one sample (x, y): r = y - exp(a*x^2 + b*x + c),
// where the parameter block is state = (a, b, c).
class AutoDiffCostFactorSingleData {
 public:
  AutoDiffCostFactorSingleData(double x, double y) : x_(x), y_(y) {}

  template <typename T>
  bool operator()(const T* const state, T* residual) const {
    assert(state != nullptr);
    assert(residual != nullptr);
    // Unqualified exp() resolves to ceres::exp via ADL when T is a Jet.
    residual[0] = y_ - exp(state[0] * x_ * x_ + state[1] * x_ + state[2]);
    return true;
  }

  static ceres::CostFunction* Create(double x, double y) {
    // Template arguments: functor, residual dimension (1), parameter block size (3).
    return (new ceres::AutoDiffCostFunction<AutoDiffCostFactorSingleData, 1, 3>(
        new AutoDiffCostFactorSingleData(x, y)));
  }

 private:
  double x_;
  double y_;
};


  ceres::Problem problem;
  for (int i = 0; i < N; ++i) {
    problem.AddResidualBlock(
        AutoDiffCostFactorSingleData::Create(x_data[i], y_data[i]), nullptr,
        state.data());
  }
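
A minimal sketch of the surrounding program, assuming synthetic data generated from ground truth (a, b, c) = (1, 2, 1) and a zero initial guess (both are illustrative choices, not taken from the original article):

#include <ceres/ceres.h>
#include <cmath>
#include <iostream>
#include <vector>

int main() {
  const int N = 100;
  std::vector<double> x_data(N), y_data(N);
  for (int i = 0; i < N; ++i) {
    x_data[i] = i / 100.0;
    // Assumed ground truth a = 1.0, b = 2.0, c = 1.0 (illustrative only).
    y_data[i] = std::exp(x_data[i] * x_data[i] + 2.0 * x_data[i] + 1.0);
  }
  std::vector<double> state = {0.0, 0.0, 0.0};  // initial guess for (a, b, c)

  ceres::Problem problem;
  for (int i = 0; i < N; ++i) {
    problem.AddResidualBlock(
        AutoDiffCostFactorSingleData::Create(x_data[i], y_data[i]), nullptr,
        state.data());
  }

  ceres::Solver::Options options;
  options.linear_solver_type = ceres::DENSE_QR;
  ceres::Solver::Summary summary;
  ceres::Solve(options, &problem, &summary);
  std::cout << summary.BriefReport() << "\n"
            << "estimated (a, b, c): " << state[0] << " " << state[1] << " "
            << state[2] << std::endl;
  return 0;
}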

1.2 Batched data

// Residual block that stacks all N samples into one N-dimensional residual:
// residual[i] = y_i - exp(a*x_i^2 + b*x_i + c). N must be known at compile
// time and must equal x_data.size() == y_data.size().
template <int N>
class AutoDiffConstFactorBatchData {
 public:
  AutoDiffConstFactorBatchData(const std::vector<double>& x_data,
                               const std::vector<double>& y_data)
      : x_data_(x_data), y_data_(y_data) {}

  template <typename T>
  bool operator()(const T* const state, T* residual) const {
    assert(state != nullptr);
    assert(residual != nullptr);
    for (size_t i = 0; i < x_data_.size(); ++i) {
      residual[i] = y_data_[i] - exp(state[0] * x_data_[i] * x_data_[i] +
                                     state[1] * x_data_[i] + state[2]);
    }
    return true;
  }

  static ceres::CostFunction* Create(const std::vector<double>& x_data,
                                     const std::vector<double>& y_data) {
    // Template arguments: functor, residual dimension (N), parameter block size (3).
    return (new ceres::AutoDiffCostFunction<AutoDiffConstFactorBatchData, N, 3>(
        new AutoDiffConstFactorBatchData(x_data, y_data)));
  }

 private:
  const std::vector<double> x_data_;
  const std::vector<double> y_data_;
};


  problem.AddResidualBlock(
      AutoDiffConstFactorBatchData<N>::Create(x_data, y_data), nullptr,
      state.data());
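
A usage sketch, assuming the sample count is the compile-time constant 100 (illustrative); since N is a template parameter of the functor, it must match the length of the vectors that are passed in:

  // N fixes the residual dimension of the AutoDiffCostFunction at compile
  // time, so it must equal x_data.size() and y_data.size().
  constexpr int N = 100;
  std::vector<double> x_data(N), y_data(N);
  // ... fill x_data / y_data with the samples ...
  std::vector<double> state = {0.0, 0.0, 0.0};  // initial guess for (a, b, c)

  ceres::Problem problem;
  problem.AddResidualBlock(
      AutoDiffConstFactorBatchData<N>::Create(x_data, y_data), nullptr,
      state.data());

If the number of samples is only known at run time, ceres::DynamicAutoDiffCostFunction is the usual alternative, with the residual count and parameter block sizes specified at run time instead.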

2 Analytic differentiation

On the difference between the Jacobian in Ceres and the Jacobian of the optimization problem

The least-squares problem is defined as
$\frac{1}{2} \sum_i \left\| f_i\left(x_{i_1}, \ldots, x_{i_k}\right) \right\|^2$

  • Jacobian as defined in the optimization problem:
    $z = h(x), \quad J = \frac{\partial h(x)}{\partial x}$

  • Jacobian as defined in Ceres:
    jacobians[i][r * parameter_block_sizes_[i] + c] = $\frac{\partial\, \text{residual}[r]}{\partial\, \text{parameters}[i][c]}$
    $J_i = D_i f\left(x_1, \ldots, x_k\right) \quad \forall i \in \{1, \ldots, k\}$, with $\text{residual} = f(x)$

    • If the Ceres residual is defined as $r = z - h(x)$, the Ceres Jacobian is the Jacobian of the usual optimization formulation multiplied by $-1$.
    • If the Ceres residual is defined as $r = h(x) - z$, the Ceres Jacobian agrees with the usual one.
      Whichever convention is chosen, the corresponding computations in marginalization change accordingly (a worked Jacobian for this problem follows this list).
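
For this concrete problem, with residual $r = y - \exp(a x^2 + b x + c)$ (the $z - h(x)$ convention), differentiating with respect to the parameter block $(a, b, c)$ gives

$\frac{\partial r}{\partial a} = -x^2 e^{a x^2 + b x + c}, \quad \frac{\partial r}{\partial b} = -x\, e^{a x^2 + b x + c}, \quad \frac{\partial r}{\partial c} = -e^{a x^2 + b x + c},$

which is exactly what the analytic cost function below writes into jacobians[0][0], jacobians[0][1], and jacobians[0][2].
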
class AnalyticCostFactor : public ceres::SizedCostFunction<1, 3> {
 public:
  AnalyticCostFactor(double x, double y) : x_(x), y_(y) {}

  bool Evaluate(double const* const* parameters, double* residuals,
                double** jacobians) const override {
    const double a = parameters[0][0];
    const double b = parameters[0][1];
    const double c = parameters[0][2];
    const double h = exp(a * x_ * x_ + b * x_ + c);
    // Residual uses the r = z - h(x) convention, so every Jacobian entry
    // below carries an extra factor of -1.
    residuals[0] = y_ - h;

    if (jacobians != nullptr && jacobians[0] != nullptr) {
      jacobians[0][0] = -x_ * x_ * h;
      jacobians[0][1] = -x_ * h;
      jacobians[0][2] = -h;
    }

    return true;
  }

  static ceres::CostFunction* Create(double x, double y) {
    return new AnalyticCostFactor(x, y);
  }

 private:
  double x_;
  double y_;
};

  ceres::Problem problem;
  for (int i = 0; i < N; ++i) {
    problem.AddResidualBlock(AnalyticCostFactor::Create(x_data[i], y_data[i]),
                             nullptr, state.data());
  }
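
To make sure the hand-written Jacobian agrees with a finite-difference estimate, Ceres can check all user-supplied Jacobians during the solve; a minimal sketch continuing from the problem built above (the tolerance value is illustrative):

  ceres::Solver::Options options;
  // Compare every user-supplied Jacobian against a numerically differenced
  // one; mismatches beyond the given relative precision are reported.
  options.check_gradients = true;
  options.gradient_check_relative_precision = 1e-6;

  ceres::Solver::Summary summary;
  ceres::Solve(options, &problem, &summary);
  std::cout << summary.BriefReport() << std::endl;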