Pinhole-camera bundle adjustment with Ceres

Following the Ceres tutorial at
http://ceres-solver.org/nnls_tutorial.html#bundle-adjustment
this implements bundle adjustment for a pinhole camera.
The main work is:
(1) Generate ideal (noise-free) observation data.
(2) Feed the generated ideal data into the BA problem as its input.
(3) Change the projection model to a distortion-free pinhole model (its residual is sketched right below).
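With the distortion removed, the reprojection residual that the cost functor below evaluates reduces to the bare pinhole model (written here in pseudo-notation; fx, fy, cx, cy are the entries of the shared intrinsics block K):

    p = AngleAxisRotate(camera[0..2], point) + camera[3..5]
    predicted_x = fx * p[0] / p[2] + cx
    predicted_y = fy * p[1] / p[2] + cy
    residual = (predicted_x - observed_x, predicted_y - observed_y)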

#include <ros/ros.h>
#include <visualization_msgs/Marker.h>
#include <time.h>
#include <unistd.h>
#include <iostream>
#include <cmath>
#include <cstdio>
#include <eigen3/Eigen/Core>
#include <eigen3/Eigen/Geometry>
#include <opencv2/opencv.hpp>
#include <tf/transform_broadcaster.h>
#include "ceres/ceres.h"
#include "ceres/rotation.h"
using namespace cv;
using namespace std;
using namespace Eigen;
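// Maps each 2D observation back to the camera and the 3D point that produced it.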
struct ObsIndexPair{
   int camIndex;
   int pointIndex;
};
// Templated pinhole camera cost functor for use with Ceres. Unlike the original
// Snavely model there is no distortion: the camera pose is parameterized by
// 6 values (3 for the angle-axis rotation, 3 for translation) and a separate
// 4-parameter intrinsics block holds fx, fy, cx, cy.
struct SnavelyReprojectionError {
  SnavelyReprojectionError(double observed_x, double observed_y)
      : observed_x(observed_x), observed_y(observed_y) {}
  template <typename T>
  bool operator()(const T* const camera,
                  const T* const point,
                  const T* const K,
                  T* residuals) const {
    // camera[0,1,2] are the angle-axis rotation.
    T p[3];
    ceres::AngleAxisRotatePoint(camera, point, p);
    // camera[3,4,5] are the translation.
    p[0] += camera[3];
    p[1] += camera[4];
    p[2] += camera[5];
    // Project onto the normalized image plane. Unlike Snavely's Bundler model,
    // there is no sign flip and no distortion: this is an ideal pinhole camera.
    T xp = p[0] / p[2];
    T yp = p[1] / p[2];
    // Apply the intrinsics K = (fx, fy, cx, cy).
    const T& fx = K[0];
    const T& fy = K[1];
    const T& cx = K[2];
    const T& cy = K[3];

    // Compute the final projected point position.
    T predicted_x = fx * xp + cx;
    T predicted_y = fy * yp + cy;
    // The error is the difference between the predicted and observed position.
    residuals[0] = predicted_x - observed_x;
    residuals[1] = predicted_y - observed_y;
    return true;
  }
  // Factory to hide the construction of the CostFunction object from
  // the client code.
  static ceres::CostFunction* Create(const double observed_x,
                                     const double observed_y) {
    return (new ceres::AutoDiffCostFunction<SnavelyReprojectionError,2,6,3,4>(
                new SnavelyReprojectionError(observed_x, observed_y)));
  }
  double observed_x;
  double observed_y;
};


int main( int argc, char** argv )
{
    ros::init(argc, argv, "projecttest");

    float len=8;
    //1. camera intrinsics
    float fx = 200,fy=200, w = 2000, h = 2000;
    float cx=w/2;
    float cy=h/2;
    cv::Mat K = (Mat_<float>(3, 3) <<
                 fx, 0, cx,
                 0, fy, cy,
                 0, 0, 1);
    // All distortion coefficients are zero, so projectPoints() generates
    // ideal distortion-free pinhole observations.
    float k1 = 0.0f, k2 = 0.0f, p1 = 0.0f, p2 = 0.0f, k3 = 0.0f;
    cv::Mat coff = (Mat_<float>(1, 5) << k1, k2, p1, p2, k3);
    //2. camera poses (4x4 world-to-camera transforms)
    cv::Mat cv_R12 = (Mat_<float>(3, 3) <<
                      0, 0, 1,
                      -1, 0, 0,
                      0, -1, 0);
    cv::Mat cv_t12 = (Mat_<float>(3, 1) <<0, 0, len/2);

    vector<Mat> vCams;

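    // Build four world-to-camera transforms: every camera shares the rotation
    // cv_R12 and they differ only in the z component of the translation.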
    for(int i=0;i<4;i++)
    {
        Mat T = cv::Mat::eye(4,4,cv_R12.type());
        cv_R12.copyTo(T.rowRange(0,3).colRange(0,3));
        cv::Mat cv_ti = (Mat_<float>(3, 1) <<0, 0, (float) i);
        cv_ti.copyTo(T.rowRange(0,3).col(3));
        vCams.push_back(T);
    }

    //3. 3D points: the ABCD face of a cube of side len centered at cube_center
    //   (only 4 of the 8 cube vertices ABCD EFGH are used)
    Point3f cube_center={len,0,len/2};
    Point3f A={cube_center.x-len/2,cube_center.y+len/2,cube_center.z-len/2};
    Point3f B={cube_center.x-len/2,cube_center.y-len/2,cube_center.z-len/2};
    Point3f C={cube_center.x+len/2,cube_center.y-len/2,cube_center.z-len/2};
    Point3f D={cube_center.x+len/2,cube_center.y+len/2,cube_center.z-len/2};
    cout<<"ABCD"<<A<< B<<C<<D<<endl;
    vector<Point3f> vp3d;
    vp3d.push_back(A);
    vp3d.push_back(B);
    vp3d.push_back(C);
    vp3d.push_back(D);

    //4. project the 3D points into every camera to generate the ideal 2D observations
    int num_observations=0;
    vector<pair<ObsIndexPair,Point2f>> sObs;
    for (size_t i=0;i<vCams.size();i++) {
        Mat T=vCams[i];
        cv::Mat R = T.rowRange(0,3).colRange(0,3);
        cv::Mat t = T.rowRange(0,3).col(3);
        Mat rvec;
        cv::Rodrigues(R,rvec);
        //cout<<"rvec"<<rvec<<"t"<<t<<endl;
        vector<Point2f> prj2d;
        cv::projectPoints(vp3d,rvec,t,K,coff,prj2d);
        for(size_t j=0;j<prj2d.size();j++)
        {
            num_observations++;
            ObsIndexPair idx;
            idx.camIndex=(int)i;
            idx.pointIndex=(int)j;
            sObs.push_back(make_pair(idx,prj2d[j]));
        }
    }
//    vector<pair<ObsIndexPair,Point2f>>::iterator it;
//    for(it=sObs.begin();it!=sObs.end();it++)
//    {
//        cout<<"it camIndex"<<(it->first).camIndex<<endl;
//        cout<<"it pointIndex"<<(it->first).pointIndex<<endl;
//        cout<<" x= "<<(it->second).x<<endl;
//        cout<<" y= "<<(it->second).y<<endl;
//    }

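    //5. build the Ceres problem: pack the 3D points (3 doubles each), the camera
    //   poses (angle-axis rotation + translation, 6 doubles each) and the shared
    //   intrinsics (fx, fy, cx, cy) into flat arrays, since Ceres parameter
    //   blocks are plain double pointers.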
    ceres::Problem problem;
    double* ppoints=new double[3*vp3d.size()];
    for(size_t i=0;i<vp3d.size();i++)
    {
        double* p=ppoints+3*i;
        *(p)=vp3d[i].x;
        *(p+1)=vp3d[i].y;
        *(p+2)=vp3d[i].z;
    }

    double* pcams=new double[6*vCams.size()];

    double* inPara=new double[4];
    *inPara=fx;
    *(inPara+1)=fy;
    *(inPara+2)=cx;
    *(inPara+3)=cy;
    for(size_t i=0;i<vCams.size();i++)
    {
        double* camPara=pcams+6*i;
        Mat T=vCams[i];
        cv::Mat R = T.rowRange(0,3).colRange(0,3);
        cv::Mat t = T.rowRange(0,3).col(3);
        Mat rvec;
        cv::Rodrigues(R,rvec);
        *(camPara)=rvec.at<float>(0,0);
        *(camPara+1)=rvec.at<float>(1,0);
        *(camPara+2)=rvec.at<float>(2,0);
        *(camPara+3)=t.at<float>(0,0);
        *(camPara+4)=t.at<float>(1,0);
        *(camPara+5)=t.at<float>(2,0);
    }

    double* point0=ppoints;

    double* camera0=pcams;

    std::cout<<"before bd point 0 pos x="<<*(point0)<<" y="<<*(point0+1)<<" z="<<*(point0+2)<<std::endl;
    std::cout<<"before bd camera 0 pos rx="<<*(camera0+0)*180/M_PI<<" ry="<<*(camera0+1)*180/M_PI<<" rz"<<*(camera0+2)*180/M_PI<<std::endl;
    std::cout<<"before bd camera 0 pos x="<<*(camera0+3)<<" y="<<*(camera0+4)<<" z"<<*(camera0+5)<<std::endl;

    for (int i = 0; i < num_observations; ++i)
    {
        // Each residual block takes a camera pose, a 3D point and the shared
        // intrinsics as input and outputs a 2-dimensional residual. Internally,
        // the cost functor stores the observed image location and compares the
        // reprojection against the observation.
        ceres::CostFunction* cost_function =
                SnavelyReprojectionError::Create(sObs[i].second.x,sObs[i].second.y);

        int pointIndex= sObs[i].first.pointIndex;

        double* point=ppoints+3*pointIndex;

        int camIndex=sObs[i].first.camIndex;
        double* camPara=pcams+6*camIndex;
        problem.AddResidualBlock(cost_function,
                                 NULL /* squared loss */,
                                 camPara,
                                 point,
                                 inPara);
    }
    // Make Ceres automatically detect the bundle structure. Note that the
    // standard solver, SPARSE_NORMAL_CHOLESKY, also works fine but it is slower
    // for standard bundle adjustment problems.
    ceres::Solver::Options options;
    options.linear_solver_type = ceres::DENSE_SCHUR;
    options.minimizer_progress_to_stdout = true;
    ceres::Solver::Summary summary;
    ceres::Solve(options, &problem, &summary);

    std::cout<<"after bd point 0 pos x="<<*(point0)<<" y="<<*(point0+1)<<" z="<<*(point0+2)<<std::endl;
    std::cout<<"after bd camera 0 pos rx="<<*(camera0+0)*180/M_PI<<" ry="<<*(camera0+1)*180/M_PI<<" rz"<<*(camera0+2)*180/M_PI<<std::endl;
    std::cout<<"after bd camera 0 pos x="<<*(camera0+3)<<" y="<<*(camera0+4)<<" z"<<*(camera0+5)<<std::endl;

  return 0;
}
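Because the observations were generated with exactly the same pinhole model and no noise, the initial residuals are already essentially zero (up to float precision in projectPoints) and the solver has nothing to do, so the "before" and "after" printouts should match. To make the optimization do real work, one option (not part of the original code, just a sketch) is to perturb an initial point estimate and, if the intrinsics should not be re-estimated, freeze the K block with Ceres' SetParameterBlockConstant. These lines would go right before the call to ceres::Solve:

    // Keep the shared intrinsics fx, fy, cx, cy fixed during optimization.
    problem.SetParameterBlockConstant(inPara);

    // Perturb the initial estimate of point 0 so BA has an actual error to minimize
    // (the perturbation magnitudes here are arbitrary choices for illustration).
    ppoints[0] += 0.05;
    ppoints[1] -= 0.03;
    ppoints[2] += 0.02;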

