Implementing ICP Optimization in Ceres (Optimizing the Pose Only)

1. Optimizing the Pose Only

Constructing the functor class and the cost function:

struct ICPCeres
{
    ICPCeres ( Point3f uvw,Point3f xyz ) : _uvw(uvw),_xyz(xyz) {}
    // Compute the residual
    template <typename T>
    bool operator() (
            const T* const camera,     // pose parameters: 6 dimensions (angle-axis rotation + translation)
            T* residual ) const        // residual
    {
        T p[3];
        T point[3];
        point[0]=T(_xyz.x);
        point[1]=T(_xyz.y);
        point[2]=T(_xyz.z);
        AngleAxisRotatePoint(camera, point, p); // compute R * p' (rotate the second-frame point)
        p[0] += camera[3]; p[1] += camera[4]; p[2] += camera[5]; // add t: predicted coordinates in the first frame
        residual[0] = T(_uvw.x)-p[0];
        residual[1] = T(_uvw.y)-p[1];
        residual[2] = T(_uvw.z)-p[2];
        return true;
    }
    static ceres::CostFunction* Create(const Point3f uvw,const Point3f xyz) {
        return (new ceres::AutoDiffCostFunction<ICPCeres, 3, 6>( // 3 residuals, 6 pose parameters
                new ICPCeres(uvw,xyz)));
    }
    const Point3f _uvw;
    const Point3f _xyz;
};

Here, the constructor argument uvw is the point in the first frame's camera coordinates and xyz is the matched point in the second frame's camera coordinates; since only the pose is optimized, the second frame's points are treated as fixed. AngleAxisRotatePoint, declared in the header rotation.h, takes the camera pose, a 6-element array consisting of an angle-axis rotation vector followed by a translation vector (the focal length is not optimized and lens distortion is ignored), together with the second frame's point (a 3-element array), and computes Rp'. Adding the translation vector then gives the predicted coordinates in the first frame, so the residual is e = p - (Rp' + t).
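
Written out, the least-squares problem being solved is the standard point-to-point ICP objective (the notation below is added here for clarity and is not part of the original code):

    \min_{R,t} \; \frac{1}{2} \sum_{i=1}^{n} \left\| p_i - (R\,p_i' + t) \right\|^2

where p_i is the i-th point in the first frame, p_i' is its matched point in the second frame, and (R, t) is parameterized by the 6-element camera array (angle-axis rotation followed by translation).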

Initial value of the pose:

double camera[6] = {0, 1, 2, 0, 0, 0}; // first three entries: angle-axis rotation, last three: translation

Building the least-squares problem:

ceres::Problem problem;
    for (int i = 0; i < pts2.size(); ++i)
    {
        ceres::CostFunction* cost_function =
                ICPCeres::Create(pts2[i],pts1[i]);
        problem.AddResidualBlock(cost_function,
                                 NULL /* squared loss */,
                                 camera);
    }
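
Passing NULL as the loss function uses a plain squared loss. If the matched pairs may contain outliers, a robust kernel can be substituted. The following is a minimal sketch (not in the original code) using ceres::HuberLoss from the standard Ceres API, with the same problem, camera, pts1 and pts2 as above; the scale value 0.1 is an illustrative assumption:

// Sketch: same residual blocks as above, but with a Huber robust loss instead of NULL.
// The scale 0.1 is an assumed, illustrative value, not taken from the original post.
for (size_t i = 0; i < pts2.size(); ++i)
{
    ceres::CostFunction* cost_function = ICPCeres::Create(pts2[i], pts1[i]);
    problem.AddResidualBlock(cost_function,
                             new ceres::HuberLoss(0.1),  // robust kernel
                             camera);
}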

Configuring the solver and printing the result:

    ceres::Solver::Options options;
    options.linear_solver_type = ceres::DENSE_SCHUR;
    options.minimizer_progress_to_stdout = true;
    ceres::Solver::Summary summary;
    ceres::Solve(options, &problem, &summary);
    std::cout << summary.FullReport() << "\n";

    Mat R_vec = (Mat_<double>(3,1) << camera[0],camera[1],camera[2]); // convert the array to a cv vector
    Mat R_cvest;
    Rodrigues(R_vec,R_cvest); // Rodrigues' formula: rotation vector to rotation matrix
    cout<<"R_cvest="<<R_cvest<<endl;
    Eigen::Matrix<double,3,3> R_est;
    cv2eigen(R_cvest,R_est); // convert cv matrix to Eigen matrix
    cout<<"R_est="<<R_est<<endl;
    Eigen::Vector3d t_est(camera[3],camera[4],camera[5]);
    cout<<"t_est="<<t_est<<endl;
    Eigen::Isometry3d T(R_est); // build the transformation matrix and print it
    T.pretranslate(t_est);
    cout<<T.matrix()<<endl;
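
The cv::Rodrigues / cv2eigen round trip can also be avoided by converting the angle-axis part directly with Eigen. Below is a minimal sketch under the same 6-element parameterization; the helper name PoseFromCamera is hypothetical and not part of the original code:

#include <Eigen/Core>
#include <Eigen/Geometry>

// Hypothetical helper: build the 4x4 transform directly from the optimized
// 6-dof camera array (angle-axis rotation + translation) using Eigen only.
Eigen::Isometry3d PoseFromCamera(const double camera[6])
{
    Eigen::Vector3d rvec(camera[0], camera[1], camera[2]);
    const double angle = rvec.norm();
    Eigen::Matrix3d R = Eigen::Matrix3d::Identity();
    if (angle > 1e-12)
        R = Eigen::AngleAxisd(angle, rvec / angle).toRotationMatrix();
    Eigen::Isometry3d T = Eigen::Isometry3d::Identity();
    T.rotate(R);
    T.pretranslate(Eigen::Vector3d(camera[3], camera[4], camera[5]));
    return T;
}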

Optimization results:

/home/luoyongheng/study_slam/ch06/ceres_curve_fitting/cmake-build-debug/ICPCeres 1.png 2.png 1_depth.png 2_depth.png
[ INFO:0] Initialize OpenCL runtime...
-- Max dist : 95.000000 
-- Min dist : 7.000000 
found 81 matched point pairs in total
3d-3d pairs: 75
W=  11.8688 -0.717698   1.89486
 -1.88065   3.83391  -5.78219
  3.25846  -5.82734   9.65267
U=  0.592295  -0.805658 -0.0101195
 -0.418171  -0.318113   0.850845
  0.688709   0.499719   0.525319
V=   0.64736  -0.761401 -0.0345329
 -0.388765  -0.368829   0.844291
  0.655581   0.533135   0.534772
ICP via SVD results: 
R = [0.9972065647956205, 0.05834273442898395, -0.04663895869192642;
 -0.05787745545449197, 0.9982601221728663, 0.01126626067193232;
 0.04721511705620776, -0.008535444848613738, 0.9988482762174669]
t = [0.1379879629890436;
 -0.06551699470729977;
 -0.02981649388290597]
R_inv = [0.9972065647956205, -0.05787745545449197, 0.04721511705620776;
 0.05834273442898395, 0.9982601221728663, -0.008535444848613738;
 -0.04663895869192642, 0.01126626067193232, 0.9988482762174669]
t_inv = [-0.1399866702492463;
 0.0570979110227253;
 0.0369558999644324]
calling bundle adjustment
p1 = [-0.0374123, -0.830816, 2.7448]
p2 = [-0.0111479, -0.746763, 2.7652]
(R*p2+t) = [-0.04566300488482988;
 -0.7791822151698655;
 2.738046267661637]

p1 = [-0.243698, -0.117719, 1.5848]
p2 = [-0.299118, -0.0975683, 1.6558]
(R*p2+t) = [-0.2432120544305981;
 -0.1269486029625337;
 1.610786345002579]

p1 = [-0.627753, 0.160186, 1.3396]
p2 = [-0.709645, 0.159033, 1.4212]
(R*p2+t) = [-0.6266796777024646;
 0.1503229238263246;
 1.354883323538179]

p1 = [-0.323443, 0.104873, 1.4266]
p2 = [-0.399079, 0.12047, 1.4838]
(R*p2+t) = [-0.322150852559034;
 0.09455772952719488;
 1.432403794814274]

p1 = [-0.627221, 0.101454, 1.3116]
p2 = [-0.709709, 0.100216, 1.3998]
(R*p2+t) = [-0.6291763602679334;
 0.09137127679150189;
 1.334006907588644]

iter      cost      cost_change  |gradient|   |step|    tr_ratio  tr_radius  ls_iter  iter_time  total_time
   0  1.187529e+02    0.00e+00    1.49e+02   0.00e+00   0.00e+00  1.00e+04        0    6.01e-05    1.56e-04
   1  3.468083e+01    8.41e+01    5.94e+01   1.29e+00   8.95e-01  1.98e+04        0    1.14e-04    2.93e-04
   2  6.366072e+00    2.83e+01    3.48e+01   1.47e+00   1.06e+00  5.93e+04        0    6.39e-05    3.67e-04
   3  2.387209e+00    3.98e+00    1.63e+01   7.92e-01   8.04e-01  7.66e+04        0    5.98e-05    4.34e-04
   4  9.092359e-01    1.48e+00    3.68e-01   2.88e-01   1.00e+00  2.30e+05        0    7.12e-05    5.12e-04
   5  9.076072e-01    1.63e-03    1.65e-02   1.80e-02   1.05e+00  6.89e+05        0    7.32e-05    5.91e-04
   6  9.075968e-01    1.05e-05    2.11e-03   2.14e-03   1.13e+00  2.07e+06        0    5.82e-05    6.56e-04

Solver Summary (v 2.0.0-eigen-(3.2.0)-lapack-suitesparse-(4.2.1)-cxsparse-(3.1.2)-eigensparse-openmp-no_tbb)

                                     Original                  Reduced
Parameter blocks                            1                        1
Parameters                                  6                        6
Residual blocks                            75                       75
Residuals                                 225                      225

Minimizer                        TRUST_REGION

Dense linear algebra library            EIGEN
Trust region strategy     LEVENBERG_MARQUARDT

                                        Given                     Used
Linear solver                     DENSE_SCHUR              DENSE_SCHUR
Threads                                     1                        1
Linear solver ordering              AUTOMATIC                        1
Schur structure                         3,6,0                    d,d,d

Cost:
Initial                          1.187529e+02
Final                            9.075968e-01
Change                           1.178453e+02

Minimizer iterations                        7
Successful steps                            7
Unsuccessful steps                          0

Time (in seconds):
Preprocessor                         0.000096

  Residual only evaluation           0.000068 (7)
  Jacobian & residual evaluation     0.000173 (7)
  Linear solver                      0.000224 (7)
Minimizer                            0.000606

Postprocessor                        0.000003
Total                                0.000705

Termination:                      CONVERGENCE (Function tolerance reached. |cost_change|/cost: 1.878852e-07 <= 1.000000e-06)

R_cvest=[0.9972083771125694, -0.05784482124586175, 0.04721683250229661;
 0.05830249021928786, 0.9982638507212928, -0.008372811793325479;
 -0.04665053323109476, 0.01110229697544197, 0.9988495716328478]
R_est=   0.997208  -0.0578448   0.0472168
  0.0583025    0.998264 -0.00837281
 -0.0466505   0.0111023     0.99885
t_est=-0.139986
0.0568282
0.0369365
   0.997208  -0.0578448   0.0472168   -0.139986
  0.0583025    0.998264 -0.00837281   0.0568282
 -0.0466505   0.0111023     0.99885   0.0369365
          0           0           0           1

Full code:

#include <iostream>
#include <ceres/ceres.h>
#include <opencv2/core/core.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <opencv2/core/eigen.hpp>
#include <Eigen/Core>
#include <Eigen/Geometry>
#include <Eigen/SVD>
#include <chrono>
#include "rotation.h"
using namespace std;
using namespace cv;
using namespace Eigen;
struct ICPCeres
{
    ICPCeres ( Point3f uvw,Point3f xyz ) : _uvw(uvw),_xyz(xyz) {}
    // Compute the residual
    template <typename T>
    bool operator() (
            const T* const camera,     // pose parameters: 6 dimensions (angle-axis rotation + translation)
            T* residual ) const        // residual
    {
        T p[3];
        T point[3];
        point[0]=T(_xyz.x);
        point[1]=T(_xyz.y);
        point[2]=T(_xyz.z);
        AngleAxisRotatePoint(camera, point, p); // compute R * p' (rotate the second-frame point)
        p[0] += camera[3]; p[1] += camera[4]; p[2] += camera[5]; // add t: predicted coordinates in the first frame
        residual[0] = T(_uvw.x)-p[0];
        residual[1] = T(_uvw.y)-p[1];
        residual[2] = T(_uvw.z)-p[2];
        return true;
    }
    static ceres::CostFunction* Create(const Point3f uvw,const Point3f xyz) {
        return (new ceres::AutoDiffCostFunction<ICPCeres, 3, 6>( // 3 residuals, 6 pose parameters
                new ICPCeres(uvw,xyz)));
    }
    const Point3f _uvw;
    const Point3f _xyz;
};
void find_feature_matches (
        const Mat& img_1, const Mat& img_2,
        std::vector<KeyPoint>& keypoints_1,
        std::vector<KeyPoint>& keypoints_2,
        std::vector< DMatch >& matches );

// Convert pixel coordinates to normalized camera coordinates
Point2d pixel2cam ( const Point2d& p, const Mat& K );

void pose_estimation_3d3d (
        const vector<Point3f>& pts1,
        const vector<Point3f>& pts2,
        Mat& R, Mat& t
);
int main ( int argc, char** argv ) {
    if (argc != 5) {
        cout << "usage: pose_estimation_3d3d img1 img2 depth1 depth2" << endl;
        return 1;
    }
    double camera[6] = {0, 1, 2, 0, 0, 0}; // first three entries: angle-axis rotation, last three: translation
    //-- Read the images
    Mat img_1 = imread(argv[1], CV_LOAD_IMAGE_COLOR);
    Mat img_2 = imread(argv[2], CV_LOAD_IMAGE_COLOR);

    vector<KeyPoint> keypoints_1, keypoints_2;
    vector<DMatch> matches;
    find_feature_matches(img_1, img_2, keypoints_1, keypoints_2, matches);
    cout << "一共找到了" << matches.size() << "组匹配点" << endl;

    // Build the 3D points
    Mat depth1 = imread(argv[3], CV_LOAD_IMAGE_UNCHANGED);       // the depth map is a 16-bit unsigned, single-channel image
    Mat depth2 = imread(argv[4], CV_LOAD_IMAGE_UNCHANGED);       // the depth map is a 16-bit unsigned, single-channel image
    Mat K = (Mat_<double>(3, 3) << 520.9, 0, 325.1, 0, 521.0, 249.7, 0, 0, 1);
    vector<Point3f> pts1, pts2;

    for (DMatch m:matches) {
        ushort d1 = depth1.ptr<unsigned short>(int(keypoints_1[m.queryIdx].pt.y))[int(keypoints_1[m.queryIdx].pt.x)];
        ushort d2 = depth2.ptr<unsigned short>(int(keypoints_2[m.trainIdx].pt.y))[int(keypoints_2[m.trainIdx].pt.x)];
        if (d1 == 0 || d2 == 0)   // bad depth
            continue;
        Point2d p1 = pixel2cam(keypoints_1[m.queryIdx].pt, K);
        Point2d p2 = pixel2cam(keypoints_2[m.trainIdx].pt, K);
        float dd1 = float(d1) / 5000.0;   // depth scale factor of the TUM RGB-D dataset
        float dd2 = float(d2) / 5000.0;
        pts1.push_back(Point3f(p1.x * dd1, p1.y * dd1, dd1));
        pts2.push_back(Point3f(p2.x * dd2, p2.y * dd2, dd2));
    }

    cout << "3d-3d pairs: " << pts1.size() << endl;
    Mat R, t;
    pose_estimation_3d3d(pts1, pts2, R, t);
    cout << "ICP via SVD results: " << endl;
    cout << "R = " << R << endl;
    cout << "t = " << t << endl;
    cout << "R_inv = " << R.t() << endl;
    cout << "t_inv = " << -R.t() * t << endl;

    cout << "calling bundle adjustment" << endl;
    // verify p1 = R*p2 + t
    for ( int i=0; i<5; i++ )
    {
        cout<<"p1 = "<<pts1[i]<<endl;
        cout<<"p2 = "<<pts2[i]<<endl;
        cout<<"(R*p2+t) = "<<
            R * (Mat_<double>(3,1)<<pts2[i].x, pts2[i].y, pts2[i].z) + t
            <<endl;
        cout<<endl;
    }
    ceres::Problem problem;
    for (int i = 0; i < pts2.size(); ++i)
    {
        ceres::CostFunction* cost_function =
                ICPCeres::Create(pts2[i],pts1[i]);
        problem.AddResidualBlock(cost_function,
                                 NULL /* squared loss */,
                                 camera);
    }
    ceres::Solver::Options options;
    options.linear_solver_type = ceres::DENSE_SCHUR;
    options.minimizer_progress_to_stdout = true;
    ceres::Solver::Summary summary;
    ceres::Solve(options, &problem, &summary);
    std::cout << summary.FullReport() << "\n";

    Mat R_vec = (Mat_<double>(3,1) << camera[0],camera[1],camera[2]); // convert the array to a cv vector
    Mat R_cvest;
    Rodrigues(R_vec,R_cvest); // Rodrigues' formula: rotation vector to rotation matrix
    cout<<"R_cvest="<<R_cvest<<endl;
    Eigen::Matrix<double,3,3> R_est;
    cv2eigen(R_cvest,R_est); // convert cv matrix to Eigen matrix
    cout<<"R_est="<<R_est<<endl;
    Eigen::Vector3d t_est(camera[3],camera[4],camera[5]);
    cout<<"t_est="<<t_est<<endl;
    Eigen::Isometry3d T(R_est); // build the transformation matrix and print it
    T.pretranslate(t_est);
    cout<<T.matrix()<<endl;

    return 0;
}
void find_feature_matches ( const Mat& img_1, const Mat& img_2,
                            std::vector<KeyPoint>& keypoints_1,
                            std::vector<KeyPoint>& keypoints_2,
                            std::vector< DMatch >& matches )
{
    //-- Initialization
    Mat descriptors_1, descriptors_2;
    // used in OpenCV3
    Ptr<FeatureDetector> detector = ORB::create();
    Ptr<DescriptorExtractor> descriptor = ORB::create();
    // use this if you are in OpenCV2
    // Ptr<FeatureDetector> detector = FeatureDetector::create ( "ORB" );
    // Ptr<DescriptorExtractor> descriptor = DescriptorExtractor::create ( "ORB" );
    Ptr<DescriptorMatcher> matcher  = DescriptorMatcher::create("BruteForce-Hamming");
    //-- Step 1: detect Oriented FAST corner locations
    detector->detect ( img_1,keypoints_1 );
    detector->detect ( img_2,keypoints_2 );

    //-- Step 2: compute BRIEF descriptors from the corner locations
    descriptor->compute ( img_1, keypoints_1, descriptors_1 );
    descriptor->compute ( img_2, keypoints_2, descriptors_2 );

    //-- Step 3: match the BRIEF descriptors of the two images using the Hamming distance
    vector<DMatch> match;
    // BFMatcher matcher ( NORM_HAMMING );
    matcher->match ( descriptors_1, descriptors_2, match );

    //-- Step 4: filter the matched point pairs
    double min_dist=10000, max_dist=0;

    // Find the minimum and maximum distance among all matches, i.e. the distances between the most similar and the most dissimilar pairs
    for ( int i = 0; i < descriptors_1.rows; i++ )
    {
        double dist = match[i].distance;
        if ( dist < min_dist ) min_dist = dist;
        if ( dist > max_dist ) max_dist = dist;
    }

    printf ( "-- Max dist : %f \n", max_dist );
    printf ( "-- Min dist : %f \n", min_dist );

    // A match is considered wrong when the descriptor distance exceeds twice the minimum distance. Since the minimum distance can be very small, an empirical lower bound of 30 is used.
    for ( int i = 0; i < descriptors_1.rows; i++ )
    {
        if ( match[i].distance <= max ( 2*min_dist, 30.0 ) )
        {
            matches.push_back ( match[i] );
        }
    }
}

Point2d pixel2cam ( const Point2d& p, const Mat& K )
{
    return Point2d
            (
                    ( p.x - K.at<double> ( 0,2 ) ) / K.at<double> ( 0,0 ),
                    ( p.y - K.at<double> ( 1,2 ) ) / K.at<double> ( 1,1 )
            );
}

void pose_estimation_3d3d (
        const vector<Point3f>& pts1,
        const vector<Point3f>& pts2,
        Mat& R, Mat& t
)
{
    Point3f p1, p2;     // center of mass
    int N = pts1.size();
    for ( int i=0; i<N; i++ )
    {
        p1 += pts1[i];
        p2 += pts2[i];
    }
    p1 = Point3f( Vec3f(p1) /  N);
    p2 = Point3f( Vec3f(p2) / N);
    vector<Point3f>     q1 ( N ), q2 ( N ); // remove the center
    for ( int i=0; i<N; i++ )
    {
        q1[i] = pts1[i] - p1;
        q2[i] = pts2[i] - p2;
    }

    // compute q1*q2^T
    Eigen::Matrix3d W = Eigen::Matrix3d::Zero();
    for ( int i=0; i<N; i++ )
    {
        W += Eigen::Vector3d ( q1[i].x, q1[i].y, q1[i].z ) * Eigen::Vector3d ( q2[i].x, q2[i].y, q2[i].z ).transpose();
    }
    cout<<"W="<<W<<endl;

    // SVD on W
    Eigen::JacobiSVD<Eigen::Matrix3d> svd ( W, Eigen::ComputeFullU|Eigen::ComputeFullV );
    Eigen::Matrix3d U = svd.matrixU();
    Eigen::Matrix3d V = svd.matrixV();
    cout<<"U="<<U<<endl;
    cout<<"V="<<V<<endl;

    Eigen::Matrix3d R_ = U* ( V.transpose() );
    Eigen::Vector3d t_ = Eigen::Vector3d ( p1.x, p1.y, p1.z ) - R_ * Eigen::Vector3d ( p2.x, p2.y, p2.z );

    // convert to cv::Mat
    R = ( Mat_<double> ( 3,3 ) <<
                               R_ ( 0,0 ), R_ ( 0,1 ), R_ ( 0,2 ),
            R_ ( 1,0 ), R_ ( 1,1 ), R_ ( 1,2 ),
            R_ ( 2,0 ), R_ ( 2,1 ), R_ ( 2,2 )
    );
    t = ( Mat_<double> ( 3,1 ) << t_ ( 0,0 ), t_ ( 1,0 ), t_ ( 2,0 ) );
}
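
For reference, pose_estimation_3d3d above implements the closed-form SVD alignment: with centroids p and p' of the two point sets and de-meaned points q_i = p_i - p, q_i' = p_i' - p', it forms

    W = \sum_{i=1}^{n} q_i \, q_i'^{\top}, \qquad W = U \Sigma V^{\top},

and takes R = U V^{\top} and t = p - R p'. These are the W, U, V and the R, t printed under "ICP via SVD results:" in the output above, which serve as an independent solution to compare against the Ceres refinement.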

The header rotation.h is:

#ifndef ROTATION_H
#define ROTATION_H

#include <algorithm>
#include <cmath>
#include <limits>

//
// math functions needed for rotation conversion. 

// dot and cross products

template<typename T> 
inline T DotProduct(const T x[3], const T y[3]) {
  return (x[0] * y[0] + x[1] * y[1] + x[2] * y[2]);
}

template<typename T>
inline void CrossProduct(const T x[3], const T y[3], T result[3]){
  result[0] = x[1] * y[2] - x[2] * y[1];
  result[1] = x[2] * y[0] - x[0] * y[2];
  result[2] = x[0] * y[1] - x[1] * y[0];
}


//


// Converts from an angle axis to a quaternion:
template<typename T>
inline void AngleAxisToQuaternion(const T* angle_axis, T* quaternion){
  const T& a0 = angle_axis[0];
  const T& a1 = angle_axis[1];
  const T& a2 = angle_axis[2];
  const T theta_squared = a0 * a0 + a1 * a1 + a2 * a2;
  
  
  if(theta_squared > T(std::numeric_limits<double>::epsilon()) ){
    const T theta = sqrt(theta_squared);
    const T half_theta = theta * T(0.5);
    const T k = sin(half_theta)/theta;
    quaternion[0] = cos(half_theta);
    quaternion[1] = a0 * k;
    quaternion[2] = a1 * k;
    quaternion[3] = a2 * k;
  }
  else{ // in case if theta_squared is zero
    const T k(0.5);
    quaternion[0] = T(1.0);
    quaternion[1] = a0 * k;
    quaternion[2] = a1 * k;
    quaternion[3] = a2 * k;
  }
}


template<typename T>
inline void QuaternionToAngleAxis(const T* quaternion, T* angle_axis){
  const T& q1 = quaternion[1];
  const T& q2 = quaternion[2];
  const T& q3 = quaternion[3];
  const T sin_squared_theta = q1 * q1 + q2 * q2 + q3 * q3;
  
  // For quaternions representing non-zero rotation, the conversion
  // is numerically stable
  if(sin_squared_theta > T(std::numeric_limits<double>::epsilon()) ){
    const T sin_theta = sqrt(sin_squared_theta);
    const T& cos_theta = quaternion[0];
    
    // If cos_theta is negative, theta is greater than pi/2, which
    // means that angle for the angle_axis vector which is 2 * theta
    // would be greater than pi...
    
    const T two_theta = T(2.0) * ((cos_theta < 0.0)
				  ? atan2(-sin_theta, -cos_theta)
				  : atan2(sin_theta, cos_theta));
    const T k = two_theta / sin_theta;
    
    angle_axis[0] = q1 * k;
    angle_axis[1] = q2 * k;
    angle_axis[2] = q3 * k;
  }
  else{
    // For zero rotation, sqrt() will produce NaN in derivative since
    // the argument is zero. By approximating with a Taylor series, 
    // and truncating at one term, the value and first derivatives will be 
    // computed correctly when Jets are used..
    const T k(2.0);
    angle_axis[0] = q1 * k;
    angle_axis[1] = q2 * k;
    angle_axis[2] = q3 * k;
  }
  
}


template<typename T>
inline void AngleAxisRotatePoint(const T angle_axis[3], const T pt[3], T result[3]) {
  const T theta2 = DotProduct(angle_axis, angle_axis);
  if (theta2 > T(std::numeric_limits<double>::epsilon())) {
    // Away from zero, use the Rodrigues formula
    //
    //   result = pt costheta +
    //            (w x pt) * sintheta +
    //            w (w . pt) (1 - costheta)
    //
    // We want to be careful to only evaluate the square root if the
    // norm of the angle_axis vector is greater than zero. Otherwise
    // we get a division by zero.
    //
    const T theta = sqrt(theta2);
    const T costheta = cos(theta);
    const T sintheta = sin(theta);
    const T theta_inverse = 1.0 / theta;

    const T w[3] = { angle_axis[0] * theta_inverse,
                     angle_axis[1] * theta_inverse,
                     angle_axis[2] * theta_inverse };

    // Explicitly inlined evaluation of the cross product for
    // performance reasons.
    /*const T w_cross_pt[3] = { w[1] * pt[2] - w[2] * pt[1],
                              w[2] * pt[0] - w[0] * pt[2],
                              w[0] * pt[1] - w[1] * pt[0] };*/
    T w_cross_pt[3];
    CrossProduct(w, pt, w_cross_pt);                          


    const T tmp = DotProduct(w, pt) * (T(1.0) - costheta);
    //    (w[0] * pt[0] + w[1] * pt[1] + w[2] * pt[2]) * (T(1.0) - costheta);

    result[0] = pt[0] * costheta + w_cross_pt[0] * sintheta + w[0] * tmp;
    result[1] = pt[1] * costheta + w_cross_pt[1] * sintheta + w[1] * tmp;
    result[2] = pt[2] * costheta + w_cross_pt[2] * sintheta + w[2] * tmp;
  } else {
    // Near zero, the first order Taylor approximation of the rotation
    // matrix R corresponding to a vector w and angle w is
    //
    //   R = I + hat(w) * sin(theta)
    //
    // But sintheta ~ theta and theta * w = angle_axis, which gives us
    //
    //  R = I + hat(w)
    //
    // and actually performing multiplication with the point pt, gives us
    // R * pt = pt + w x pt.
    //
    // Switching to the Taylor expansion near zero provides meaningful
    // derivatives when evaluated using Jets.
    //
    // Explicitly inlined evaluation of the cross product for
    // performance reasons.
    /*const T w_cross_pt[3] = { angle_axis[1] * pt[2] - angle_axis[2] * pt[1],
                              angle_axis[2] * pt[0] - angle_axis[0] * pt[2],
                              angle_axis[0] * pt[1] - angle_axis[1] * pt[0] };*/
    T w_cross_pt[3];
    CrossProduct(angle_axis, pt, w_cross_pt); 

    result[0] = pt[0] + w_cross_pt[0];
    result[1] = pt[1] + w_cross_pt[1];
    result[2] = pt[2] + w_cross_pt[2];
  }
}

#endif // rotation.h
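
AngleAxisRotatePoint above evaluates Rodrigues' rotation formula (restated here for reference): with \theta = \|w\| and unit axis \hat{w} = w / \theta,

    R(w)\,p = p\cos\theta + (\hat{w} \times p)\sin\theta + \hat{w}\,(\hat{w} \cdot p)(1 - \cos\theta),

and it falls back to the first-order approximation R(w)\,p \approx p + w \times p when \theta is near zero, so that the derivatives remain well defined when evaluated with Ceres Jets.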

 
