Lecture 9: Designing the Front End, 3D-2D Pose Computation (Part 1)

Note: the code is adapted from Gao Xiang's slambook. Thanks to him for open-sourcing it, and congratulations on a smooth and successful thesis defense.

Designing the simplest 3D-2D based pose computation:

  • 1. Read the first frame's depth and color images (already aligned to each other and rectified with the camera intrinsics), then:

    detect keypoints, compute descriptors, and filter the keypoints further by checking that each one has a valid depth value (the back-projection used for this is sketched right after the list).

  • 2. Read the second frame's color image, detect keypoints, and compute descriptors.
  • 3. Find the best matches by comparing the Hamming distance between descriptors.
  • 4. Build the matched 3D-2D arrays and recover the camera pose with solvePnPRansac.
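The depth filtering in step 1 relies on the standard pinhole back-projection: a pixel (u, v) with depth d maps to the camera-frame point ( (u-cx)*d/fx, (v-cy)*d/fy, d ). A minimal sketch of that conversion is below; the helper name pixel2camera is my own and not part of slambook:

#include <opencv2/core/core.hpp>

// Back-project a pixel with known depth into the camera frame.
// fx, fy, cx, cy are the pinhole intrinsics taken from K.
cv::Point3f pixel2camera ( const cv::Point2f& p, double d,
                           double fx, double fy, double cx, double cy )
{
    return cv::Point3f ( ( p.x - cx ) * d / fx,   // X
                         ( p.y - cy ) * d / fy,   // Y
                         d );                     // Z
}

The same expression appears inline in the full listing below when pts_3d_valid is built.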

The full code is as follows:

// headers needed by the snippet below (assuming OpenCV 3.x and the old,
// non-templated Sophus used in slambook); the statements are meant to run inside main()
#include <iostream>
#include <fstream>
#include <vector>
#include <opencv2/core/core.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <Eigen/Core>
#include <sophus/so3.h>
#include <sophus/se3.h>
using namespace std;
using namespace cv;
using Eigen::Vector3d;
using Sophus::SE3;

cv::Ptr<cv::ORB> orb_;  // ORB detector and descriptor extractor
orb_ = cv::ORB::create ( 500, 1.2, 8 );   // up to 500 features, scale factor 1.2, 8 pyramid levels
vector<cv::KeyPoint>    keypoints1;    // keypoints in current frame
Mat descriptors1;
vector<cv::Point3f> pts_3d_valid;
Mat descriptors1_valid;
vector<cv::DMatch> feature_matches_;
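// pinhole intrinsic matrix K = [fx 0 cx; 0 fy cy; 0 0 1] (the values below are the TUM fr1 calibration)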
Mat K = ( Mat_<double>(3,3) << 517.3, 0, 325.1, 0, 516.5, 249.7, 0, 0, 1 );
Mat img_1     =imread("./rgb/3.026000.png");
Mat img_1depth=imread("./depth/3.026000.png",-1);
imshow("rgb",img_1);
imshow("depth",img_1depth);
waitKey(1);

orb_->detect ( img_1, keypoints1 );
cout<<"keypoints1.size is "<<keypoints1.size()<<endl;

orb_->compute ( img_1, keypoints1, descriptors1 );
cout<<"descriptors1.size is "<<descriptors1.size()<<endl;

descriptors1_valid = Mat();


ofstream out("3Dsetpoint_valid_choice.txt",ios::out);
out<<"K camera intrinsic is "<<K<<endl;
out<<"keypoints1.size() is "<<keypoints1.size()<<endl;
for ( size_t i=0; i<keypoints1.size(); i++ )
{
    int x = cvRound(keypoints1[i].pt.x);
    int y = cvRound(keypoints1[i].pt.y);
    double d = 0;   // earlier bug: the depth was marked -1 straight away instead of first checking whether a valid value exists in the neighbourhood
    ushort temp = img_1depth.ptr<ushort>(y)[x];
    if ( temp != 0 )
    {
        d = temp / 5000.0;   // TUM depth images store depth scaled by 5000 (5000 = 1 m)
    }
    else
    {
        // check the 4-connected neighbours for a valid depth reading
        int dx[4] = {-1,0,1,0};
        int dy[4] = {0,-1,0,1};
        for ( int k=0; k<4; k++ )   // renamed from i to avoid shadowing the outer loop index
        {
            int nx = x + dx[k];
            int ny = y + dy[k];
            if ( nx < 0 || ny < 0 || nx >= img_1depth.cols || ny >= img_1depth.rows )
                continue;           // skip neighbours outside the image
            temp = img_1depth.ptr<ushort>( ny )[nx];
            if ( temp != 0 )
            {
                d = temp / 5000.0;
            }
        }
    }
    if ( d == 0 )
        d = -1;   // still no valid depth: mark the point as invalid so it is skipped below
    if ( d > 0)
    {
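        // back-project the pixel into the camera frame: X = (u-cx)*d/fx, Y = (v-cy)*d/fy, Z = d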
        Vector3d p_cam = Vector3d((keypoints1[i].pt.x-K.at<double>(0,2))*d/K.at<double>(0,0), (keypoints1[i].pt.y-K.at<double>(1,2))*d/K.at<double>(1,1), d );
        pts_3d_valid.push_back( cv::Point3f( p_cam(0,0), p_cam(1,0), p_cam(2,0) ));
        descriptors1_valid.push_back(descriptors1.row(i));
    }
}

cout<<"pts_3d_valid.size is "<<pts_3d_valid.size()<<endl;
cout<<"descriptors1_valid.size is "<<descriptors1_valid.size()<<endl;

cv::Ptr<cv::ORB> orb_2;  // orb detector and computer
orb_2 = cv::ORB::create ( 500, 1.2, 8 );
vector<cv::KeyPoint>    keypoints2;    // keypoints in current frame
Mat                     descriptors2;  // descriptor in current frame
Mat img_2=imread("./3.183000.png",-1);

orb_2->detect ( img_2, keypoints2 );
orb_2->compute ( img_2, keypoints2, descriptors2 );
cout<<"descriptors2.size is "<<descriptors2.size()<<endl;

// match desp_ref and desp_curr, use OpenCV's brute force match
vector<cv::DMatch> matches;
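// ORB descriptors are binary strings, so brute-force matching uses the Hamming norm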
cv::BFMatcher matcher ( cv::NORM_HAMMING );
matcher.match ( descriptors1_valid, descriptors2, matches );

cout<<"descriptors1_valid.size is "<<descriptors1_valid.size()<<endl;
cout<<"descriptors2.size is "<<descriptors2.size()<<endl;
cout<<"matches.size is "<<matches.size()<<endl;
// select the best matches
float min_dis = std::min_element (
    matches.begin(), matches.end(),
    [] ( const cv::DMatch& m1, const cv::DMatch& m2 )
    {
        return m1.distance < m2.distance;
    } )->distance;
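// keep only matches whose distance is below twice the best distance, with a floor of 30 (the empirical rule used in slambook)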

feature_matches_.clear();
for ( cv::DMatch& m : matches )
{
    if ( m.distance < max<float> ( min_dis*2, 30.0 ) )
    {
        feature_matches_.push_back(m);
    }
}
cout<<"good matches: "<<feature_matches_.size()<<endl;

//construct the 3d 2d observations
vector<cv::Point3f> pts3d;
vector<cv::Point2f> pts2d;
for ( cv::DMatch m:feature_matches_ )
{
    pts3d.push_back( pts_3d_valid[m.queryIdx] );
    pts2d.push_back( keypoints2[m.trainIdx].pt );
}
cout<<"pts3d.size "<<pts3d.size()<<endl;
cout<<"pts2d.size "<<pts2d.size()<<endl;


Mat rvec, tvec, inliers;
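// RANSAC PnP: up to 100 iterations, 4.0 px reprojection error threshold, 0.99 confidence;
// 'inliers' returns the indices of the correspondences consistent with the estimated pose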
cv::solvePnPRansac( pts3d, pts2d, K, Mat(), rvec, tvec, false, 100, 4.0, 0.99, inliers );

int num_inliers_ = inliers.rows;
cout<<"pnp inliers: "<<num_inliers_<<endl;
SE3 T_c_r_estimated_ = SE3(
    Sophus::SO3(rvec.at<double>(0,0), rvec.at<double>(1,0), rvec.at<double>(2,0)),
    Vector3d( tvec.at<double>(0,0), tvec.at<double>(1,0), tvec.at<double>(2,0))
);
cout<<"Sophus::SO3(rvec.at<double>(0,0), rvec.at<double>(1,0), rvec.at<double>(2,0)) "<<Sophus::SO3(rvec.at<double>(0,0), rvec.at<double>(1,0), rvec.at<double>(2,0))<<endl;
cout<<"Vector3d( tvec.at<double>(0,0), tvec.at<double>(1,0), tvec.at<double>(2,0)) "<<Vector3d( tvec.at<double>(0,0), tvec.at<double>(1,0), tvec.at<double>(2,0))<<endl;
ofstream kout("out.txt",ios::out);
if ( kout.is_open() )   // eof() is not the right check for whether an output file opened
{
    kout<<"pts3d.size "<<pts3d.size()<<endl;
    kout<<"pts2d.size "<<pts2d.size()<<endl;
    kout<<"K camera intrinsic is \n"<<K<<endl;
    kout<<"\npts_3d_ref_                          "<<"  pts2d is "<<endl;
    for(int i=0;i<pts3d.size();i++)
    {
        kout<<pts3d[i]<<"            "<<pts2d[i]<<endl;
    }
    kout<<"T_c_r_estimated_ is \n"<<T_c_r_estimated_<<endl;
}

Next steps: add BA (bundle adjustment) optimization, and add an ICP-based pose-estimation option.