Using OpenCV solvePnPRansac

Reference:
https://blog.csdn.net/xuelangwin/article/details/80847337

1. Passing normalized coordinates

The code below verifies whether the pose recovered by solvePnP/solvePnPRansac is still correct when the image points are first converted to the normalized image plane.
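Here "normalized coordinates" means back-projecting each pixel (u, v) onto the z = 1 camera plane: x = (u - cx) / fx, y = (v - cy) / fy. When the 2D points are already normalized, the camera matrix passed to solvePnP/solvePnPRansac must be the identity and the distortion coefficients must be empty, which is exactly what the listing below does.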

#include <time.h>
#include <unistd.h>
#include <iostream>
#include <eigen3/Eigen/Core>
#include <eigen3/Eigen/Geometry>
#include <opencv2/opencv.hpp>

using namespace cv;
using namespace std;
using namespace Eigen;

// Fill `points` with random 3D points inside the box [-1,1] x [-1,1] x [5,10].
void generate3DPointCloud(std::vector<cv::Point3f>& points)
{
    cv::Point3f pmin = cv::Point3f(-1, -1, 5);
    cv::Point3f pmax = cv::Point3f(1, 1, 10);
    cv::RNG rng = cv::theRNG(); // global RNG, so the input 3D points are reproducible

    for (size_t i = 0; i < points.size(); i++)
    {
        float _x = rng.uniform(pmin.x, pmax.x);
        float _y = rng.uniform(pmin.y, pmax.y);
        float _z = rng.uniform(pmin.z, pmax.z);
        points[i] = cv::Point3f(_x, _y, _z);
    }
}

// Generate a random camera matrix (not used in main below; kept for experiments).
void generateCameraMatrix(cv::Mat& cameraMatrix, cv::RNG& rng)
{
    const double fcMinVal = 1e-3;
    const double fcMaxVal = 100;
    cameraMatrix.create(3, 3, CV_64FC1);
    cameraMatrix.setTo(cv::Scalar(0));
    cameraMatrix.at<double>(0, 0) = rng.uniform(fcMinVal, fcMaxVal);
    cameraMatrix.at<double>(1, 1) = rng.uniform(fcMinVal, fcMaxVal);
    cameraMatrix.at<double>(0, 2) = rng.uniform(fcMinVal, fcMaxVal);
    cameraMatrix.at<double>(1, 2) = rng.uniform(fcMinVal, fcMaxVal);
    cameraMatrix.at<double>(2, 2) = 1;
}

// Generate tiny random distortion coefficients (not used in main below; kept for experiments).
void generateDistCoeffs(cv::Mat& distCoeffs, cv::RNG& rng)
{
    distCoeffs = cv::Mat::zeros(4, 1, CV_64FC1);
    for (int i = 0; i < 3; i++)
        distCoeffs.at<double>(i, 0) = rng.uniform(0.0, 1.0e-6);
}

// Generate a random ground-truth pose (rotation vector and translation vector).
void generatePose(cv::Mat& rvec, cv::Mat& tvec, cv::RNG& rng)
{
    const double minVal = 1.0e-3;
    const double maxVal = 1.0;
    rvec.create(3, 1, CV_64FC1);
    tvec.create(3, 1, CV_64FC1);
    for (int i = 0; i < 3; i++)
    {
        rvec.at<double>(i, 0) = rng.uniform(minVal, maxVal);
        tvec.at<double>(i, 0) = rng.uniform(minVal, maxVal / 10);
    }
}

int main()
{
    std::vector<cv::Point3f> points;
    points.resize(500);
    generate3DPointCloud(points);
    cv::Mat trueRvec, trueTvec;
    cv::RNG rng = cv::RNG();
    // Fixed intrinsics (random ones via generateCameraMatrix/generateDistCoeffs also work).
    double fx = 200, fy = 200, cx = 500, cy = 500;
    cv::Mat intrinsics = (Mat_<double>(3, 3) <<
                          fx, 0, cx,
                          0, fy, cy,
                          0, 0, 1);
    // Identity camera matrix, to be used together with the normalized image points.
    cv::Mat K = (cv::Mat_<double>(3, 3) << 1.0, 0, 0, 0, 1.0, 0, 0, 0, 1.0);
    cv::Mat distCoeffs; // empty: no distortion
    generatePose(trueRvec, trueTvec, rng);
    // Use all 500 points (a subset such as the first 4 also works).
    std::vector<cv::Point3f> opoints(points.begin(), points.end());
    std::vector<cv::Point2f> projectedPoints;
    projectedPoints.resize(opoints.size());
    // Project the 3D points to pixel coordinates using the ground-truth pose.
    projectPoints(cv::Mat(opoints), trueRvec, trueTvec, intrinsics, distCoeffs, projectedPoints);
    // Back-project every pixel onto the normalized plane: x = (u - cx) / fx, y = (v - cy) / fy.
    std::vector<cv::Point2f> projectedPointsNorm;
    for (size_t i = 0; i < projectedPoints.size(); i++)
    {
        cv::Point2f p_norm;
        p_norm.x = (projectedPoints[i].x - cx) / fx;
        p_norm.y = (projectedPoints[i].y - cy) / fy;
        projectedPointsNorm.push_back(p_norm);
    }
    std::cout << "intrinsics: " << intrinsics << std::endl;
    std::cout << "distcoeffs: " << distCoeffs << std::endl;
    cv::Mat rvec, tvec;
    // Normalized points require the identity camera matrix K.
    solvePnP(opoints, projectedPointsNorm, K, distCoeffs, rvec, tvec, false, cv::SOLVEPNP_EPNP);
    std::cout << "trueRvec: " << trueRvec << std::endl;
    std::cout << "trueTvec: " << trueTvec << std::endl;
    std::cout << "SOLVEPNP_EPNP result_rvec " << rvec << " --- " << norm(rvec - trueRvec) << std::endl;
    std::cout << "SOLVEPNP_EPNP result_tvec " << tvec << std::endl;
    std::cout << "---------------Ransac--------------------" << std::endl;
    Mat inliers;
    // useExtrinsicGuess = true refines the EPnP solution obtained above.
    // Note: the reprojection threshold (2) is in the units of the 2D points,
    // i.e. normalized-plane units here; 2 pixels would correspond to 2 / fx = 0.01.
    solvePnPRansac(opoints, projectedPointsNorm, K, distCoeffs, rvec, tvec, true, 100, 2, 0.99, inliers, cv::SOLVEPNP_ITERATIVE);
    std::cout << "Ransac result_rvec " << rvec << " --- " << norm(rvec - trueRvec) << std::endl;
    std::cout << "Ransac result_tvec " << tvec << std::endl;
    return 0;
}
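With the identity K and no distortion, the printed errors against trueRvec/trueTvec should be near zero, confirming that passing normalized coordinates works. One pitfall: solvePnPRansac's reprojectionError threshold is expressed in the same units as the 2D input points, so with normalized coordinates a pixel-level tolerance must be divided by the focal length; the value 2 used above is extremely loose on the normalized plane. A minimal sketch of the scaled call, reusing the variables from the listing above (thresholdPx is a hypothetical pixel-level tolerance):

    // Sketch: choose the RANSAC reprojection threshold in normalized-plane units.
    double thresholdPx = 2.0;                 // tolerance we would use with pixel coordinates
    double thresholdNorm = thresholdPx / fx;  // the same tolerance on the normalized plane
    cv::Mat inliersNorm;
    cv::solvePnPRansac(opoints, projectedPointsNorm, K, distCoeffs, rvec, tvec,
                       false, 100, thresholdNorm, 0.99, inliersNorm, cv::SOLVEPNP_ITERATIVE);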

2. Passing pixel (u, v) coordinates

Here the projected pixel coordinates are passed to solvePnP directly, together with the real camera matrix K (a verification sketch follows the listing).

#include <time.h>
#include <unistd.h>
#include <iostream>
#include <opencv2/opencv.hpp>

using namespace cv;
using namespace std;

// Fill `points` with random 3D points inside the box [-1,1] x [-1,1] x [5,10].
void generate3DPointCloud(std::vector<cv::Point3d>& points)
{
    cv::Point3d pmin = cv::Point3d(-1, -1, 5);
    cv::Point3d pmax = cv::Point3d(1, 1, 10);
    cv::RNG rng = cv::theRNG(); // global RNG, so the input 3D points are reproducible

    for (size_t i = 0; i < points.size(); i++)
    {
        double _x = rng.uniform(pmin.x, pmax.x);
        double _y = rng.uniform(pmin.y, pmax.y);
        double _z = rng.uniform(pmin.z, pmax.z);
        points[i] = cv::Point3d(_x, _y, _z);
    }
}

int main(int argc, char** argv)
{
    // 1.1 Camera intrinsics: focal length 200, 1000x1000 image, principal point at the center.
    double f = 200, w = 1000, h = 1000;
    cv::Mat K = (Mat_<double>(3, 3) <<
                 f, 0, w / 2,
                 0, f, h / 2,
                 0, 0, 1);
    std::vector<cv::Point3d> vp3d;
    vp3d.resize(500);
    generate3DPointCloud(vp3d);
    vector<Point2d> prj2d;
    // Ground-truth pose: identity rotation, translation (1, 1, 0).
    Mat true_R = (cv::Mat_<double>(3, 3) <<
                  1, 0, 0,
                  0, 1, 0,
                  0, 0, 1);
    Mat true_rvec;
    cv::Mat true_t = (cv::Mat_<double>(3, 1) << 1, 1, 0);
    Rodrigues(true_R, true_rvec);
    // Project to pixel coordinates with the real camera matrix K.
    cv::projectPoints(vp3d, true_rvec, true_t, K, Mat(), prj2d);
    for (size_t i = 0; i < prj2d.size(); i++)
    {
        cout << "prj2d[" << i << "] = (" << prj2d[i].x << ", " << prj2d[i].y << ")" << endl;
    }
    Mat rvec;
    Mat tvec;
    // Pixel coordinates go with the real camera matrix K.
    solvePnP(vp3d, prj2d, K, Mat(), rvec, tvec, false, cv::SOLVEPNP_EPNP);
    // cv::Mat inliers;
    // solvePnPRansac(vp3d, prj2d, K, Mat(), rvec, tvec, true, 100, 2, 0.99, inliers);
    cout << "rvec" << rvec << endl;
    cout << "tvec" << tvec << endl;
    return 0;
}
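To verify this result against the ground truth, one can convert the recovered rotation vector back to a matrix with cv::Rodrigues and compare both quantities. A minimal sketch, continuing from the listing above:

    // Sketch: compare the recovered pose with the ground truth from the listing above.
    cv::Mat R;
    cv::Rodrigues(rvec, R); // rotation vector -> 3x3 rotation matrix
    std::cout << "rotation error:    " << cv::norm(R - true_R) << std::endl;
    std::cout << "translation error: " << cv::norm(tvec - true_t) << std::endl;

With the noise-free synthetic projections used here, both errors should be close to zero.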
Both `solvePnP` and `solvePnPRansac` are OpenCV functions for camera pose estimation.

`solvePnP` computes the camera rotation and translation from known 3D model points and their corresponding 2D image points. It takes the camera intrinsic matrix, the distortion coefficients, and the 3D model point coordinates, and returns the camera's rotation vector and translation vector.

Example:

```
import cv2
import numpy as np

# Camera intrinsic matrix
K = np.array([[1000, 0, 500], [0, 1000, 500], [0, 0, 1]], dtype=np.float64)

# Distortion coefficients (none)
dist = np.zeros(4)

# 3D model point coordinates
obj_points = np.array([[0, 0, 0], [0, 0, 1], [0, 1, 0], [1, 0, 0]], dtype=np.float32)

# 2D image point coordinates
img_points = np.array([[500, 500], [500, 600], [600, 500], [600, 600]], dtype=np.float32)

# Estimate the camera pose with solvePnP
success, rvec, tvec = cv2.solvePnP(obj_points, img_points, K, dist)

print("Rotation Vector:\n", rvec)
print("Translation Vector:\n", tvec)
```

`solvePnPRansac` uses random sample consensus (RANSAC) to reduce sensitivity to outliers: it automatically rejects wrong 2D-3D correspondences. Besides the parameters of `solvePnP`, it also accepts the maximum number of RANSAC iterations and the inlier reprojection threshold, and it returns the rotation vector, the translation vector, and the indices of the inliers.

Example:

```
import cv2
import numpy as np

# Camera intrinsic matrix
K = np.array([[1000, 0, 500], [0, 1000, 500], [0, 0, 1]], dtype=np.float64)

# Distortion coefficients (none)
dist = np.zeros(4)

# 3D model point coordinates
obj_points = np.array([[0, 0, 0], [0, 0, 1], [0, 1, 0], [1, 0, 0]], dtype=np.float32)

# 2D image point coordinates
img_points = np.array([[500, 500], [500, 600], [600, 500], [600, 600]], dtype=np.float32)

# Estimate the camera pose with solvePnPRansac; the last return value holds the inlier indices
_, rvec, tvec, inliers = cv2.solvePnPRansac(obj_points, img_points, K, dist, flags=cv2.SOLVEPNP_ITERATIVE)

print("Rotation Vector:\n", rvec)
print("Translation Vector:\n", tvec)
```

The practical difference is how outliers are handled: `solvePnP` fits all point pairs and is therefore sensitive to outliers, while `solvePnPRansac` rejects them through random sampling and the RANSAC consensus test. The price is speed: because of the repeated sampling and refitting, `solvePnPRansac` can be slower than `solvePnP`.
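This difference can be reproduced with the C++ setup from section 2 above: corrupt a few of the projected points and compare what the two solvers return. A minimal sketch, assuming `vp3d`, `prj2d`, and `K` from that listing (the offset and the number of corrupted points are arbitrary choices):

```
// Inject 20 gross outliers into a copy of the section-2 projections.
std::vector<cv::Point2d> prj2dNoisy = prj2d;
for (int i = 0; i < 20; i++)
    prj2dNoisy[i] += cv::Point2d(100, -80);

cv::Mat rvecAll, tvecAll, rvecRansac, tvecRansac, inliers;
// solvePnP fits all 500 correspondences, outliers included, so the pose is biased.
cv::solvePnP(vp3d, prj2dNoisy, K, cv::Mat(), rvecAll, tvecAll, false, cv::SOLVEPNP_EPNP);
// solvePnPRansac with a 2-pixel threshold should reject the 20 corrupted points.
cv::solvePnPRansac(vp3d, prj2dNoisy, K, cv::Mat(), rvecRansac, tvecRansac,
                   false, 100, 2.0, 0.99, inliers, cv::SOLVEPNP_ITERATIVE);
std::cout << "inliers kept: " << inliers.rows << " / " << prj2dNoisy.size() << std::endl;
```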
