Opencv 3.4 中P3P位姿估计算法解析

先上图,Opencv3.4中用两种算法实现P3P位姿估计问题。一种是基于距离的P3P算法(算法1:P3P),一种是基于矩阵的P3P算法(算法2:aP3P),具体推导细节可以参看论文以及整理的本地关键技术文档。此处只对程序进行分析注释,方便以后使用。注意:此处只注释算法2,因为算法1没有完全搞明白(主要是高小山用wu-ritt算法推导的部分我看不太懂)。

外部调用接口函数:

/*  max 注释
*   函数功能:P3P位姿估计的接口函数,此函数可以利用不同的flags值,选择不同的算法。
*             输入3个对应点,返回最多4组有效解
*   参数:
*   [in]    objectPoints                参考坐标系(或世界坐标系)中的3D点,需要3个即可,不要多也不要少。float和double都可以    
*   [in]    imagePoints                 对应3D点在相机相平面上的投影点的图像坐标。需要3个即可,不要多也不要少。注意这个是2D的。float和double都可以
*   [in]    cameraMatrix                相机内参矩阵
*   [in]    distCoeffs                  相机畸变矩阵
*   [out]   rvecs                       输出旋转矩阵,不用声明是多大矩阵-----输出是最多是4个解----每个解的旋转是用旋转向量表示的。------从参考坐标中的点到相机坐标系的点的变换
*   [out]   tvecs                       输出平移矩阵,不用声明多大矩阵------输出最多是4个解。
*   [in]    flags                       选择不同的解算位姿算法,SOLVEPNP_P3P是算法1,采用基于距离的PnP算法。SOLVEPNP_AP3P是算法2,采用基于矩阵的PnP算法。
*
*    返回值:
*            返回解的个数,最大不会超过4个。
*/
int solveP3P(InputArray objectPoints, InputArray imagePoints,	InputArray cameraMatrix, InputArray distCoeffs,	OutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs,int flags);

测试例子:

#include "test_precomp.hpp"

// Fill `points` with random 3D positions drawn uniformly from the axis-aligned
// box [pmin, pmax]. The vector must already be sized by the caller; every
// element is overwritten. Uses the global cv::theRNG() so the generated cloud
// is reproducible when the global seed is fixed.
void generate3DPointCloud(std::vector<cv::Point3f>& points, cv::Point3f pmin = cv::Point3f(-1, -1, 5), cv::Point3f pmax = cv::Point3f(1, 1, 10))
{
	cv::RNG rng = cv::theRNG(); // fixed global seed -> "fixed" input 3D points

	for (auto& pt : points)
	{
		// Draw coordinates in the same order (x, y, z) on every iteration so
		// the RNG consumption pattern stays deterministic.
		pt.x = rng.uniform(pmin.x, pmax.x);
		pt.y = rng.uniform(pmin.y, pmax.y);
		pt.z = rng.uniform(pmin.z, pmax.z);
	}
}

// Build a random 3x3 pinhole intrinsics matrix K:
//   [ fx  0  cx ]
//   [  0  fy cy ]
//   [  0  0   1 ]
// where fx, fy, cx, cy are each drawn uniformly from [1e-3, 100].
void generateCameraMatrix(cv::Mat& cameraMatrix, cv::RNG& rng)
{
	const double kMinVal = 1e-3;
	const double kMaxVal = 100;
	// Start from an all-zero double matrix, then fill the nonzero entries.
	cameraMatrix = cv::Mat::zeros(3, 3, CV_64FC1);
	cameraMatrix.at<double>(0, 0) = rng.uniform(kMinVal, kMaxVal); // fx
	cameraMatrix.at<double>(1, 1) = rng.uniform(kMinVal, kMaxVal); // fy
	cameraMatrix.at<double>(0, 2) = rng.uniform(kMinVal, kMaxVal); // cx
	cameraMatrix.at<double>(1, 2) = rng.uniform(kMinVal, kMaxVal); // cy
	cameraMatrix.at<double>(2, 2) = 1;
}

// Produce a 4x1 distortion-coefficient vector with tiny (near-zero) values.
// Only the first three entries are randomized; the fourth stays exactly 0.
void generateDistCoeffs(cv::Mat& distCoeffs, cv::RNG& rng)
{
	distCoeffs = cv::Mat::zeros(4, 1, CV_64FC1);
	int idx = 0;
	while (idx < 3)
	{
		distCoeffs.at<double>(idx, 0) = rng.uniform(0.0, 1.0e-6);
		++idx;
	}
}

// Generate a random ground-truth pose: a 3x1 rotation vector with components
// in [1e-3, 1] and a 3x1 translation vector with components in [1e-3, 0.1].
void generatePose(cv::Mat& rvec, cv::Mat& tvec, cv::RNG& rng)
{
	const double lo = 1.0e-3;
	const double hi = 1.0;
	rvec.create(3, 1, CV_64FC1);
	tvec.create(3, 1, CV_64FC1);
	for (int axis = 0; axis < 3; ++axis)
	{
		// Alternate rvec/tvec draws per axis, matching the original RNG order.
		rvec.at<double>(axis, 0) = rng.uniform(lo, hi);
		tvec.at<double>(axis, 0) = rng.uniform(lo, hi / 10);
	}
}


int main()
{
	std::vector<cv::Point3f> points;
	points.resize(500);
	generate3DPointCloud(points);


	std::vector<cv::Mat> rvecs, tvecs;
	cv::Mat trueRvec, trueTvec;
	cv::Mat intrinsics, distCoeffs;

	generateCameraMatrix(intrinsics, cv::RNG());	
	generateDistCoeffs(distCoeffs, cv::RNG());

	generatePose(trueRvec, trueTvec, cv::RNG());

	std::vector<cv::Point3f> opoints;
	opoints = std::vector<cv::Point3f>(points.begin(), points.begin() + 3);

	std::vector<cv::Point2f> projectedPoints;
	projectedPoints.resize(opoints.size());
	projectPoints(cv::Mat(opoints), trueRvec, trueTvec, intrinsics, distCoeffs, projectedPoints);

	std::cout << "intrinsics: " << intrinsics << std::endl;
	std::cout << "distcoeffs: " << distCoeffs << std::endl;
	std::cout << "trueRvec: " << trueRvec << std::endl;
	std::cout << "trueTvec: " << trueTvec << std::endl;

	std::cout << "oPoint: " << opoints << std::endl;
	std::cout << "projectedPoints: " << projectedPoints << std::endl;



	std::cout<<"result numbers A: :"<<solveP3P(opoints, projectedPoints, intrinsics, distCoeffs, rvecs, tvecs, cv::SOLVEPNP_AP3P)<<std::endl;
	//std::cout << "result numbers: :" << solveP3P(opoints, projectedPoints, intrinsics, distCoeffs, rvecs, tvecs, cv::SOLVEPNP_P3P) << std::endl;

	bool isTestSuccess = false;
	double error = DBL_MAX;
	for (unsigned int i = 0; i < rvecs.size() /*&& !isTestSuccess*/; ++i)
	{
		double rvecDiff = cvtest::norm(rvecs[i], trueRvec, cv::NORM_L2);
		double tvecDiff = cvtest::norm(tvecs[i], trueTvec, cv::NORM_L2);
		isTestSuccess = rvecDiff < 1.0e-4 && tvecDiff < 1.0e-4;
		error = std::min(error, std::max(rvecDiff, tvecDiff));
		std::cout << "i: " << i << std::endl;
		std::cout << "error: " << error << std::endl;
		std::cout << "rvec: " << rvecs[i] << std::endl;

	}

	system("pause");
}

算法2中代码描述,下面所描述的不是Opencv外部提供的接口,属于算法的内部实现。---代码在ap3p.h头文件中。


#ifndef P3P_P3P_H
#define P3P_P3P_H

#include "precomp.hpp"

namespace cv {
class ap3p {
private:
    template<typename T>
    void init_camera_parameters(const cv::Mat &cameraMatrix) {
        cx = cameraMatrix.at<T>(0, 2);
        cy = cameraMatrix.at<T>(1, 2);
        fx = cameraMatrix.at<T>(0, 0);
        fy = cameraMatrix.at<T>(1, 1);
    }

    template<typename OpointType, typename IpointType>
    void extract_points(const cv::Mat &opoints, const cv::Mat &ipoints, std::vector<double> &points) {
        points.clear();
        int npoints = std::max(opoints.checkVector(3, CV_32F), opoints.checkVector(3, CV_64F));
        points.resize(5*npoints);
        for (int i = 0; i < npoints; i++) {
            points[i * 5] = ipoints.at<IpointType>(i).x * fx + cx;
            points[i * 5 + 1] = ipoints.at<IpointType>(i).y * fy + cy;
            points[i * 5 + 2] = opoints.at<OpointType>(i).x;
            points[i * 5 + 3] = opoints.at<OpointType>(i).y;
            points[i * 5 + 4] = opoints.at<OpointType>(i).z;
        }
    }

    void init_inverse_parameters();

    double fx, fy, cx, cy;
    double inv_fx, inv_fy, cx_fx, cy_fy;
public:
	// 3个初始化构造函数的目的是获取相机内参数,并且计算相机内参矩阵的逆。---------用于求解论文中的bearing measurement
    ap3p() : fx(0), fy(0), cx(0), cy(0), inv_fx(0), inv_fy(0), cx_fx(0), cy_fy(0) {}

    ap3p(double fx, double fy, double cx, double cy);

    ap3p(cv::Mat cameraMatrix);



	/*  max 注释
	*   函数功能:此处的位姿估计只返回一个解,并且此处输入的是4个点---注意并不是solveP3P所调用。调用顺序为①-->④-->③
	*   参数:
	*   [out]   R                输出单个旋转矩阵
	*   [out]   tvec             输出单个平移向量
	*   [in]    opoints          输入3D点---4个
	*   [in]    ipoints          输入2D点---4个
	*
	*    返回值:
	*            成功返回true,失败返回false
	*/
    ① bool solve(cv::Mat &R, cv::Mat &tvec, const cv::Mat &opoints, const cv::Mat &ipoints);

	/*  max 注释
	*   函数功能:此处的位姿估计只返回最多4个解,是solveP3P所调用。调用顺序为②-->③
	*   参数:
	*   [out]   R                输出最多4个旋转矩阵
	*   [out]   tvec             输出最多4平移向量
	*   [in]    opoints          输入3D点---3个
	*   [in]    ipoints          输入2D点---3个
	*
	*    返回值:
	*            成功返回true,失败返回false
	*/
    ② int solve(std::vector<cv::Mat> &Rs, std::vector<cv::Mat> &tvecs, const cv::Mat &opoints, const cv::Mat &ipoints);

    ③ int solve(double R[4][3][3], double t[4][3],
              double mu0, double mv0, double X0, double Y0, double Z0,
              double mu1, double mv1, double X1, double Y1, double Z1,
              double mu2, double mv2, double X2, double Y2, double Z2);

    ④ bool solve(double R[3][3], double t[3],
               double mu0, double mv0, double X0, double Y0, double Z0,
               double mu1, double mv1, double X1, double Y1, double Z1,
               double mu2, double mv2, double X2, double Y2, double Z2,
               double mu3, double mv3, double X3, double Y3, double Z3);


	// 此处完全按照论文的计算步骤来的,程序非常清晰,可以参考论文
    // This algorithm is from "Tong Ke, Stergios Roumeliotis, An Efficient Algebraic Solution to the Perspective-Three-Point Problem" (Accepted by CVPR 2017)
    // See https://arxiv.org/pdf/1701.08237.pdf
    // featureVectors: 3 bearing measurements (normalized) stored as column vectors
    // worldPoints: Positions of the 3 feature points stored as column vectors
    // solutionsR: 4 possible solutions of rotation matrix of the world w.r.t the camera frame
    // solutionsT: 4 possible solutions of translation of the world origin w.r.t the camera frame
    int computePoses(const double featureVectors[3][3], const double worldPoints[3][3], double solutionsR[4][3][3],
                     double solutionsT[4][3]);

};
}
#endif //P3P_P3P_H


  • 1
    点赞
  • 21
    收藏
    觉得还不错? 一键收藏
  • 2
    评论
评论 2
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值