通过匹配的视觉特征点,利用对极约束求解本质矩阵 E;再对 E 进行分解,恢复两帧之间相机的相对旋转 R 和平移 T。
详细的原理解释见:http://blog.csdn.net/aichipmunk/article/details/48157369
bool MotionEstimator::solveRelativeRT(const vector<pair<Vector3d, Vector3d>> &corres, Matrix3d &Rotation, Vector3d &Translation) { if (corres.size() >= 15) { vector<cv::Point2f> ll, rr; for (int i = 0; i < int(corres.size()); i++) { ll.push_back(cv::Point2f(corres[i].first(0), corres[i].first(1))); rr.push_back(cv::Point2f(corres[i].second(0), corres[i].second(1))); } cv::Mat mask; //根据匹配点求取本征矩阵,使用RANSAC,进一步排除失配点 cv::Mat E = cv::findFundamentalMat(ll, rr, cv::FM_RANSAC, 0.3 / 460, 0.99, mask); //double feasible_count = countNonZero(mask); //cout << (int)feasible_count << " -in- " << p1.size() << endl; cv::Mat cameraMatrix = (cv::Mat_<double>(3, 3) << 1, 0, 0, 0, 1, 0, 0, 0, 1); cv::Mat rot, trans; //分解本征矩阵,获取相对变换 int inlier_cnt = cv::recoverPose(E, ll, rr, cameraMatrix, rot, trans, mask); //cout << "inlier_cnt " << inlier_cnt << endl; Eigen::Matrix3d R; Eigen::Vector3d T; for (int i = 0; i < 3; i++) { T(i) = trans.at<double>(i, 0); for (int j = 0; j < 3; j++) R(i, j) = rot.at<double>(i, j); } Rotat