Epipolar Geometry (2D-2D)
(1) Theoretical derivation of the epipolar constraint (geometric approach)
Consider two image frames, with the motion from the blue frame to the green frame given by R and t. Call the two camera frames O and O'. In frame O, the point X has normalized-plane coordinates x; in frame O', its normalized-plane coordinates are x'. The goal is to convert coordinates from the first frame into the second, i.e. to describe points of frame O in frame O'. The rotation and translation being computed are therefore R_{O'O} and t_{O'O}, so the t drawn in the figure points from O' toward O.
From the vector (coplanarity) relation in the triangle formed by the two camera centers and X:
x' \cdot (t \times x) = 0
In matrix form:
x'^T (t \times Rx) = x'^T t^\wedge R x = 0
The key step is replacing x with Rx here: x is expressed in frame O and must first be transformed into frame O', hence the left-multiplication by R.
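For the algebraic version of this step: let s_1, s_2 be the depths of X in the two frames, so that s_2 x' = s_1 R x + t. Left-multiplying both sides by t^\wedge kills the translation term (since t^\wedge t = 0):

s_2\, t^\wedge x' = s_1\, t^\wedge R x

Left-multiplying again by x'^T makes the left-hand side vanish as well, because x' is perpendicular to t \times x':

0 = s_2\, x'^T t^\wedge x' = s_1\, x'^T t^\wedge R x \;\Rightarrow\; x'^T t^\wedge R x = 0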
This yields the essential matrix E:
E = t^\wedge R
I will skip the derivation of the fundamental matrix F for now; just remember:
F = K^{-T} E K^{-1}
and
x_2^T E x_1 = p_2^T F p_1 = 0
where x_1, x_2 are points on the normalized plane and p_1, p_2 are the corresponding points on the pixel plane.
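The relation between the two forms follows directly from the pinhole projection p = K x (hence x = K^{-1} p):

x_2^T E x_1 = (K^{-1} p_2)^T E (K^{-1} p_1) = p_2^T K^{-T} E K^{-1} p_1 = p_2^T F p_1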
The camera motion R and t can be recovered from the essential matrix E by singular value decomposition (SVD).
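For reference, the standard recovery (used in assignment 3 below) writes the SVD as E = U \Sigma V^T with \Sigma = \mathrm{diag}(\sigma, \sigma, 0); the four candidate decompositions are

t^\wedge = U R_Z(\pm \tfrac{\pi}{2}) \Sigma U^T, \qquad R = U R_Z^T(\pm \tfrac{\pi}{2}) V^T

and the physically valid (R, t) pair is the one for which triangulated points have positive depth in both cameras, which is the check cv::recoverPose performs internally.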
Main program: verify the epipolar constraint
#include <iostream>
#include <opencv2/core/core.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/calib3d/calib3d.hpp>
using namespace std;
using namespace cv;
/****************************************************
 * This program demonstrates how to estimate camera motion from 2D-2D feature matches
* **************************************************/
void find_feature_matches(
const Mat &img_1, const Mat &img_2,
std::vector<KeyPoint> &keypoints_1,
std::vector<KeyPoint> &keypoints_2,
std::vector<DMatch> &matches);
void pose_estimation_2d2d(
std::vector<KeyPoint> keypoints_1,
std::vector<KeyPoint> keypoints_2,
std::vector<DMatch> matches,
Mat &R, Mat &t);
// Convert pixel coordinates to normalized camera coordinates
Point2d pixel2cam(const Point2d &p, const Mat &K);
int main(int argc, char **argv)
{
//-- 1. Read the images
Mat img_1 = imread("../1.png");
Mat img_2 = imread("../2.png");
vector<KeyPoint> keypoints_1, keypoints_2;
vector<DMatch> matches;
find_feature_matches(img_1, img_2, keypoints_1, keypoints_2, matches);
cout << "一共找到了" << matches.size() << "组匹配点" << endl;
//-- 2.估计两张图像间运动
Mat R, t;
pose_estimation_2d2d(keypoints_1, keypoints_2, matches, R, t);
//-- 3. Verify E = t^R * scale
Mat t_x =
(Mat_<double>(3, 3) << 0, -t.at<double>(2, 0), t.at<double>(1, 0),
t.at<double>(2, 0), 0, -t.at<double>(0, 0),
-t.at<double>(1, 0), t.at<double>(0, 0), 0);
cout << "t^R=" << endl << t_x * R << endl;
//-- 4. Verify the epipolar constraint
Mat K = (Mat_<double>(3, 3) << 520.9, 0, 325.1, 0, 521.0, 249.7, 0, 0, 1);
for (DMatch m: matches)
{
Point2d pt1 = pixel2cam(keypoints_1[m.queryIdx].pt, K);
Mat y1 = (Mat_<double>(3, 1) << pt1.x, pt1.y, 1);
Point2d pt2 = pixel2cam(keypoints_2[m.trainIdx].pt, K);
Mat y2 = (Mat_<double>(3, 1) << pt2.x, pt2.y, 1);
Mat d = y2.t() * t_x * R * y1;
cout << "epipolar constraint = " << d << endl;
}
return 0;
}
Pixel coordinates to normalized camera coordinates:
Point2d pixel2cam(const Point2d &p, const Mat &K) {
return Point2d
(
(p.x - K.at<double>(0, 2)) / K.at<double>(0, 0),
(p.y - K.at<double>(1, 2)) / K.at<double>(1, 1)
);
}
Wait, how is this normalization actually done?
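Working it out from the pinhole model: a pixel p = (u, v, 1)^T satisfies p = K x, so

x = K^{-1} p = \big( (u - c_x)/f_x,\; (v - c_y)/f_y,\; 1 \big)^T

which is exactly what pixel2cam computes: subtract the principal point, then divide by the focal length.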
Computing the essential matrix E, the fundamental matrix F, and R, t:
void pose_estimation_2d2d(std::vector<KeyPoint> keypoints_1,
std::vector<KeyPoint> keypoints_2,
std::vector<DMatch> matches,
Mat &R, Mat &t)
{
// Camera intrinsics, TUM Freiburg2
Mat K = (Mat_<double>(3, 3) << 520.9, 0, 325.1, 0, 521.0, 249.7, 0, 0, 1);
//-- Convert the matched keypoints to vector<Point2f>
vector<Point2f> points1;
vector<Point2f> points2;
for (int i = 0; i < (int) matches.size(); i++) {
points1.push_back(keypoints_1[matches[i].queryIdx].pt);
points2.push_back(keypoints_2[matches[i].trainIdx].pt);
}
//-- Compute the fundamental matrix
Mat fundamental_matrix;
fundamental_matrix = findFundamentalMat(points1, points2, FM_8POINT);
cout << "fundamental_matrix is " << endl << fundamental_matrix << endl;
//-- Compute the essential matrix
Point2d principal_point(325.1, 249.7); // camera principal point, calibrated for the TUM dataset
double focal_length = 521; // camera focal length, calibrated for the TUM dataset
Mat essential_matrix;
essential_matrix = findEssentialMat(points1, points2, focal_length, principal_point);
cout << "essential_matrix is " << endl << essential_matrix << endl;
//-- Compute the homography matrix
//-- The scene in this example is not planar, so the homography is of limited use here
Mat homography_matrix;
homography_matrix = findHomography(points1, points2, RANSAC, 3);
cout << "homography_matrix is " << endl << homography_matrix << endl;
//-- Recover rotation and translation from the essential matrix.
// This function is only available in OpenCV 3 and later
recoverPose(essential_matrix, points1, points2, R, t, focal_length, principal_point);
cout << "R is " << endl << R << endl;
cout << "t is " << endl << t << endl;
}
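As a quick sanity check (my addition, not part of the original program), one can also verify p_2^T F p_1 ≈ 0 directly in pixel coordinates, mirroring the essential-matrix check in main(); the following lines could be appended right after fundamental_matrix is computed:

// Sketch: verify p2^T * F * p1 ≈ 0 for each match, using raw pixel coordinates
for (DMatch m : matches) {
    Mat p1 = (Mat_<double>(3, 1) << keypoints_1[m.queryIdx].pt.x, keypoints_1[m.queryIdx].pt.y, 1);
    Mat p2 = (Mat_<double>(3, 1) << keypoints_2[m.trainIdx].pt.x, keypoints_2[m.trainIdx].pt.y, 1);
    cout << "p2^T F p1 = " << p2.t() * fundamental_matrix * p1 << endl;
}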
(2) Assignment 3: Recovering R, t from E
#include <Eigen/Core>
#include <Eigen/Dense>
#include <Eigen/Geometry>
using namespace Eigen;
#include <sophus/so3.hpp>
#include <iostream>
using namespace std;
int main(int argc, char **argv) {
// Essential matrix
Matrix3d E;
E << -0.0203618550523477, -0.4007110038118445, -0.03324074249824097,
0.3939270778216369, -0.03506401846698079, 0.5857110303721015,
-0.006788487241438284, -0.5815434272915686, -0.01438258684486258;
// R,t to be recovered
Matrix3d R;
Vector3d t;
// SVD and fix singular values
// 1. Compute the SVD of E
JacobiSVD<Matrix3d> svd (E, ComputeFullV | ComputeFullU);
Matrix3d U = svd.matrixU();
Matrix3d V = svd.matrixV();
Vector3d singular_values = svd.singularValues();
// 2. Fix the singular values: a valid essential matrix has singular values (sigma, sigma, 0),
//    so average the first two and zero the third
double singular_value = (singular_values[0] + singular_values[1]) / 2;
singular_values = Vector3d(singular_value, singular_value, 0);
DiagonalMatrix<double,3> sigma (singular_values); // build the diagonal matrix Sigma
// set t1, t2, R1, R2
// 3. The four possible solutions
Matrix3d Rz1 = AngleAxisd(M_PI/2, Vector3d(0,0,1)).toRotationMatrix();
Matrix3d Rz2 = AngleAxisd(-M_PI/2, Eigen::Vector3d(0,0,1)).toRotationMatrix();
Matrix3d t_wedge1 = U * Rz1 * sigma * U.transpose();
Matrix3d t_wedge2 = U * Rz2 * sigma * U.transpose();
Matrix3d R1 = U * Rz1.transpose() * V.transpose();
Matrix3d R2 = U * Rz2.transpose() * V.transpose();
cout << "R1 = " << R1 << endl;
cout << "R2 = " << R2 << endl;
cout << "t1 = " << Sophus::SO3d::vee(t_wedge1) << endl;
cout << "t2 = " << Sophus::SO3d::vee(t_wedge2) << endl;
// check t^R=E up to scale
Matrix3d tR = t_wedge1 * R1;
cout << "t^R = " << tR << endl;
return 0;
}
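Note that the other matched pair also satisfies the constraint: t_wedge2 * R2 reproduces E up to scale as well, so the epipolar constraint alone cannot single out one of the four (R, t) candidates. A quick check that could be added before the return (my addition, not part of the assignment code):

// Sketch: the second pair also satisfies t^R = E up to scale
Matrix3d tR2 = t_wedge2 * R2;
cout << "t2^R2 = " << tR2 << endl;

In practice the valid pair is chosen by triangulating a point and keeping the combination that gives positive depth in both views (the cheirality check), which is what cv::recoverPose does.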
(3) Triangulation
For the detailed solution algorithms (SVD and least-squares triangulation, and how to judge the R, t matrices), see 《SLAM–三角测量SVD分解法、最小二乘法及R t矩阵的判断》: http://t.csdn.cn/YFbBb
Main program
int main(int argc, char **argv) {
//-- 1. Read the images
Mat img_1 = imread("../1.png");
Mat img_2 = imread("../2.png");
//-- 2. Feature matching
vector<KeyPoint> keypoints_1, keypoints_2;
vector<DMatch> matches;
find_feature_matches(img_1, img_2, keypoints_1, keypoints_2, matches);
cout << "一共找到了" << matches.size() << "组匹配点" << endl;
//-- 3.估计两张图像间运动
Mat R, t;
pose_estimation_2d2d(keypoints_1, keypoints_2, matches, R, t); // obtain R, t
//-- 4. Triangulation
vector<Point3d> points;
triangulation(keypoints_1, keypoints_2, matches, R, t, points);
//-- 5. Verify the reprojection relation between the triangulated points and the feature points
Mat K = (Mat_<double>(3, 3) << 520.9, 0, 325.1, 0, 521.0, 249.7, 0, 0, 1);
Mat img1_plot = img_1.clone();
Mat img2_plot = img_2.clone();
for (int i = 0; i < matches.size(); i++) {
// first image
float depth1 = points[i].z;
cout << "depth: " << depth1 << endl;
Point2d pt1_cam = pixel2cam(keypoints_1[matches[i].queryIdx].pt, K);
cv::circle(img1_plot, keypoints_1[matches[i].queryIdx].pt, 2, get_color(depth1), 2);
// second image
Mat pt2_trans = R * (Mat_<double>(3, 1) << points[i].x, points[i].y, points[i].z) + t;
float depth2 = pt2_trans.at<double>(2, 0);
cv::circle(img2_plot, keypoints_2[matches[i].trainIdx].pt, 2, get_color(depth2), 2);
}
cv::imshow("img 1", img1_plot);
cv::imshow("img 2", img2_plot);
cv::waitKey();
return 0;
}
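The plotting code above calls get_color(depth), which is not shown in this excerpt. A minimal sketch of such a helper (the depth range 10–50 is my assumption) that colors near points red and far points blue:

// Sketch: map a depth value to a BGR color, red for near points, blue for far points
inline cv::Scalar get_color(float depth) {
    float up_th = 50, low_th = 10, th_range = up_th - low_th;
    if (depth > up_th) depth = up_th;
    if (depth < low_th) depth = low_th;
    return cv::Scalar(255 * depth / th_range, 0, 255 * (1 - depth / th_range));
}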
With R and t obtained from the matching and epipolar geometry, build the two projection matrices T1 and T2; using K, convert the matched pixels to normalized camera coordinates pts_1 and pts_2, then call the built-in OpenCV function cv::triangulatePoints(T1, T2, pts_1, pts_2, pts_4d).
The result pts_4d is in homogeneous coordinates; dividing by the last component converts it to the non-homogeneous 3D point p.
void triangulation(
const vector<KeyPoint> &keypoint_1,
const vector<KeyPoint> &keypoint_2,
const std::vector<DMatch> &matches,
const Mat &R, const Mat &t,
vector<Point3d> &points) {
Mat T1 = (Mat_<float>(3, 4) <<
1, 0, 0, 0,
0, 1, 0, 0,
0, 0, 1, 0);
Mat T2 = (Mat_<float>(3, 4) <<
R.at<double>(0, 0), R.at<double>(0, 1), R.at<double>(0, 2), t.at<double>(0, 0),
R.at<double>(1, 0), R.at<double>(1, 1), R.at<double>(1, 2), t.at<double>(1, 0),
R.at<double>(2, 0), R.at<double>(2, 1), R.at<double>(2, 2), t.at<double>(2, 0)
);
Mat K = (Mat_<double>(3, 3) << 520.9, 0, 325.1, 0, 521.0, 249.7, 0, 0, 1);
vector<Point2f> pts_1, pts_2;
for (DMatch m:matches) {
// convert pixel coordinates to normalized camera coordinates
pts_1.push_back(pixel2cam(keypoint_1[m.queryIdx].pt, K));
pts_2.push_back(pixel2cam(keypoint_2[m.trainIdx].pt, K));
}
Mat pts_4d;
cv::triangulatePoints(T1, T2, pts_1, pts_2, pts_4d);
// convert to non-homogeneous coordinates
for (int i = 0; i < pts_4d.cols; i++) {
Mat x = pts_4d.col(i);
x /= x.at<float>(3, 0); // normalize by the homogeneous component
Point3d p(
x.at<float>(0, 0),
x.at<float>(1, 0),
x.at<float>(2, 0)
);
points.push_back(p);
}
}
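Step 5 of the main program only colors the keypoints by depth. To check the reprojection relation numerically, something like the following could be added inside the loop in main (a sketch using the variable names already defined there, not the book's code):

// Sketch: compare the measured normalized coordinates (pt1_cam) with the
// triangulated point reprojected onto the normalized plane of the first camera
Point2d pt1_proj(points[i].x / points[i].z, points[i].y / points[i].z);
cout << "measured: " << pt1_cam << ", reprojected: " << pt1_proj << endl;
// the same comparison works for the second camera using pt2_trans divided by its z component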