// Validate ground-truth accuracy with epipolar geometry.
// Fisheye distortion coefficients (k1..k4) of the camera.
vector<double> distort_coefficients = { 0.0034823894022493434, 0.0007150348452162257, -0.0020532361418706202, 0.00020293673591811182 };
// Pinhole intrinsics: focal lengths and principal point.
// (Local renamed cy: a variable literally named `cv` shadows the OpenCV namespace
// name in the reader's eye, even though qualified lookup still works.)
double fu{ 190.97847715128717 };
double fv{ 190.9733070521226 };
double cu{ 254.93170605935475 };
double cy{ 256.8974428996504 };
double width{ 512 }, height{ 512 };
// The same intrinsic matrix, once for Eigen (float) and once for OpenCV (double).
Eigen::Matrix3f cam_matrix;
cam_matrix <<
    fu, 0, cu,
    0, fv, cy,
    0, 0, 1;
cv::Mat camMatrix = (cv::Mat_<double>(3, 3) <<
    fu, 0, cu,
    0, fv, cy,
    0, 0, 1);
std::cout << "cam_matrix: " << cam_matrix << std::endl;
// Paths of the three test images.
const string img_path0 = "D:\\data\\Tum\\dataset-room4_512_16\\mav0\\cam0\\data\\1520531127000601935.png";
const string img_path1 = "D:\\data\\Tum\\dataset-room4_512_16\\mav0\\cam0\\data\\1520531128000628654.png";
const string img_path2 = "D:\\data\\Tum\\dataset-room4_512_16\\mav0\\cam0\\data\\1520531130000855514.png";
// Ground-truth IMU pose at each image time (note: the IMU pose, not the camera pose).
// The 8 numbers are: timestamp, x, y, z, qx, qy, qz, qw, read from the ground-truth file.
// Stored as double: the timestamps (~1.52e9 s with a sub-millisecond fraction) need far
// more than float's ~7 significant decimal digits, so vector<float> silently destroyed them.
vector<double> img0_ex = { 1520531127.002818, 0.807576, -0.267423, 1.271641, 0.008726, -0.020400, -0.001268, 0.999753 };
vector<double> img1_ex = { 1520531128.002818, 0.852157, -0.279911, 1.318899, 0.034031, -0.013359, 0.014252, 0.999230 };
vector<double> img2_ex = { 1520531130.002818, 1.460797, -0.624434, 1.463778, 0.009686, -0.034511, 0.143027, 0.989070 };
// Orientation quaternions (Eigen constructor order is w, x, y, z).
Eigen::Quaternionf q0 = Eigen::Quaterniond(img0_ex[7], img0_ex[4], img0_ex[5], img0_ex[6]).cast<float>();
Eigen::Quaternionf q1 = Eigen::Quaterniond(img1_ex[7], img1_ex[4], img1_ex[5], img1_ex[6]).cast<float>();
Eigen::Quaternionf q2 = Eigen::Quaterniond(img2_ex[7], img2_ex[4], img2_ex[5], img2_ex[6]).cast<float>();
// Quaternion -> rotation matrix (normalize first to absorb rounding in the file values).
Eigen::Matrix3f rot0, rot1, rot2;
std::cout << "q0.w(): " << q0.w() << std::endl;
rot0 = q0.normalized().toRotationMatrix();
rot1 = q1.normalized().toRotationMatrix();
rot2 = q2.normalized().toRotationMatrix();
// Translation: IMU position in the world frame at each timestamp.
Eigen::Vector3f t0(static_cast<float>(img0_ex[1]), static_cast<float>(img0_ex[2]), static_cast<float>(img0_ex[3]));
Eigen::Vector3f t1(static_cast<float>(img1_ex[1]), static_cast<float>(img1_ex[2]), static_cast<float>(img1_ex[3]));
Eigen::Vector3f t2(static_cast<float>(img2_ex[1]), static_cast<float>(img2_ex[2]), static_cast<float>(img2_ex[3]));
//cam 到imu的内参
Eigen::Matrix4f Cali_imu_cam;
Cali_imu_cam << -0.99953071, 0.00744168, -0.02971511, 0.04536566,
0.0294408, -0.03459565, -0.99896766, -0.071996,
-0.00846201, -0.99937369, 0.03436032, -0.04478181,
0.0, 0.0, 0.0, 1.0;
//构建4*4的变换矩阵
Eigen::Matrix4f T0, T1, T2;
T0.setIdentity();
T1.setIdentity();
T2.setIdentity();
T0.topLeftCorner<3,3>() = rot0;
T0.topRightCorner<3, 1>() = t0;
T1.topLeftCorner<3,3>() = rot1;
T1.topRightCorner<3,1>() = t1;
T2.topLeftCorner<3,3>() = rot2;
T2.topRightCorner<3,1>() = t2;
std::cout << "T0: " << T0 << std::endl;
std::cout << "T1: " << T1 << std::endl;
std::cout << "T2: " << T2<< std::endl;
cv::Mat image0 = cv::imread(img_path0, 0);
cv::Mat image1 = cv::imread(img_path1, 0);
cv::Mat image2 = cv::imread(img_path2, 0);
cv::Mat undis_image0 = image0.clone();
cv::Mat undis_image1 = image1.clone();
cv::Mat undis_image2 = image2.clone();
//去除畸变,鱼眼相机采用,cv::fisheye 下面的函数
cv::fisheye::undistortImage(image0, undis_image0, camMatrix, distort_coefficients, camMatrix, cv::Size(image0.cols, image0.rows));
cv::fisheye::undistortImage(image1, undis_image1, camMatrix, distort_coefficients, camMatrix, cv::Size(image1.cols, image1.rows));
cv::fisheye::undistortImage(image2, undis_image2, camMatrix, distort_coefficients, camMatrix, cv::Size(image2.cols, image2.rows));
// Relative pose from camera 1 to camera 2. Reading right to left: a point observed in
// the camera frame is lifted into the IMU frame (Cali_imu_cam), then into the world
// frame (T1), then into the IMU frame at t2 (T2^-1), and finally into the camera frame
// at t2 (Cali_imu_cam^-1).
Eigen::Matrix4f T_image2_1 = Cali_imu_cam.inverse()*T2.inverse()*T1*Cali_imu_cam;
std::cout << "T_image2_1: " << std::endl << T_image2_1 << std::endl;
// Split out R and t.
Eigen::Vector3f t_image2_1 = T_image2_1.block<3, 1>(0, 3);
Eigen::Matrix3f r_image2_1 = T_image2_1.block<3, 3>(0, 0);
// Skew-symmetric matrix [t]_x, so that [t]_x * v == t x v (cross product).
Eigen::Matrix3f w_hat;
w_hat << 0, -t_image2_1(2), t_image2_1(1),
    t_image2_1(2), 0, -t_image2_1(0),
    -t_image2_1(1), t_image2_1(0), 0;
// Essential matrix E = [t]_x * R, satisfying x2^T * E * x1 = 0 in normalized coordinates.
Eigen::Matrix3f essential = w_hat * r_image2_1;
std::cout << "essential: " << essential << std::endl;   // log label typo "essendtial" fixed
// Fundamental matrix F = K^-T * E * K^-1. Note the asymmetry: the left factor is the
// transposed inverse of K, the right factor the plain inverse.
// (Variable name kept as-is: it is referenced further down in this file.)
Eigen::Matrix3f foundamental = cam_matrix.transpose().inverse()*essential*cam_matrix.inverse();
std::cout << "fundamental: " << foundamental << std::endl;   // log label typo "foundamental" fixed
// Visual check: pick random pixels in image 1 and draw their epipolar lines in image 2.
// If the ground truth (and calibration) is correct, the true correspondence of each
// picked pixel must lie on the drawn line.
// (Removed the unused local `vec_points` that was never filled.)
cv::RNG &rng = cv::theRNG();
for (int i = 0; i < 10; i++)
{
    // Random pixel in image 1, homogeneous coordinates (image is 512x512).
    Eigen::Vector3f pt1;
    pt1 << rng(512), rng(512), 1;
    std::cout << "pt1: " << pt1 << std::endl;
    // Epipolar line of pt1 in image 2: l = F * x1, with l = (a, b, c) and a*x + b*y + c = 0.
    Eigen::Vector3f eline = foundamental * pt1;
    // Drawing color: fixed white (the original comment claimed "random" — it never was).
    cv::Scalar color = cv::Scalar(255, 255, 255);
    cv::circle(undis_image1, cv::Point(pt1.x(), pt1.y()), 5, color, 3);
    // Two endpoints of the line at the left/right image borders. Guard against a
    // (near-)vertical epipolar line, where b ~ 0 and the original unconditional
    // division by eline(1) would overflow to garbage coordinates.
    cv::Point line_a, line_b;
    if (eline(1) > 1e-6f || eline(1) < -1e-6f)
    {
        line_a = cv::Point(0, -eline(2) / eline(1));
        line_b = cv::Point(width, (-eline(2) - eline(0) * width) / eline(1));
    }
    else
    {
        // Vertical line x = -c/a, clipped to the top/bottom borders.
        const float x_vert = -eline(2) / eline(0);
        line_a = cv::Point(x_vert, 0);
        line_b = cv::Point(x_vert, height);
    }
    // Draw the epipolar line; check by eye whether pt1's correspondence lies on it.
    cv::line(undis_image2, line_a, line_b, color);
    cv::imshow("img1", undis_image1);
    cv::imshow("img2", undis_image2);
    cv::waitKey(0);
}
//cv::computeCorrespondEpilines(vec_points,1,)
return 0;