/*
 * Stereo (binocular) camera calibration source code.
 * Tested with OpenCV 4.0.0 and Visual Studio 2017.
 * If you run into any problem, or find a mistake, feel free to leave a comment.
 * This code has been tested and works as-is; it can be copied directly.
 * Note: some file paths must be changed for your setup — see the inline comments.
 * Many published versions of this code are either paywalled or fail to run, and carry
 * few comments, which makes them hard to follow. Plenty of comments have been added
 * here in the hope that it helps others who are just starting with calibration.
 */
#include <opencv2/core/core.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <stdio.h>
#include <iostream>
using namespace cv;
using namespace std;
vector< vector< Point3f > > object_points;//per-view 3-D board-corner coordinates (object points)
vector< vector< Point2f > > left_img_points, right_img_points;//per-view detected 2-D corner coordinates (image points)
vector< Point2f > corners1, corners2;//corner buffers for the current left/right image pair
Mat img1, img2, gray1, gray2;//current color and grayscale frames of the left (1) and right (2) cameras
void load_image_points(int board_width, int board_height, int num_imgs, float square_size,
const char* leftimg_dir, const char* rightimg_dir, const char* leftimg_filename, const char* rightimg_filename, const char* extension) {
//该函数为了获得物点坐标与像点坐标,将其存放在对应数组下
Size board_size = Size(board_width, board_height);//标定板尺寸
int board_n = board_width * board_height;//标定板格子数目
for (int i = 1; i <= num_imgs; i++) {
char left_img[100], right_img[100];
sprintf_s(left_img, "%s%s%d.%s", leftimg_dir, leftimg_filename, i, extension);//获取左右图片目录完整文件夹名称
sprintf_s(right_img, "%s%s%d.%s", rightimg_dir, rightimg_filename, i, extension);
img1 = imread(left_img, IMREAD_COLOR);//读取左右图片文件
img2 = imread(right_img, IMREAD_COLOR);
cvtColor(img1, gray1, COLOR_BGR2GRAY);
cout << img1.size() << endl;
cout << img2.size() << endl;
cvtColor(img2, gray2, COLOR_BGR2GRAY);
bool found1 = false, found2 = false;
found1 = cv::findChessboardCorners(img1, board_size, corners1,
CALIB_CB_ADAPTIVE_THRESH | CALIB_CB_FILTER_QUADS);//提取左右角点坐标
found2 = cv::findChessboardCorners(img2, board_size, corners2,
CALIB_CB_ADAPTIVE_THRESH | CALIB_CB_FILTER_QUADS);
if (!found1 || !found2) {//判断左右图像中角点坐标是否被找到
cout << "Chessboard find error!" << endl;
cout << "leftImg: " << left_img << " and rightImg: " << right_img << endl;
continue;
}
if (found1) {//如果找到了进一步提取亚像素角点坐标
cv::cornerSubPix(gray1, corners1, cv::Size(5, 5), cv::Size(-1, -1),
cv::TermCriteria(TermCriteria::EPS | TermCriteria::MAX_ITER, 30, 0.1));
cv::drawChessboardCorners(gray1, board_size, corners1, found1);
}
if (found2) {
cv::cornerSubPix(gray2, corners2, cv::Size(5, 5), cv::Size(-1, -1),
cv::TermCriteria(TermCriteria::EPS | TermCriteria::MAX_ITER, 30, 0.1));
cv::drawChessboardCorners(gray2, board_size, corners2, found2);
}
vector< Point3f > obj;//每张图片的物点放入obj数组
for (int i = 0; i < board_height; i++)
for (int j = 0; j < board_width; j++)
obj.push_back(Point3f((float)j * square_size, (float)i * square_size, 0));
if (found1 && found2) {//存放所有的物点坐标及左右像点坐标
cout << i << ". Found corners!" << endl;
left_img_points.push_back(corners1);//存左像点坐标
right_img_points.push_back(corners2);//存右像点坐标
object_points.push_back(obj);//存物点坐标
}
}
}
// Compute the RMS reprojection error of a single-camera calibration: each
// view's 3-D board points are projected through (rvec, tvec, cameraMatrix,
// distCoeffs) and compared against the detected image points.
//
// objectPoints : per-view 3-D board-corner coordinates
// imagePoints  : per-view detected 2-D corner coordinates
// rvecs, tvecs : per-view extrinsics from calibrateCamera
// cameraMatrix : intrinsic matrix; distCoeffs : distortion coefficients
// Returns sqrt(sum of squared point distances / total corner count), i.e. the
// RMS error over every corner of every view; 0 when there are no points.
double computeReprojectionErrors(const vector< vector< Point3f > >& objectPoints,
    const vector< vector< Point2f > >& imagePoints,
    const vector< Mat >& rvecs, const vector< Mat >& tvecs,
    const Mat& cameraMatrix, const Mat& distCoeffs) {
    vector< Point2f > imagePoints2; // reprojection of one view's 3-D points
    double totalErr = 0;
    size_t totalPoints = 0;
    for (size_t i = 0; i < objectPoints.size(); ++i) { // iterate over views
        // Project the 3-D board points into the image plane.
        projectPoints(Mat(objectPoints[i]), rvecs[i], tvecs[i], cameraMatrix,
            distCoeffs, imagePoints2);
        // L2 norm over the stacked point set = sqrt(sum of squared distances).
        double err = norm(Mat(imagePoints[i]), Mat(imagePoints2), NORM_L2);
        totalErr += err * err;                 // accumulate squared distances
        totalPoints += objectPoints[i].size(); // accumulate corner count
    }
    if (totalPoints == 0) return 0.0; // avoid 0/0 -> NaN on empty input
    return std::sqrt(totalErr / totalPoints);
}
int main(int argc, char const* argv[]) {
//const char* leftcalib_file = "D:/calib/left_calibration.yml";//定义一些变量
//const char* rightcalib_file = "D:/calib/right_calibration.yml";
const char* leftimg_dir = "C:/Users/26839/Desktop/left/";//存放左照片的路径,视自己存放路径而定
const char* rightimg_dir = "C:/Users/26839/Desktop/right/";//存放右照片的路径
const char* leftimg_filename = "left";//左相机存放照片的名字
const char* rightimg_filename = "right";//右相机存放照片的名字
const char* extension = "jpg";//照片格式
const char* out_file = "D:/calib/cal_stereo.yml";//最终存放结果的路径
int num_imgs = 27;
load_image_points(9, 6, num_imgs, 0.02423,
leftimg_dir, rightimg_dir, leftimg_filename, rightimg_filename, extension);//获得左右像点(角点)坐标,以及物点坐标
printf("Starting Calibration\n");
Mat K1, K2, R, F, E;
Vec3d T;
Mat D1, D2;
vector< Mat > rvecsl,rvecsr, tvecsl, tvecsr;//旋转向量和位移向量,用于存放相机和每一个标定板的外参矩阵(旋转+平移)
int flag = 0;//标定函数中所采用的模型
flag |= CALIB_FIX_K4;
flag |= CALIB_FIX_K5;
calibrateCamera(object_points, left_img_points, img1.size(), K1, D1, rvecsl, tvecsl, flag);//对左侧相机单独标定获得内参矩阵、畸变矩阵
calibrateCamera(object_points, right_img_points, img1.size(), K2, D2, rvecsr, tvecsr, flag);//对右侧相机单独标定获得内参矩阵、畸变矩阵
//stereoCalibrate(object_points, left_img_points, right_img_points, K1, D1, K2, D2, img1.size(), R, T, E, F, CALIB_FIX_PRINCIPAL_POINT);//一步进行立体标定。如果用这个方法,可省略前面的两步相机单独标定内外参数,用这个代替下一行的立体标定,但是经检验没有单独标定后再立体标定精度高
stereoCalibrate(object_points, left_img_points, right_img_points, K1, D1, K2, D2, img1.size(), R, T, E, F);//进行立体标定,需要传入前面进行单独标定两部相机的内参矩阵与畸变矩阵作为参数
cv::FileStorage fs1(out_file, cv::FileStorage::WRITE);//将标定结果写入文件夹
fs1 << "K1" << K1;//写入左相机内参矩阵
fs1 << "K2" << K2;//写入右相机内参矩阵
fs1 << "D1" << D1;//写入左相机畸变矩阵
fs1 << "D2" << D2;//写入右相机畸变矩阵
fs1 << "R" << R;//写入两部相机对应的旋转矩阵
fs1 << "T" << T;//写入两部相机对应的平移矩阵
printf("Done Calibration\n");
printf("Starting Rectification\n");
cv::Mat R1, R2, P1, P2, Q;
stereoRectify(K1, D1, K2, D2, img1.size(), R, T, R1, R2, P1, P2, Q);//进行立体校正
cout << "Calibration error: " << computeReprojectionErrors(object_points, left_img_points, rvecsl, tvecsl, K1, D1) << endl;//输出投影的误差
cout << "Calibration error: " << computeReprojectionErrors(object_points, right_img_points, rvecsr, tvecsr, K2, D2) << endl;//输出投影的误差
fs1 << "R1" << R1;
fs1 << "R2" << R2;
fs1 << "P1" << P1;
fs1 << "P2" << P2;
fs1 << "Q" << Q;//深度映射矩阵
printf("Done Rectification\n");
return 0;
}