matlab实现
我用的是matlab8.0,apps中没有直接能用的camera calibrator
- 下载matlab标定工具箱:http://www.vision.caltech.edu/bouguetj/calib_doc/ (工具包下载及介绍和示例教程)
点击download page下载最新的标定工具包。
将解压好的文件放到Matlab安装目录下的toolbox目录下
sudo cp -rf /home/username/下载/TOOLBOX_calib /usr/local/MATLAB/R2013a/toolbox
-
添加路径
点击 Set Path - Add Folder - 选择刚刚的TOOLBOX_calib路径 - OK - Save
-
选择current folder,文件夹内要包含要标定的图像。
-
在matlab command window下,
calib_gui
选择第一个 standard
按read images
填一下basename和image format
按 extract grid corners
设置window size,默认为11×11。如果标定板方块的实际尺寸(square size)与默认值不一样,要提前clear一下:
clear dX dY
然后选点的时候就会问你方块大小。
继续extract grid corners
依次选择四个边缘角点(不是棋盘格的外轮廓,是检测点最外面的四个角点)
选完了所有的图之后进行第5步。中间可能会遇到边缘角点选择不够准确的情况,需要手动输入X、Y方向的方块数(注意是方块数,不是角点数,而且要注意方向)
-
按calibration, 得到标定参数
其他的功能可以自己尝试一下。
opencv3.4实现
opencv有自己的例程,在/home/username/OpenCV3.4.5/opencv/samples/cpp/tutorial_code/calib3d/camera_calibration下,可以参考。官网讲解:http://www.opencv.org.cn/opencvdoc/2.3.2/html/doc/tutorials/calib3d/camera_calibration/camera_calibration.html#cameracalibrationopencv
using namespace std;
using namespace cv;
//-----------------------------------------------------------------------------
// chessBoardCalib
//
// Calibrates a camera from a set of chessboard images using
// findChessboardCorners + cornerSubPix + calibrateCamera, and prints the
// RMS reprojection error, the intrinsic matrix and the distortion
// coefficients to stdout.
//
// Notation: image plane m(u,v); board plane Mi(x,y); board 3d Qi(x,y,0,1).
//
// inputs:
//   imgVec     : input image vector (assumes BGR color images — the code
//                converts with COLOR_BGR2GRAY; all images should share the
//                size of the first one, which is used as imageSize)
//   boardSize  : number of inner corners per chessboard row/column (w x h)
//   squareSize : physical side length of one chessboard square
// outputs:
//   rvecs, tvecs : per-view rotation / translation vectors from
//                  calibrateCamera
//
// Views where the full corner grid cannot be detected are shown in a
// "failed image" window and skipped, so that objPointsSeq and mPointsSeq
// always stay the same length (pushing object points for a failed view,
// as the original code did, desynchronizes them and makes
// calibrateCamera fail).
//-----------------------------------------------------------------------------
void chessBoardCalib(vector<Mat> &imgVec, Size &boardSize, float &squareSize,
                     vector<Mat> &rvecs, vector<Mat> &tvecs)
{
    // Per-usable-view corner sets; the two vectors are pushed in lockstep.
    vector<vector<Point2f>> mPointsSeq;   // detected image-plane corners
    vector<vector<Point3f>> objPointsSeq; // matching object-space corners
    vector<Point2f> mPoints;              // scratch buffer for one view
    vector<Point3f> objPoints;            // corner grid on the board plane (z = 0)
    Size imageSize;
    const int imgNum = static_cast<int>(imgVec.size());
    const size_t cornerCount =
        static_cast<size_t>(boardSize.width) * static_cast<size_t>(boardSize.height);

    // Build the object-space corner grid once (row-major, z = 0); it is
    // identical for every view, so there is no need to tie it to i == 0.
    for (int row = 0; row < boardSize.height; row++)
        for (int col = 0; col < boardSize.width; col++)
            objPoints.push_back(Point3f(col * squareSize, row * squareSize, 0.f));

    for (int i = 0; i < imgNum; i++)
    {
        Mat src = imgVec[i];
        if (i == 0)
            imageSize = src.size();

        // Clear the scratch buffer so a failed detection cannot leave stale
        // corners from the previous view behind.
        mPoints.clear();
        bool found = findChessboardCorners(src, boardSize, mPoints,
            CALIB_CB_ADAPTIVE_THRESH | CALIB_CB_FAST_CHECK | CALIB_CB_NORMALIZE_IMAGE);

        // Skip views where the board was not found or not fully captured.
        if (!found || mPoints.size() != cornerCount)
        {
            cout << "No." << i << "image failed to find chessboard corners" << endl;
            cout << "Please check and recalibrate..." << endl;
            imshow("failed image", src);
            waitKey(0);
            continue;
        }

        // Refine the corners to sub-pixel accuracy on the grayscale image,
        // then record image-space and object-space points together.
        Mat srcGray;
        cvtColor(src, srcGray, COLOR_BGR2GRAY);
        cornerSubPix(srcGray, mPoints, Size(11, 11), Size(-1, -1),
            TermCriteria(TermCriteria::EPS + TermCriteria::COUNT, 30, 0.1));
        mPointsSeq.push_back(mPoints);
        objPointsSeq.push_back(objPoints);
        drawChessboardCorners(src, boardSize, Mat(mPoints), found);
    }

    // calibrateCamera needs at least one usable view.
    if (mPointsSeq.empty())
    {
        cerr << "chessboard image is incomplete... " << endl;
        cout << "Please check and recalibrate..." << endl;
        return;
    }

    // Calibrate: intrinsic matrix A = cameraMatrix, 5 distortion coefficients.
    Mat cameraMatrix = Mat::eye(3, 3, CV_64F);
    Mat distCoeffs = Mat::zeros(5, 1, CV_64F);
    double rms = calibrateCamera(objPointsSeq, mPointsSeq, imageSize, cameraMatrix,
                                 distCoeffs, rvecs, tvecs, 0);
    cout << "RMS error reported by calibrateCamera: " << rms << endl;
    cout << "cameraMatrix is " << endl
         << cameraMatrix << endl
         << endl;
    cout << "distCoeffs is " << endl
         << distCoeffs << endl;
    cout << "tvecs[0]" << tvecs[0] << endl;
}
对比
放在一起对比一下(上opencv,下matlab)。principal point 对应 cx,cy, 即内参矩阵最后一列的前两个数字。
MATLAB和OpenCV使用基本相同的校准算法。但是,MATLAB使用Levenberg-Marquardt非线性最小二乘算法进行优化(参见文档),而OpenCV使用梯度下降。我猜这可以解释重投影误差的大部分差异。此外,MATLAB和OpenCV使用不同的算法进行棋盘检测。参考:https://stackoverflow.com/questions/24290086/cv-difference-between-matlab-and-opencv-camera-calibration-techniques