1、opencv标定法
在这里我只介绍全景相机中单摄像机的标定部分:
第一步,选定一个标定模式,然后拍摄一些图片。OpenCV中支持一些种类的模式,像棋盘格和圆形网格。也可以使用一个名为随机模式的新模式,具体细节可以参考 opencv_contrib/modules/ccalib 模块。
第二步,从标定模式中提取角点。对于棋盘格模式,可以使用OpenCV中的函数 cv::findChessboardCorners
,对于圆形网格模式,可以使用函数cv::findCirclesGrid,对于随机模式,使用RandomPatternCornerFinder类,该类位于opencv_contrib/modules/ccalib/src/randomPattern.hpp中。图像中角点的位置坐标储存在变量imagePoints中。imagePoints的类型可能是std::vector<std::vector<cv::Vec2f>>,内层矢量存储单帧图片中的所有角点,外层矢量对应每一帧图片。该类型也可以是std::vector<cv::Mat>,这里的cv::Mat类型为CV_32FC2。
同时,相应的世界(标定板)坐标系也是必需的,如果知道标定板的物理尺寸可以自己计算它。3D点存储在 objectPoints
中,与imagePoints类似,它也可以是std::vector<std::vector<cv::Vec3f>>或std::vector<cv::Mat>类型,其中cv::Mat为CV_32FC3类型。注意,objectPoints和imagePoints的大小尺寸必须一致,因为它们是一一对应的。
另外,还需要输入图像尺寸。opencv_contrib/modules/ccalib/tutorial/data/omni_calib_data.xml 文件中存储了一组objectPoints、imagePoints 和imageSize的示例。使用下面的程序加载它们:
// Load the sample calibration data (per-frame 3D board points, their detected
// 2D projections, and the capture resolution) shipped with opencv_contrib
// under modules/ccalib/tutorial/data.
cv::FileStorage fs("omni_calib_data.xml", cv::FileStorage::READ);
std::vector<cv::Mat> objectPoints, imagePoints;
cv::Size imgSize;
fs["objectPoints"] >> objectPoints;
fs["imagePoints"] >> imagePoints;
fs["imageSize"] >> imgSize;
然后,定义一些变量去存储输出参数,并运行标定函数:
cv::Mat K, xi, D, idx;
int flags = 0;
cv::TermCriteria critia(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 200, 0.0001);
std::vector<cv::Mat> rvecs, tvecs;
double rms = cv::omnidir::calibrate(objectPoints, imagePoints, imgSize, K, xi, D, rvecs, tvecs, flags, critia, idx);
K、xi、D是内参。rvecs和tvecs是外参,储存了标定板的姿态位置。它们都是CV_64F深度的。xi是Mei模型中的一个标量参数。idx是一个CV_32S类型的Mat,用来存储那些真正被用于标定的图像的序列号。这是由于一些图像在初始化的时候失败了,以至于它们在最后的优化中不能被使用。返回值rms是重投影均方根误差。
标定支持一些特性,flags是这些特性的一个枚举,包括:
- cv::omnidir::CALIB_FIX_SKEW
- cv::omnidir::CALIB_FIX_K1
- cv::omnidir::CALIB_FIX_K2
- cv::omnidir::CALIB_FIX_P1
- cv::omnidir::CALIB_FIX_P2
- cv::omnidir::CALIB_FIX_XI
- cv::omnidir::CALIB_FIX_GAMMA
- cv::omnidir::CALIB_FIX_CENTER
criteria是优化期间停止迭代的标准,例如,可以设置它为 cv::TermCriteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 200, 0.0001),这表示迭代200次,当相对变化量小于0.0001时停止迭代。
这里就不上OpenCV中的运行结果了,我自己另外下了一组图片,然后进行了重新的标定与矫正。
程序:
int main(int argc, char** argv)
{
Size boardSize, imageSize;
int flags = 0;
double square_width = 0.0, square_height = 0.0;
const char* outputFilename = "out_camera_params.xml";
const char* inputFilename = "image_list.xml";
vector<Mat> objectPoints;
vector<Mat> imagePoints;
boardSize.width = 8;
boardSize.height = 6;
square_width = 0.1;
square_height = 0.1;
if(boardSize.height <= 0 || boardSize.width<=0 )
return fprintf( stderr, "Invalid board height\n" ),-1;
// get image name list
vector<string> image_list, detec_list;
//if(!readStringList(inputFilename, image_list))
// return fprintf( stderr, "Failed to read image list\n"), -1;
if(!getAllFiles(inputFilename, image_list))
return fprintf( stderr, "Failed to read image list\n"), -1;
// get image name list
vector<string> image_list, detec_list;
if(!readStringList(inputFilename, image_list))
return fprintf( stderr, "Failed to read image list\n"), -1;
// find corners in images
// some images may be failed in automatic corner detection, passed cases are in detec_list
vector<Mat> vecImage;
if(!detecChessboardCorners(image_list, detec_list, imagePoints,vecImage, boardSize, imageSize))
return fprintf(stderr, "Not enough corner detected images\n"), -1;
// calculate object coordinates
Mat object;
calcChessboardCorners(boardSize, square_width, square_height, object);
for(int i = 0; i < (int)detec_list.size(); ++i)
objectPoints.push_back(object);
vector<Mat> objectPoints_,imagePoints_;
for(int ii = 0;ii<objectPoints.size();ii++)
{
Mat corner;
objectPoints[ii].convertTo(corner,CV_32F);
objectPoints_.push_back(corner);
Mat corner_;
imagePoints[ii].convertTo(corner_,CV_32F);
imagePoints_.push_back(corner_);
}
// run calibration, some images are discarded in calibration process because they are failed
// in initialization. Retained image indexes are in idx variable.
Mat K, D, xi, idx;
vector<Vec3d> rvecs, tvecs;
double _xi, rms;
TermCriteria criteria(3, 200, 1e-8);
rms = omnidir::calibrate(objectPoints_, imagePoints_, imageSize, K, xi, D, rvecs, tvecs, flags, criteria, idx);
_xi = xi.at<double>(0);
saveCameraParams(outputFilename, flags, K, D, _xi,rvecs, tvecs, detec_list, idx, rms, imagePoints);
Mat distorted = imread("img_para_001.jpg",2|4);
Mat undistorted;
//Size new_size(distorted.cols,distorted.rows);
Size new_size(distorted.cols*2,distorted.rows*2);
cv::omnidir::undistortImage(distorted, undistorted, K, D, xi, 2, Mat(), new_size);
}
程序运行结果:
2、折反射全景相机同心圆环近似展开
同心圆环近似展开法不需要对相机使用棋盘格进行标定,这样省去了很多不必要的麻烦,同时效果也还不错。
Mat distorted = imread("17_00_05.jpg",2|4);
double cx = distorted.cols/2 + 25;
double cy = distorted.rows/2 + 15;
circle(distorted, Point(cx,cy), 3, Scalar(0, 255, 0), -1);
circle(distorted, Point(cx,cy), 200, Scalar(0, 0, 255), 3);
circle(distorted, Point(cx,cy), cy, Scalar(0, 0, 255), 3);
double Rs = 200;
double Rb = cy - 25;
int cylinderHeight = (Rb - Rs);
int cylinderWidth = 2 * PI * (Rb - Rs) /2;
double s = 4 ;//生成柱面全景图的长宽校正因子
Mat panoImage = Mat::zeros(cylinderHeight*3/4,cylinderWidth,distorted.type());
int panoHeight = panoImage.rows;
int panoWidth = panoImage.cols;
Size newImageSize = Size(panoWidth,panoHeight);
double ratioHeight = (double)cylinderHeight / panoHeight;
double ratioWidth = (double)cylinderWidth / panoWidth;
Mat rmap[2];
rmap[0].create(newImageSize, CV_32F);//x
rmap[1].create(newImageSize, CV_32F);//y
for(int i = 0;i<panoHeight;i++)
{
float* m1f = rmap[0].ptr<float>(i);//x
float* m2f = rmap[1].ptr<float>(i);//y
for(int j = 0;j<panoWidth;j++)
{
double _x = ratioWidth * (panoWidth - j);//上下左右转换
double _y = ratioHeight * (panoHeight - i);
//同心圆环近似展开
double alpha = s * (double)_x / cylinderHeight /2;// l / r = theta
double u = cx + (Rs + (double)_y) * sin(alpha);
double v = cy + (Rs + (double)_y) * cos(alpha);
m1f[j] = (float)u;
m2f[j] = (float)v;
}
}
remap(distorted, panoImage, rmap[0], rmap[1], INTER_LINEAR, BORDER_CONSTANT);
程序运行结果: