OpenCV 标定和畸变校正

版权声明:本文为博主原创文章,未经博主允许不得转载。 https://blog.csdn.net/u013498583/article/details/71404323

1.摄像机成像原理

成像的过程实质上是4个坐标系的转换。首先空间中的一点由 世界坐标系 转换到 摄像机坐标系 ,然后再将其投影到成像平面 (图像物理坐标系 ) ,最后再将成像平面上的数据转换到图像平面 (图像像素坐标系 ) 。

 


下文对4个坐标系的 变换做了详细的解释:

http://blog.csdn.net/humanking7/article/details/44756073

2.畸变模型

图像像素坐标系   (uOv坐标系)  下的无畸变坐标  (U, V) ,经过  径向畸变  和  切向畸变  后落在了 uOv坐标系  的  (Ud, Vd)  上。即就是说, 真实图像 imgR  与  畸变图像 imgD  之间的关系为:  imgR(U, V) = imgD(Ud, Vd)  




OpenCV的document中介绍如下:

http://www.opencv.org.cn/opencvdoc/2.3.2/html/doc/tutorials/calib3d/camera_calibration/camera_calibration.html#cameracalibrationopencv



所以标定的目标是就是确定这5个参数的值。


3.标定板

标定的最开始阶段最需要的肯定是标定板。可以直接从opencv官网上能下载到: 

http://docs.opencv.org/2.4/_downloads/pattern.png


4.图像采集

尽量覆盖摄像机的各个角度,多拍几张照片(必须大于1张)




5.标定

OpenCV的例程来进行标定,在你的opencv目录下 
sources\samples\cpp\tutorial_code\calib3d\camera_calibration 
有3个文件 :

camera_calibration.cpp 
VID5.xml 
in_VID5.xml

第一个是标定程序的源代码。 
第二个是配置文件,你可以更改标定图片获取的方式以及标定板的一些参数。 
第三个里面可以修改标定图片序列的文件名。

代码:


   
   
  1. #include <iostream>
  2. #include <sstream>
  3. #include <time.h>
  4. #include <stdio.h>
  5. #include <opencv2/core/core.hpp>
  6. #include <opencv2/imgproc/imgproc.hpp>
  7. #include <opencv2/calib3d/calib3d.hpp>
  8. #include <opencv2/highgui/highgui.hpp>
  9. #ifndef _CRT_SECURE_NO_WARNINGS
  10. # define _CRT_SECURE_NO_WARNINGS
  11. #endif
  12. using namespace cv;
  13. using namespace std;
  14. static void help()
  15. {
  16. cout << “This is a camera calibration sample.” << endl
  17. << “Usage: calibration configurationFile” << endl
  18. << “Near the sample file you’ll find the configuration file, which has detailed help of “
  19. “how to edit it. It may be any OpenCV supported file format XML/YAML.” << endl;
  20. }
  21. class Settings
  22. {
  23. public:
  24. Settings() : goodInput( false) {}
  25. enum Pattern { NOT_EXISTING, CHESSBOARD, CIRCLES_GRID, ASYMMETRIC_CIRCLES_GRID };
  26. enum InputType {INVALID, CAMERA, VIDEO_FILE, IMAGE_LIST};
  27. void write(FileStorage& fs) const //Write serialization for this class
  28. {
  29. fs << “{“ << “BoardSize_Width” << boardSize.width
  30. << “BoardSize_Height” << boardSize.height
  31. << “Square_Size” << squareSize
  32. << “Calibrate_Pattern” << patternToUse
  33. << “Calibrate_NrOfFrameToUse” << nrFrames
  34. << “Calibrate_FixAspectRatio” << aspectRatio
  35. << “Calibrate_AssumeZeroTangentialDistortion” << calibZeroTangentDist
  36. << “Calibrate_FixPrincipalPointAtTheCenter” << calibFixPrincipalPoint
  37. << “Write_DetectedFeaturePoints” << bwritePoints
  38. << “Write_extrinsicParameters” << bwriteExtrinsics
  39. << “Write_outputFileName” << outputFileName
  40. << “Show_UndistortedImage” << showUndistorsed
  41. << “Input_FlipAroundHorizontalAxis” << flipVertical
  42. << “Input_Delay” << delay
  43. << “Input” << input
  44. << “}”;
  45. }
  46. void read(const FileNode& node) //Read serialization for this class
  47. {
  48. node[ “BoardSize_Width” ] >> boardSize.width;
  49. node[ “BoardSize_Height”] >> boardSize.height;
  50. node[ “Calibrate_Pattern”] >> patternToUse;
  51. node[ “Square_Size”] >> squareSize;
  52. node[ “Calibrate_NrOfFrameToUse”] >> nrFrames;
  53. node[ “Calibrate_FixAspectRatio”] >> aspectRatio;
  54. node[ “Write_DetectedFeaturePoints”] >> bwritePoints;
  55. node[ “Write_extrinsicParameters”] >> bwriteExtrinsics;
  56. node[ “Write_outputFileName”] >> outputFileName;
  57. node[ “Calibrate_AssumeZeroTangentialDistortion”] >> calibZeroTangentDist;
  58. node[ “Calibrate_FixPrincipalPointAtTheCenter”] >> calibFixPrincipalPoint;
  59. node[ “Input_FlipAroundHorizontalAxis”] >> flipVertical;
  60. node[ “Show_UndistortedImage”] >> showUndistorsed;
  61. node[ “Input”] >> input;
  62. node[ “Input_Delay”] >> delay;
  63. interprate();
  64. }
  65. void interprate()
  66. {
  67. goodInput = true;
  68. if (boardSize.width <= 0 || boardSize.height <= 0)
  69. {
  70. cerr << “Invalid Board size: “ << boardSize.width << ” “ << boardSize.height << endl;
  71. goodInput = false;
  72. }
  73. if (squareSize <= 10e-6)
  74. {
  75. cerr << “Invalid square size “ << squareSize << endl;
  76. goodInput = false;
  77. }
  78. if (nrFrames <= 0)
  79. {
  80. cerr << “Invalid number of frames “ << nrFrames << endl;
  81. goodInput = false;
  82. }
  83. if (input.empty()) // Check for valid input
  84. inputType = INVALID;
  85. else
  86. {
  87. if (input[ 0] >= ‘0’ && input[ 0] <= ‘9’)
  88. {
  89. stringstream ss(input);
  90. ss >> cameraID;
  91. inputType = CAMERA;
  92. }
  93. else
  94. {
  95. if (readStringList(input, imageList))
  96. {
  97. inputType = IMAGE_LIST;
  98. nrFrames = (nrFrames < ( int)imageList.size()) ? nrFrames : ( int)imageList.size();
  99. }
  100. else
  101. inputType = VIDEO_FILE;
  102. }
  103. if (inputType == CAMERA)
  104. inputCapture.open(cameraID);
  105. if (inputType == VIDEO_FILE)
  106. inputCapture.open(input);
  107. if (inputType != IMAGE_LIST && !inputCapture.isOpened())
  108. inputType = INVALID;
  109. }
  110. if (inputType == INVALID)
  111. {
  112. cerr << ” Inexistent input: “ << input;
  113. goodInput = false;
  114. }
  115. flag = 0;
  116. if(calibFixPrincipalPoint) flag |= CV_CALIB_FIX_PRINCIPAL_POINT;
  117. if(calibZeroTangentDist) flag |= CV_CALIB_ZERO_TANGENT_DIST;
  118. if(aspectRatio) flag |= CV_CALIB_FIX_ASPECT_RATIO;
  119. calibrationPattern = NOT_EXISTING;
  120. if (!patternToUse.compare( “CHESSBOARD”)) calibrationPattern = CHESSBOARD;
  121. if (!patternToUse.compare( “CIRCLES_GRID”)) calibrationPattern = CIRCLES_GRID;
  122. if (!patternToUse.compare( “ASYMMETRIC_CIRCLES_GRID”)) calibrationPattern = ASYMMETRIC_CIRCLES_GRID;
  123. if (calibrationPattern == NOT_EXISTING)
  124. {
  125. cerr << ” Inexistent camera calibration mode: “ << patternToUse << endl;
  126. goodInput = false;
  127. }
  128. atImageList = 0;
  129. }
  130. Mat nextImage()
  131. {
  132. Mat result;
  133. if( inputCapture.isOpened() )
  134. {
  135. Mat view0;
  136. inputCapture >> view0;
  137. view0.copyTo(result);
  138. }
  139. else if( atImageList < ( int)imageList.size() )
  140. result = imread(imageList[atImageList++], CV_LOAD_IMAGE_COLOR);
  141. return result;
  142. }
  143. static bool readStringList( const string& filename, vector<string>& l )
  144. {
  145. l.clear();
  146. FileStorage fs(filename, FileStorage::READ);
  147. if( !fs.isOpened() )
  148. return false;
  149. FileNode n = fs.getFirstTopLevelNode();
  150. if( n.type() != FileNode::SEQ )
  151. return false;
  152. FileNodeIterator it = n.begin(), it_end = n.end();
  153. for( ; it != it_end; ++it )
  154. l.push_back(( string)*it);
  155. return true;
  156. }
  157. public:
  158. Size boardSize; // The size of the board -> Number of items by width and height
  159. Pattern calibrationPattern; // One of the Chessboard, circles, or asymmetric circle pattern
  160. float squareSize; // The size of a square in your defined unit (point, millimeter,etc).
  161. int nrFrames; // The number of frames to use from the input for calibration
  162. float aspectRatio; // The aspect ratio
  163. int delay; // In case of a video input
  164. bool bwritePoints; // Write detected feature points
  165. bool bwriteExtrinsics; // Write extrinsic parameters
  166. bool calibZeroTangentDist; // Assume zero tangential distortion
  167. bool calibFixPrincipalPoint; // Fix the principal point at the center
  168. bool flipVertical; // Flip the captured images around the horizontal axis
  169. string outputFileName; // The name of the file where to write
  170. bool showUndistorsed; // Show undistorted images after calibration
  171. string input; // The input ->
  172. int cameraID;
  173. vector< string> imageList;
  174. int atImageList;
  175. VideoCapture inputCapture;
  176. InputType inputType;
  177. bool goodInput;
  178. int flag;
  179. private:
  180. string patternToUse;
  181. };
  182. static void read(const FileNode& node, Settings& x, const Settings& default_value = Settings())
  183. {
  184. if(node.empty())
  185. x = default_value;
  186. else
  187. x.read(node);
  188. }
  189. enum { DETECTION = 0, CAPTURING = 1, CALIBRATED = 2 };
  190. bool runCalibrationAndSave(Settings& s, Size imageSize, Mat& cameraMatrix, Mat& distCoeffs,
  191. vector< vector<Point2f> > imagePoints );
  192. int main(int argc, char* argv[])
  193. {
  194. help();
  195. Settings s;
  196. const string inputSettingsFile = argc > 1 ? argv[ 1] : “default.xml”;
  197. FileStorage fs(inputSettingsFile, FileStorage::READ); // Read the settings
  198. if (!fs.isOpened())
  199. {
  200. cout << “Could not open the configuration file: \”“ << inputSettingsFile << “\”“ << endl;
  201. return -1;
  202. }
  203. fs[ “Settings”] >> s;
  204. fs.release(); // close Settings file
  205. if (!s.goodInput)
  206. {
  207. cout << “Invalid input detected. Application stopping. “ << endl;
  208. return -1;
  209. }
  210. vector< vector<Point2f> > imagePoints;
  211. Mat cameraMatrix, distCoeffs;
  212. Size imageSize;
  213. int mode = s.inputType == Settings::IMAGE_LIST ? CAPTURING : DETECTION;
  214. clock_t prevTimestamp = 0;
  215. const Scalar RED(0,0,255), GREEN(0,255,0);
  216. const char ESC_KEY = 27;
  217. for( int i = 0;;++i)
  218. {
  219. Mat view;
  220. bool blinkOutput = false;
  221. view = s.nextImage();
  222. //—– If no more image, or got enough, then stop calibration and show result ————-
  223. if( mode == CAPTURING && imagePoints.size() >= ( unsigned)s.nrFrames )
  224. {
  225. if( runCalibrationAndSave(s, imageSize, cameraMatrix, distCoeffs, imagePoints))
  226. mode = CALIBRATED;
  227. else
  228. mode = DETECTION;
  229. }
  230. if(view.empty()) // If no more images then run calibration, save and stop loop.
  231. {
  232. if( imagePoints.size() > 0 )
  233. runCalibrationAndSave(s, imageSize, cameraMatrix, distCoeffs, imagePoints);
  234. break;
  235. }
  236. imageSize = view.size(); // Format input image.
  237. if( s.flipVertical ) flip( view, view, 0 );
  238. vector<Point2f> pointBuf;
  239. bool found;
  240. switch( s.calibrationPattern ) // Find feature points on the input format
  241. {
  242. case Settings::CHESSBOARD:
  243. found = findChessboardCorners( view, s.boardSize, pointBuf,
  244. CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FAST_CHECK | CV_CALIB_CB_NORMALIZE_IMAGE);
  245. break;
  246. case Settings::CIRCLES_GRID:
  247. found = findCirclesGrid( view, s.boardSize, pointBuf );
  248. break;
  249. case Settings::ASYMMETRIC_CIRCLES_GRID:
  250. found = findCirclesGrid( view, s.boardSize, pointBuf, CALIB_CB_ASYMMETRIC_GRID );
  251. break;
  252. default:
  253. found = false;
  254. break;
  255. }
  256. if (found) // If done with success,
  257. {
  258. // improve the found corners’ coordinate accuracy for chessboard
  259. if( s.calibrationPattern == Settings::CHESSBOARD)
  260. {
  261. Mat viewGray;
  262. cvtColor(view, viewGray, COLOR_BGR2GRAY);
  263. cornerSubPix( viewGray, pointBuf, Size( 11, 11),
  264. Size( -1, -1), TermCriteria( CV_TERMCRIT_EPS+CV_TERMCRIT_ITER, 30, 0.1 ));
  265. }
  266. if( mode == CAPTURING && // For camera only take new samples after delay time
  267. (!s.inputCapture.isOpened() || clock() - prevTimestamp > s.delay* 1e-3*CLOCKS_PER_SEC) )
  268. {
  269. imagePoints.push_back(pointBuf);
  270. prevTimestamp = clock();
  271. blinkOutput = s.inputCapture.isOpened();
  272. }
  273. // Draw the corners.
  274. drawChessboardCorners( view, s.boardSize, Mat(pointBuf), found );
  275. }
  276. //—————————– Output Text ————————————————
  277. string msg = (mode == CAPTURING) ? “100/100” :
  278. mode == CALIBRATED ? “Calibrated” : “Press ‘g’ to start”;
  279. int baseLine = 0;
  280. Size textSize = getTextSize(msg, 1, 1, 1, &baseLine);
  281. Point textOrigin(view.cols - 2*textSize.width - 10, view.rows - 2*baseLine - 10);
  282. if( mode == CAPTURING )
  283. {
  284. if(s.showUndistorsed)
  285. msg = format( “%d/%d Undist”, ( int)imagePoints.size(), s.nrFrames );
  286. else
  287. msg = format( “%d/%d”, ( int)imagePoints.size(), s.nrFrames );
  288. }
  289. putText( view, msg, textOrigin, 1, 1, mode == CALIBRATED ? GREEN : RED);
  290. if( blinkOutput )
  291. bitwise_not(view, view);
  292. //————————- Video capture output undistorted ——————————
  293. if( mode == CALIBRATED && s.showUndistorsed )
  294. {
  295. Mat temp = view.clone();
  296. undistort(temp, view, cameraMatrix, distCoeffs);
  297. }
  298. //—————————— Show image and check for input commands ——————-
  299. imshow( “Image View”, view);
  300. char key = ( char)waitKey(s.inputCapture.isOpened() ? 50 : s.delay);
  301. if( key == ESC_KEY )
  302. break;
  303. if( key == ‘u’ && mode == CALIBRATED )
  304. s.showUndistorsed = !s.showUndistorsed;
  305. if( s.inputCapture.isOpened() && key == ‘g’ )
  306. {
  307. mode = CAPTURING;
  308. imagePoints.clear();
  309. }
  310. }
  311. // ———————–Show and save the undistorted image for the image list ————————
  312. if( s.inputType == Settings::IMAGE_LIST && s.showUndistorsed )
  313. {
  314. Mat view, rview, map1, map2;
  315. initUndistortRectifyMap(cameraMatrix, distCoeffs, Mat(),
  316. getOptimalNewCameraMatrix(cameraMatrix, distCoeffs, imageSize, 1, imageSize, 0),
  317. imageSize, CV_16SC2, map1, map2);
  318. for( int i = 0; i < ( int)s.imageList.size(); i++ )
  319. {
  320. view = imread(s.imageList[i], 1);
  321. if(view.empty())
  322. continue;
  323. remap(view, rview, map1, map2, INTER_LINEAR);
  324. imshow( “Image View”, rview);
  325. string imageName = format( “undistorted_%d.jpg”, i);
  326. imwrite(imageName,rview);
  327. char c = ( char)waitKey();
  328. if( c == ESC_KEY || c == ‘q’ || c == ‘Q’ )
  329. break;
  330. }
  331. }
  332. return 0;
  333. }
  334. static double computeReprojectionErrors( const vector<vector<Point3f> >& objectPoints,
  335. const vector< vector<Point2f> >& imagePoints,
  336. const vector<Mat>& rvecs, const vector<Mat>& tvecs,
  337. const Mat& cameraMatrix , const Mat& distCoeffs,
  338. vector< float>& perViewErrors)
  339. {
  340. vector<Point2f> imagePoints2;
  341. int i, totalPoints = 0;
  342. double totalErr = 0, err;
  343. perViewErrors.resize(objectPoints.size());
  344. for( i = 0; i < ( int)objectPoints.size(); ++i )
  345. {
  346. projectPoints( Mat(objectPoints[i]), rvecs[i], tvecs[i], cameraMatrix,
  347. distCoeffs, imagePoints2);
  348. err = norm(Mat(imagePoints[i]), Mat(imagePoints2), CV_L2);
  349. int n = ( int)objectPoints[i].size();
  350. perViewErrors[i] = ( float) std:: sqrt(err*err/n);
  351. totalErr += err*err;
  352. totalPoints += n;
  353. }
  354. return std:: sqrt(totalErr/totalPoints);
  355. }
  356. static void calcBoardCornerPositions(Size boardSize, float squareSize, vector<Point3f>& corners,
  357. Settings::Pattern patternType /*= Settings::CHESSBOARD*/)
  358. {
  359. corners.clear();
  360. switch(patternType)
  361. {
  362. case Settings::CHESSBOARD:
  363. case Settings::CIRCLES_GRID:
  364. for( int i = 0; i < boardSize.height; ++i )
  365. for( int j = 0; j < boardSize.width; ++j )
  366. corners.push_back(Point3f( float( j*squareSize ), float( i*squareSize ), 0));
  367. break;
  368. case Settings::ASYMMETRIC_CIRCLES_GRID:
  369. for( int i = 0; i < boardSize.height; i++ )
  370. for( int j = 0; j < boardSize.width; j++ )
  371. corners.push_back(Point3f( float(( 2*j + i % 2)*squareSize), float(i*squareSize), 0));
  372. break;
  373. default:
  374. break;
  375. }
  376. }
  377. static bool runCalibration( Settings& s, Size& imageSize, Mat& cameraMatrix, Mat& distCoeffs,
  378. vector< vector<Point2f> > imagePoints, vector<Mat>& rvecs, vector<Mat>& tvecs,
  379. vector< float>& reprojErrs, double& totalAvgErr)
  380. {
  381. cameraMatrix = Mat::eye( 3, 3, CV_64F);
  382. if( s.flag & CV_CALIB_FIX_ASPECT_RATIO )
  383. cameraMatrix.at< double>( 0, 0) = 1.0;
  384. distCoeffs = Mat::zeros( 8, 1, CV_64F);
  385. vector< vector<Point3f> > objectPoints( 1);
  386. calcBoardCornerPositions(s.boardSize, s.squareSize, objectPoints[ 0], s.calibrationPattern);
  387. objectPoints.resize(imagePoints.size(),objectPoints[ 0]);
  388. //Find intrinsic and extrinsic camera parameters
  389. double rms = calibrateCamera(objectPoints, imagePoints, imageSize, cameraMatrix,
  390. distCoeffs, rvecs, tvecs, s.flag|CV_CALIB_FIX_K4|CV_CALIB_FIX_K5);
  391. cout << “Re-projection error reported by calibrateCamera: “<< rms << endl;
  392. bool ok = checkRange(cameraMatrix) && checkRange(distCoeffs);
  393. totalAvgErr = computeReprojectionErrors(objectPoints, imagePoints,
  394. rvecs, tvecs, cameraMatrix, distCoeffs, reprojErrs);
  395. return ok;
  396. }
  397. // Print camera parameters to the output file
  398. static void saveCameraParams( Settings& s, Size& imageSize, Mat& cameraMatrix, Mat& distCoeffs,
  399. const vector<Mat>& rvecs, const vector<Mat>& tvecs,
  400. const vector< float>& reprojErrs, const vector< vector<Point2f> >& imagePoints,
  401. double totalAvgErr )
  402. {
  403. FileStorage fs( s.outputFileName, FileStorage::WRITE );
  404. time_t tm;
  405. time( &tm );
  406. struct tm *t2 = localtime( &tm );
  407. char buf[ 1024];
  408. strftime( buf, sizeof(buf) -1, “%c”, t2 );
  409. fs << “calibration_Time” << buf;
  410. if( !rvecs.empty() || !reprojErrs.empty() )
  411. fs << “nrOfFrames” << ( int) std::max(rvecs.size(), reprojErrs.size());
  412. fs << “image_Width” << imageSize.width;
  413. fs << “image_Height” << imageSize.height;
  414. fs << “board_Width” << s.boardSize.width;
  415. fs << “board_Height” << s.boardSize.height;
  416. fs << “square_Size” << s.squareSize;
  417. if( s.flag & CV_CALIB_FIX_ASPECT_RATIO )
  418. fs << “FixAspectRatio” << s.aspectRatio;
  419. if( s.flag )
  420. {
  421. sprintf( buf, “flags: %s%s%s%s”,
  422. s.flag & CV_CALIB_USE_INTRINSIC_GUESS ? ” +use_intrinsic_guess” : “”,
  423. s.flag & CV_CALIB_FIX_ASPECT_RATIO ? ” +fix_aspectRatio” : “”,
  424. s.flag & CV_CALIB_FIX_PRINCIPAL_POINT ? ” +fix_principal_point” : “”,
  425. s.flag & CV_CALIB_ZERO_TANGENT_DIST ? ” +zero_tangent_dist” : “” );
  426. cvWriteComment( *fs, buf, 0 );
  427. }
  428. fs << “flagValue” << s.flag;
  429. fs << “Camera_Matrix” << cameraMatrix;
  430. fs << “Distortion_Coefficients” << distCoeffs;
  431. fs << “Avg_Reprojection_Error” << totalAvgErr;
  432. if( !reprojErrs.empty() )
  433. fs << “Per_View_Reprojection_Errors” << Mat(reprojErrs);
  434. if( !rvecs.empty() && !tvecs.empty() )
  435. {
  436. CV_Assert(rvecs[ 0].type() == tvecs[ 0].type());
  437. Mat bigmat((int)rvecs.size(), 6, rvecs[0].type());
  438. for( int i = 0; i < ( int)rvecs.size(); i++ )
  439. {
  440. Mat r = bigmat(Range(i, i+ 1), Range( 0, 3));
  441. Mat t = bigmat(Range(i, i+ 1), Range( 3, 6));
  442. CV_Assert(rvecs[i].rows == 3 && rvecs[i].cols == 1);
  443. CV_Assert(tvecs[i].rows == 3 && tvecs[i].cols == 1);
  444. //*.t() is MatExpr (not Mat) so we can use assignment operator
  445. r = rvecs[i].t();
  446. t = tvecs[i].t();
  447. }
  448. cvWriteComment( *fs, “a set of 6-tuples (rotation vector + translation vector) for each view”, 0 );
  449. fs << “Extrinsic_Parameters” << bigmat;
  450. }
  451. if( !imagePoints.empty() )
  452. {
  453. Mat imagePtMat((int)imagePoints.size(), (int)imagePoints[0].size(), CV_32FC2);
  454. for( int i = 0; i < ( int)imagePoints.size(); i++ )
  455. {
  456. Mat r = imagePtMat.row(i).reshape( 2, imagePtMat.cols);
  457. Mat imgpti(imagePoints[i]);
  458. imgpti.copyTo(r);
  459. }
  460. fs << “Image_points” << imagePtMat;
  461. }
  462. }
  463. bool runCalibrationAndSave(Settings& s, Size imageSize, Mat& cameraMatrix, Mat& distCoeffs,vector<vector<Point2f> > imagePoints )
  464. {
  465. vector<Mat> rvecs, tvecs;
  466. vector< float> reprojErrs;
  467. double totalAvgErr = 0;
  468. bool ok = runCalibration(s,imageSize, cameraMatrix, distCoeffs, imagePoints, rvecs, tvecs,
  469. reprojErrs, totalAvgErr);
  470. cout << (ok ? “Calibration succeeded” : “Calibration failed”)
  471. << “. avg re projection error = “ << totalAvgErr ;
  472. if( ok )
  473. saveCameraParams( s, imageSize, cameraMatrix, distCoeffs, rvecs ,tvecs, reprojErrs,
  474. imagePoints, totalAvgErr);
  475. return ok;
  476. }

下面是我的in_VID5.xml文件


   
   
  1. <?xml version="1.0"?>
  2. <opencv_storage>
  3. <Settings>
  4. <!-- Number of inner corners per a item row and column. (square, circle) -->
  5. <BoardSize_Width> 9 </BoardSize_Width>
  6. <BoardSize_Height>6 </BoardSize_Height>
  7. <!-- The size of a square in some user defined metric system (pixel, millimeter)-->
  8. <Square_Size>50 </Square_Size>
  9. <!-- The type of input used for camera calibration. One of: CHESSBOARD CIRCLES_GRID ASYMMETRIC_CIRCLES_GRID -->
  10. <Calibrate_Pattern>"CHESSBOARD" </Calibrate_Pattern>
  11. <!-- The input to use for calibration.
  12. To use an input camera -> give the ID of the camera, like "1"
  13. To use an input video -> give the path of the input video, like "/tmp/x.avi"
  14. To use an image list -> give the path to the XML or YAML file containing the list of the images, like "/tmp/circles_list.xml"
  15. -->
  16. <Input>"E:/wangyuchi/code/calibration/VID5/VID5.xml" </Input>
  17. <!-- If true (non-zero) we flip the input images around the horizontal axis.-->
  18. <Input_FlipAroundHorizontalAxis>0 </Input_FlipAroundHorizontalAxis>
  19. <!-- Time delay between frames in case of camera. -->
  20. <Input_Delay>100 </Input_Delay>
  21. <!-- How many frames to use, for calibration. -->
  22. <Calibrate_NrOfFrameToUse>25 </Calibrate_NrOfFrameToUse>
  23. <!-- Consider only fy as a free parameter, the ratio fx/fy stays the same as in the input cameraMatrix.
  24. Use or not setting. 0 - False Non-Zero - True-->
  25. <Calibrate_FixAspectRatio> 1 </Calibrate_FixAspectRatio>
  26. <!-- If true (non-zero) tangential distortion coefficients are set to zeros and stay zero.-->
  27. <Calibrate_AssumeZeroTangentialDistortion>1 </Calibrate_AssumeZeroTangentialDistortion>
  28. <!-- If true (non-zero) the principal point is not changed during the global optimization.-->
  29. <Calibrate_FixPrincipalPointAtTheCenter> 1 </Calibrate_FixPrincipalPointAtTheCenter>
  30. <!-- The name of the output log file. -->
  31. <Write_outputFileName>"out_camera_data.xml" </Write_outputFileName>
  32. <!-- If true (non-zero) we write to the output file the feature points.-->
  33. <Write_DetectedFeaturePoints>1 </Write_DetectedFeaturePoints>
  34. <!-- If true (non-zero) we write to the output file the extrinsic camera parameters.-->
  35. <Write_extrinsicParameters>1 </Write_extrinsicParameters>
  36. <!-- If true (non-zero) we show after calibration the undistorted images.-->
  37. <Show_UndistortedImage>1 </Show_UndistortedImage>
  38. </Settings>
  39. </opencv_storage>

只修改了这一行,是VID5.xml的路径:
  <Input>"E:/wangyuchi/code/calibration/VID5/VID5.xml"</Input>
   
   

我的VID5.xml文件:


   
   
  1. <?xml version="1.0"?>
  2. <opencv_storage>
  3. <images>
  4. E:/wangyuchi/code/calibration/VID5/2017.05.05/5.jpg
  5. E:/wangyuchi/code/calibration/VID5/2017.05.05/6.jpg
  6. E:/wangyuchi/code/calibration/VID5/2017.05.05/7.jpg
  7. </images>
  8. </opencv_storage>

以下是待标定的图片路径:

   
   
  1. E:/wangyuchi/code/calibration/VID5/2017.05.05/5.jpg
  2. E:/wangyuchi/code/calibration/VID5/2017.05.05/6.jpg
  3. E:/wangyuchi/code/calibration/VID5/2017.05.05/7.jpg

标定结果:




工程目录下会生成一个out_camera_data.xml文件,里面记录摄像头标定的一些参数,以后可以直接使用。我们用它略去标定的步骤,直接校正摄像机,这样标定好一次就可以一直使用了。 

6.校正

得到 out_camera_data.xml文件后,我们可以直接使用该配置文件进行校正。


   
   
  1. #include <opencv2/opencv.hpp>
  2. #include <opencv2/calib3d/calib3d.hpp>
  3. using namespace std;
  4. using namespace cv;
  5. /**
  6. * @主函数
  7. */
  8. int main( int argc, char** argv )
  9. {
  10. /// 读取一副图片,不改变图片本身的颜色类型(该读取方式为DOS运行模式)
  11. Mat src = imread( argv[ 1], 1 );
  12. Mat distortion = src.clone();
  13. Mat camera_matrix = Mat( 3, 3, CV_32FC1);
  14. Mat distortion_coefficients;
  15. //导入相机内参和畸变系数矩阵
  16. FileStorage file_storage(”out_camera_data.xml”, FileStorage::READ);
  17. file_storage[ “Camera_Matrix”] >> camera_matrix;
  18. file_storage[ “Distortion_Coefficients”] >> distortion_coefficients;
  19. file_storage.release();
  20. //矫正
  21. undistort(src, distortion, camera_matrix, distortion_coefficients);
  22. imshow( “img”, src);
  23. imshow( “undistort”, distortion);
  24. imwrite( “undistort.jpg”, distortion);
  25. waitKey( 0);
  26. return 0;
  27. }

7.参考

1. http://blog.csdn.net/qq_23845067/article/details/52105811
2. http://blog.csdn.net/humanking7/article/details/45037239





        </div>
            </div>
  • 1
    点赞
  • 6
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
OpenCV中的相机标定方法可以对径向畸变进行有效校正。在使用相机标定方法时,需要提供内参和畸变系数。内参是相机的内部参数,包括焦距、主点坐标等,畸变系数描述了图像的径向畸变切向畸变。 在OpenCV中,可以通过以下代码实现相机标定畸变校正: ```cpp #include <opencv2/calib3d.hpp> using namespace std; using namespace cv; // 定义相机标定的相关常量设置与变量 vector<string> files; glob("D:/images/camera2d", files); vector<vector<Point2f>> imagePoints; vector<vector<Point3f>> objectPoints; TermCriteria criteria = TermCriteria(TermCriteria::EPS | TermCriteria::MAX_ITER, 30, 0.001); int numCornersHor = 7; int numCornersVer = 7; int numSquares = 50; vector<Point3f> obj; for (int i = 0; i < numCornersHor; i++) { for (int j = 0; j < numCornersVer; j++) { obj.push_back(Point3f((float)j * numSquares, (float)i * numSquares, 0)); } } // 进行相机标定 Mat cameraMatrix, distCoeffs; vector<Mat> rvecs, tvecs; calibrateCamera(objectPoints, imagePoints, imageSize, cameraMatrix, distCoeffs, rvecs, tvecs, criteria); // 畸变校正 for (int i = 0; i < files.size(); i++) { Mat dst; Mat image = imread(files[i]); undistort(image, dst, cameraMatrix, distCoeffs); imshow("image", image); imshow("undistortimage", dst); waitKey(1000); } ``` 上述代码中,首先使用`glob`函数加载相机标定所用的图像文件。然后,定义了图像坐标系和世界坐标系中的点,用于相机标定。通过`calibrateCamera`函数进行相机标定,得到相机内参和畸变系数。最后,使用`undistort`函数实现畸变校正,并显示畸变前后的图像。
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值