OpenCV --- Camera Calibration Notes

Reference link: https://www.cnblogs.com/zyly/p/9366080.html

Cheap pinhole cameras became commonplace in everyday life in the late 20th century. Unfortunately, this cheapness comes at a price: significant distortion. Luckily, these distortions are constant, so with a calibration and some remapping we can correct them. Furthermore, with calibration you can also determine the relationship between the camera's natural units (pixels) and real-world units (for example, millimeters).

Theory

For distortion, OpenCV takes radial and tangential factors into account. For the radial factor, the following formulas are used:

x_{distorted} = x (1 + k_1 r^2 + k_2 r^4 + k_3 r^6)
y_{distorted} = y (1 + k_1 r^2 + k_2 r^4 + k_3 r^6)

So, for an undistorted pixel at coordinates (x, y), its position on the distorted image will be (x_{distorted}, y_{distorted}). The presence of radial distortion manifests itself as the "barrel" or "fish-eye" effect.

Tangential distortion occurs because the image-taking lens is not perfectly parallel to the imaging plane. It can be represented by the formulas:

x_{distorted} = x + [2 p_1 x y + p_2 (r^2 + 2 x^2)]
y_{distorted} = y + [p_1 (r^2 + 2 y^2) + 2 p_2 x y]

So we have five distortion parameters, which OpenCV presents as a one-row matrix with 5 columns:

distortion\_coefficients = (k_1 \; k_2 \; p_1 \; p_2 \; k_3)

Now for the unit conversion we use the following formula:

\begin{bmatrix} x \\ y \\ w \end{bmatrix} = \begin{bmatrix} f_x & 0 & c_x \\ 0 & f_y & c_y \\ 0 & 0 & 1 \end{bmatrix} \begin{bmatrix} X \\ Y \\ Z \end{bmatrix}

The presence of w here is explained by the use of homogeneous coordinates (with w = Z). The unknown parameters are fx and fy (the camera focal lengths) and (cx, cy), the optical center expressed in pixel coordinates. If a common focal length is used for both axes with a given aspect ratio a (usually 1), then fy = fx * a, and in the formula above we have a single focal length f. The matrix containing these four parameters is called the camera matrix. While the distortion coefficients are the same regardless of the camera resolution used, the camera matrix needs to be scaled from the calibration resolution to the current working resolution.
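For instance, here is a minimal sketch of how the camera matrix could be rescaled for a working resolution that differs from the calibration resolution (the helper name and variables are illustrative and not part of the sample program below):

// Hypothetical helper (assumes #include <opencv2/core.hpp>): rescale intrinsics
// calibrated at calibSize so they apply to images of size newSize. The distortion
// coefficients stay unchanged; only fx, fy, cx, cy scale with the resolution.
cv::Mat scaleCameraMatrix(const cv::Mat& K, cv::Size calibSize, cv::Size newSize)
{
	double sx = (double)newSize.width  / calibSize.width;
	double sy = (double)newSize.height / calibSize.height;
	cv::Mat Ks = K.clone();
	Ks.at<double>(0, 0) *= sx;	// fx
	Ks.at<double>(1, 1) *= sy;	// fy
	Ks.at<double>(0, 2) *= sx;	// cx
	Ks.at<double>(1, 2) *= sy;	// cy
	return Ks;
}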

The process of determining these two matrices is the calibration. These parameters are computed from basic geometric equations; the equations used depend on the chosen calibration object.
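For a chessboard target, the core of the procedure boils down to collecting the detected 2D corners together with their known 3D board coordinates and passing both to cv::calibrateCamera. A minimal sketch (error handling and sub-pixel refinement omitted; imageNames is assumed to hold the paths of the calibration photos):

// Minimal chessboard calibration sketch (assumes #include <opencv2/opencv.hpp>).
std::vector<std::vector<cv::Point3f>> objectPoints;	// known 3D corners on the Z = 0 plane
std::vector<std::vector<cv::Point2f>> imagePoints;	// detected 2D corners
cv::Size boardSize(7, 5);	// inner corners per row / column
float squareSize = 2.0f;	// physical size of one square
cv::Size imageSize;
for (const std::string& name : imageNames)	// imageNames: calibration photo paths
{
	cv::Mat img = cv::imread(name, cv::IMREAD_GRAYSCALE);
	if (img.empty()) continue;
	imageSize = img.size();
	std::vector<cv::Point2f> corners;
	if (cv::findChessboardCorners(img, boardSize, corners))
	{
		imagePoints.push_back(corners);
		std::vector<cv::Point3f> obj;
		for (int i = 0; i < boardSize.height; i++)
			for (int j = 0; j < boardSize.width; j++)
				obj.push_back(cv::Point3f(j * squareSize, i * squareSize, 0));
		objectPoints.push_back(obj);
	}
}
cv::Mat cameraMatrix, distCoeffs;
std::vector<cv::Mat> rvecs, tvecs;
double rms = cv::calibrateCamera(objectPoints, imagePoints, imageSize,
                                 cameraMatrix, distCoeffs, rvecs, tvecs);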

Goal

The sample application will:

  • Determine the distortion matrix
  • Determine the camera matrix
  • Take input from a camera, a video, or a list of image files
  • Read settings from an XML/YAML file
  • Save the results to an XML/YAML file
  • Calculate the re-projection error

Obtaining the calibration parameter matrices:

/********************************************************************
	Copyright(C),
	Filename: 	...\Calibration_Sheng\Calibration_Sheng\calib_2d.cpp
	Description:	Workflow for camera calibration with a chessboard pattern
	Others:		Function usage follows a standardized style
	Function List: 
	History:
*********************************************************************/
//https://www.w3cschool.cn/opencv/opencv-64352dtf.html
#include "opencv2/core.hpp"
#include <opencv2/core/utility.hpp>
#include "opencv2/imgproc.hpp"
#include "opencv2/calib3d.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/videoio.hpp"
#include "opencv2/highgui.hpp"

#include <cctype>
#include <stdio.h>
#include <string.h>
#include <time.h>


#include <iostream>
#include <fstream>
#include <string>
#include <sstream>

using namespace cv;
using namespace std;

enum { DETECTION = 0, CAPTURING = 1, CALIBRATED = 2 };
enum Pattern { CHESSBOARD, CIRCLES_GRID, ASYMMETRIC_CIRCLES_GRID };


/**
 * @name   computeReprojectionErrors
 * @brief  Compute the reprojection error
 * @note    
 * @param  [in] const std::vector<std::vector<cv::Point3f> > & objectPoints  3D coordinates of the target points in object space
 * @param  [in] const std::vector<std::vector<cv::Point2f> > & imagePoints   coordinates of the target points in the image
 * @param  [in] const std::vector<cv::Mat> & rvecs	rotation vectors (one per view)
 * @param  [in] const std::vector<cv::Mat> & tvecs	translation vectors (one per view)
 * @param  [in] const cv::Mat & cameraMatrix	camera intrinsic matrix
 * @param  [in] const cv::Mat & distCoeffs		distortion coefficients
 * @param  [in/out] std::vector<float> & perViewErrors	per-view RMS reprojection errors
 * @return double	overall RMS reprojection error
 */
static double computeReprojectionErrors(
	const std::vector<std::vector<cv::Point3f> >& objectPoints,
	const std::vector<std::vector<cv::Point2f> >& imagePoints,
	const std::vector<cv::Mat>& rvecs, const std::vector<cv::Mat>& tvecs,
	const cv::Mat& cameraMatrix, const cv::Mat& distCoeffs,
	std::vector<float>& perViewErrors )
{
	std::vector<cv::Point2f> imagePoints2;
	int i, totalPoints = 0;
	double totalErr = 0, err;
	perViewErrors.resize(objectPoints.size());

	for( i = 0; i < (int)objectPoints.size(); i++ )
	{
		projectPoints(cv::Mat(objectPoints[i]), rvecs[i], tvecs[i],
			cameraMatrix, distCoeffs, imagePoints2);
		err = cv::norm(cv::Mat(imagePoints[i]), cv::Mat(imagePoints2), cv::NORM_L2);
		int n = (int)objectPoints[i].size();
		perViewErrors[i] = (float)std::sqrt(err*err/n);
		totalErr += err*err;
		totalPoints += n;
	}

	return std::sqrt(totalErr/totalPoints);
}


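// Generate the ideal 3D coordinates of the chessboard inner corners on the Z = 0 plane.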
static void calcChessboardCorners(cv::Size boardSize, float squareSize, std::vector<cv::Point3f>& corners/*, Pattern patternType = CHESSBOARD*/)
{
	corners.resize(0);
	for( int i = 0; i < boardSize.height; i++ )
	{
		for( int j = 0; j < boardSize.width; j++ )
		{
			corners.push_back(cv::Point3f(float(j*squareSize),
			float(i*squareSize), 0));
		}
	}


	/*switch(patternType)
	{
	case CHESSBOARD:
	case CIRCLES_GRID:
		for( int i = 0; i < boardSize.height; i++ )
			for( int j = 0; j < boardSize.width; j++ )
				corners.push_back(cv::Point3f(float(j*squareSize),
				float(i*squareSize), 0));
		break;

	case ASYMMETRIC_CIRCLES_GRID:
		for( int i = 0; i < boardSize.height; i++ )
			for( int j = 0; j < boardSize.width; j++ )
				corners.push_back(cv::Point3f(float((2*j + i % 2)*squareSize),
				float(i*squareSize), 0));
		break;

	default:
		CV_Error(Error::StsBadArg, "Unknown pattern type\n");
	}*/
}

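// Initialize the intrinsics, build the object points, run cv::calibrateCamera
// (with k4 and k5 fixed) and compute the reprojection error statistics.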
static bool runCalibration( std::vector< std::vector<cv::Point2f> > imagePoints,
	cv::Size imageSize, cv::Size boardSize, /*Pattern patternType,*/
	float squareSize, /*float aspectRatio,*/
	int flags, cv::Mat& cameraMatrix, cv::Mat& distCoeffs,
	 std::vector<cv::Mat>& rvecs,  std::vector<cv::Mat>& tvecs,
	 std::vector<float>& reprojErrs,
	double& totalAvgErr,double& rms)
{
	cameraMatrix = cv::Mat::eye(3, 3, CV_64F);
	/*if( flags & cv::CALIB_FIX_ASPECT_RATIO )
		cameraMatrix.at<double>(0,0) = aspectRatio;*/

	distCoeffs = cv::Mat::zeros(8, 1, CV_64F);

	std::vector<std::vector<cv::Point3f> > objectPoints(1);
	calcChessboardCorners(boardSize, squareSize, objectPoints[0]/*, patternType*/);

	objectPoints.resize(imagePoints.size(),objectPoints[0]);

	/*double */rms = calibrateCamera(objectPoints, imagePoints, imageSize, cameraMatrix,
		distCoeffs, rvecs, tvecs, flags|cv::CALIB_FIX_K4|cv::CALIB_FIX_K5);
	///*|CALIB_FIX_K3*/|CALIB_FIX_K4|CALIB_FIX_K5);
	printf("RMS error reported by calibrateCamera: %g\n", rms);

	bool ok = checkRange(cameraMatrix) && checkRange(distCoeffs);

	totalAvgErr = computeReprojectionErrors(objectPoints, imagePoints,
		rvecs, tvecs, cameraMatrix, distCoeffs, reprojErrs);

	return ok;
}

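// Write the calibration results (board/image info, flags, camera matrix, distortion
// coefficients, reprojection errors, extrinsics and image points) to an XML/YAML file.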
static void saveCameraParams( const std::string& filename,
                       cv::Size imageSize, cv::Size boardSize,
                       float squareSize, /*float aspectRatio,*/ int flags,
                       const cv::Mat& cameraMatrix, const cv::Mat& distCoeffs,
                       const std::vector<cv::Mat>& rvecs, const std::vector<cv::Mat>& tvecs,
                       const std::vector<float>& reprojErrs,
                       const std::vector<std::vector<cv::Point2f> >& imagePoints,

                       double totalAvgErr,double rms )
{
    cv::FileStorage fs( filename, cv::FileStorage::WRITE );

    time_t tt;
    time( &tt );
    struct tm *t2 = localtime( &tt );
    char buf[1024];
    strftime( buf, sizeof(buf)-1, "%c", t2 );

    fs << "calibration_time" << buf;

    if( !rvecs.empty() || !reprojErrs.empty() )
        fs << "nframes" << (int)std::max(rvecs.size(), reprojErrs.size());
    fs << "image_width" << imageSize.width;
    fs << "image_height" << imageSize.height;
    fs << "board_width" << boardSize.width;
    fs << "board_height" << boardSize.height;
    fs << "square_size" << squareSize;

	/*if( flags & cv::CALIB_FIX_ASPECT_RATIO )
	fs << "aspectRatio" << aspectRatio;*/

    if( flags != 0 )
    {
        sprintf( buf, "flags: %s%s%s%s",
            flags & cv::CALIB_USE_INTRINSIC_GUESS ? "+use_intrinsic_guess" : "",
            flags & cv::CALIB_FIX_ASPECT_RATIO ? "+fix_aspectRatio" : "",
            flags & cv::CALIB_FIX_PRINCIPAL_POINT ? "+fix_principal_point" : "",
            flags & cv::CALIB_ZERO_TANGENT_DIST ? "+zero_tangent_dist" : "" );
        fs.writeComment(buf);
    }

    fs << "flags" << flags;

    fs << "camera_matrix" << cameraMatrix;
    fs << "distortion_coefficients" << distCoeffs;

    fs << "avg_reprojection_error" << totalAvgErr;
	fs << "avg_reprojection_error" << rms;
    if( !reprojErrs.empty() )
        fs << "per_view_reprojection_errors" << cv::Mat(reprojErrs);

    if( !rvecs.empty() && !tvecs.empty() )
    {
        CV_Assert(rvecs[0].type() == tvecs[0].type());
        cv::Mat bigmat((int)rvecs.size(), 6, rvecs[0].type());
        for( int i = 0; i < (int)rvecs.size(); i++ )
        {
            cv::Mat r = bigmat(cv::Range(i, i+1), cv::Range(0,3));
            cv::Mat t = bigmat(cv::Range(i, i+1), cv::Range(3,6));

            CV_Assert(rvecs[i].rows == 3 && rvecs[i].cols == 1);
            CV_Assert(tvecs[i].rows == 3 && tvecs[i].cols == 1);
            //*.t() is MatExpr (not Mat) so we can use assignment operator
            r = rvecs[i].t();
            t = tvecs[i].t();
        }
        fs.writeComment("a set of 6-tuples (rotation vector + translation vector) for each view");
        fs << "extrinsic_parameters" << bigmat;
    }

    if( !imagePoints.empty() )
    {
		cv::Mat imagePtMat((int)imagePoints.size(), (int)imagePoints[0].size(), CV_32FC2);
        for( int i = 0; i < (int)imagePoints.size(); i++ )
        {
            cv::Mat r = imagePtMat.row(i).reshape(2, imagePtMat.cols);
            cv::Mat imgpti(imagePoints[i]);
            imgpti.copyTo(r);
        }
        fs << "image_points" << imagePtMat;
    }
}



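// Read a list of image paths from the first top-level sequence node of an XML/YAML file.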
static bool readStringList( const std::string& filename, std::vector<std::string>& l )
{
	l.resize(0);
	cv::FileStorage fs(filename, cv::FileStorage::READ);
	if( !fs.isOpened() )
		return false;
	cv::FileNode n = fs.getFirstTopLevelNode();
	if( n.type() != cv::FileNode::SEQ )
		return false;
	cv::FileNodeIterator it = n.begin(), it_end = n.end();
	for( ; it != it_end; ++it )
		l.push_back((std::string)*it);
	return true;
}

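// Run the calibration, print the result, and save all parameters if it succeeded.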
static bool runAndSave(const std::string& outputFilename,
	const std::vector<std::vector<cv::Point2f> >& imagePoints,
	cv::Size imageSize, cv::Size boardSize, /*Pattern patternType,*/ float squareSize,
	/*float aspectRatio, */int flags, cv::Mat& cameraMatrix,
	cv::Mat& distCoeffs, bool writeExtrinsics, bool writePoints )
{
	std::vector<cv::Mat> rvecs, tvecs;
	std::vector<float> reprojErrs;
	double totalAvgErr = 0;
	double rms=0;
	bool ok = runCalibration(imagePoints, imageSize, boardSize, /*patternType,*/ squareSize,
		/*aspectRatio,*/ flags, cameraMatrix, distCoeffs,
		rvecs, tvecs, reprojErrs, totalAvgErr,rms);
	printf("%s. avg reprojection error = %.2f\n",
		ok ? "Calibration succeeded" : "Calibration failed",
		totalAvgErr);

	// Print the translation vector of each view
	for (size_t j = 0; j < tvecs.size(); j++)
	{
		std::cout << j << std::endl;
		std::cout << tvecs[j] << std::endl << std::endl;
	}

	if( ok )
		saveCameraParams( outputFilename, imageSize,
		boardSize, squareSize, /*aspectRatio,*/
		flags, cameraMatrix, distCoeffs,
		writeExtrinsics ? rvecs : std::vector<cv::Mat>(),
		writeExtrinsics ? tvecs : std::vector<cv::Mat>(),
		writeExtrinsics ? reprojErrs : std::vector<float>(),
		writePoints ? imagePoints : std::vector<std::vector<cv::Point2f> >(),
		totalAvgErr,rms );
	return ok;
}

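// Detect chessboard corners in each listed image, refine them to sub-pixel accuracy,
// collect them into imagePoints, and save the annotated images to outputFilePath.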
static bool calibPicFilter(const std::string& outputFilePath,
	const std::string& inputFilename,std::vector<std::string>& imageList,
	cv::Size boardSize,std::vector<std::vector<cv::Point2f>>& imagePoints)
{

	// Read the list of original images
	readStringList(inputFilename, imageList);
	for (size_t i = 0; i < imageList.size(); i++)
	{
		cv::Mat view, viewGray;
		view = cv::imread(imageList[i], 1);
		cout << imageList[i] << endl;
		if (view.empty())
		{
			cout << "failed to load " << imageList[i] << endl;
			continue;
		}

		std::vector<cv::Point2f> pointbuf;
		if (view.channels() == 3)
			cvtColor(view, viewGray, cv::COLOR_BGR2GRAY);	// convert the color image to grayscale
		else
			viewGray = view;

		// Corner detection
		bool found = findChessboardCorners(view, boardSize, pointbuf,
			cv::CALIB_CB_ADAPTIVE_THRESH | cv::CALIB_CB_FAST_CHECK | cv::CALIB_CB_NORMALIZE_IMAGE);

		// Sub-pixel refinement to improve corner accuracy
		if (found)
		{
			cv::cornerSubPix(viewGray, pointbuf, cv::Size(11, 11), cv::Size(-1, -1),
				cv::TermCriteria(cv::TermCriteria::EPS + cv::TermCriteria::COUNT, 30, 0.001));
			//cv::find4QuadCornerSubpix(viewGray, pointbuf, cv::Size(11, 11));
			imagePoints.push_back(pointbuf);
			drawChessboardCorners(view, boardSize, cv::Mat(pointbuf), found);	// draw the detected corners

			// Save the annotated image to the output folder
			std::string::size_type ipos = imageList[i].find_last_of("\\") + 1;
			std::string img_name = imageList[i].substr(ipos, imageList[i].length() - ipos);
			std::string img_path = outputFilePath + "\\" + img_name;
			cv::imwrite(img_path, view);
		}
	}

	return true;
}







int main( int argc, char** argv )
{
	cv::Size boardSize(7,5), imageSize(640,480);
	float squareSize=2.0/*, aspectRatio*/;
	cv::Mat cameraMatrix, distCoeffs;
	std::string outputFilename="out_camera_data.yml";
	std::string inputFilename = "C:\\Users\\Administrator\\Desktop\\标定\\calibpic20190612\\OriginalPic\\in.xml";//list of source images
	std::string outputPath="..\\filterPic";//folder for the accepted (filtered) images
	std::string outputImageListPath="out_image_list.xml";//output image list file

	bool writeExtrinsics=true, writePoints=true;
	bool undistortImage = false;
	int flags = 0;

	std::vector<std::vector<cv::Point2f> > imagePoints;
	std::vector<std::string> imageList;
	
	calibPicFilter(outputPath,inputFilename,imageList,boardSize,imagePoints);

	if( imagePoints.size() > 0 )
	{
				runAndSave(outputFilename, imagePoints, imageSize,
				boardSize, /*pattern,*/ squareSize,/* aspectRatio,*/
				flags, cameraMatrix, distCoeffs,
				writeExtrinsics, writePoints);
	}

	// Write the image list as a single sequence node
	cv::FileStorage fs( outputImageListPath, cv::FileStorage::WRITE );
	fs << "imageList" << imageList;
	fs.release();


	waitKey(0);
	getchar();
	return 0;
}

Project directory layout: (screenshot omitted)

The in.xml image-list file: (screenshot omitted)
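For reference, readStringList above only requires the first top-level node to be a sequence of strings, so the standard OpenCV image-list format works; roughly like this (the file names are placeholders):

<?xml version="1.0"?>
<opencv_storage>
<images>
"calib_01.bmp"
"calib_02.bmp"
"calib_03.bmp"
</images>
</opencv_storage>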

The output YAML file: (screenshot omitted)
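Based on saveCameraParams above, out_camera_data.yml has roughly the following structure (the entries shown are placeholders, not real calibration results):

%YAML:1.0
calibration_time: "..."
nframes: ...
image_width: 640
image_height: 480
board_width: 7
board_height: 5
square_size: 2.
flags: 0
camera_matrix: !!opencv-matrix
   rows: 3
   cols: 3
   dt: d
   data: [ fx, 0., cx, 0., fy, cy, 0., 0., 1. ]
distortion_coefficients: !!opencv-matrix
   rows: 5
   cols: 1
   dt: d
   data: [ k1, k2, p1, p2, k3 ]
avg_reprojection_error: ...
per_view_reprojection_errors: ...
extrinsic_parameters: ...
image_points: ...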

Undistorting images with the YAML parameters:

//https://blog.csdn.net/lonelyrains/article/details/46915705
#include "opencv2/opencv.hpp"
#include <string>
#include <iostream>


using namespace cv;
using namespace std;


// Load the intrinsics and distortion coefficients produced by the calibration program
void loadCameraParams(Mat &cameraMatrix, Mat &distCoeffs)
{
	FileStorage fs("out_camera_data.yml", FileStorage::READ);

	fs["camera_matrix"] >> cameraMatrix;
	fs["distortion_coefficients"] >> distCoeffs;
}


int main()
{
	//VideoCapture inputVideo(1);
	//inputVideo.set(CV_CAP_PROP_FRAME_WIDTH, 800);
	//inputVideo.set(CV_CAP_PROP_FRAME_HEIGHT, 600);
	//if (!inputVideo.isOpened())
	//{
	//	cout << "Could not open the input video: " << endl;
	//	return -1;
	//}
	Mat frame;
	Mat frameCalibration;
	frame = imread("20190611142018179.bmp");
	//inputVideo >> frame;
	
	Mat cameraMatrix = Mat::zeros(3, 3, CV_64F);
	cameraMatrix.at<double>(0, 0) = 8.3164290667548016e+02;
	cameraMatrix.at<double>(0, 2) = 2.5621626434287748e+02;
	cameraMatrix.at<double>(1, 1) = 8.2630998852990399e+02;
	cameraMatrix.at<double>(1, 2) = 2.4107615207676179e+02;
	cameraMatrix.at<double>(2, 2) = 1;

	Mat distCoeffs = Mat::zeros(5, 1, CV_64F);
	distCoeffs.at<double>(0, 0) = 1.7627063064507345e-01;
	distCoeffs.at<double>(1, 0) = 9.9790853885603562e-02;
	distCoeffs.at<double>(2, 0) = 5.9408661598010027e-03;
	distCoeffs.at<double>(3, 0) = -1.9576300812006838e-02;
	distCoeffs.at<double>(4, 0) = -2.6739120660575884e+00;
	//loadCameraParams(cameraMatrix, distCoeffs);
	Mat view, rview, map1, map2;
	Size imageSize;

	imageSize = frame.size();
	initUndistortRectifyMap(cameraMatrix, distCoeffs, Mat(),
		getOptimalNewCameraMatrix(cameraMatrix, distCoeffs, imageSize, 1, imageSize, 0),
		imageSize, CV_16SC2, map1, map2);


	remap(frame, frameCalibration, map1, map2, INTER_LINEAR);
	imshow("Origianl", frame);
	imshow("Calibration", frameCalibration);
		
	waitKey(0);
	return 0;
}
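For a single image, the same correction can also be done in one call with cv::undistort, which recomputes the maps internally; the initUndistortRectifyMap + remap route above is preferable when many frames from the same camera are processed, because the maps are built only once. A minimal sketch, reusing the cameraMatrix and distCoeffs from above:

// One-shot undistortion of a single frame.
cv::Mat undistorted;
cv::undistort(frame, undistorted, cameraMatrix, distCoeffs);
cv::imshow("Undistorted", undistorted);
cv::waitKey(0);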




Reference links: https://www.w3cschool.cn/opencv/opencv-64352dtf.html

https://blog.csdn.net/dcrmg/article/details/52929669
