Fisheye Camera: Monocular and Stereo Calibration


More precisely: monocular and stereo calibration of cameras using OpenCV's fisheye model.
The full code is pasted below. For a non-fisheye (pinhole) camera, simply swap in the corresponding function names and arguments; the pinhole calls are kept in the code as comments.

Update, 2019-12-07 21:47:
On further thought, the OpenCV fisheye model should be the one described in "A Generic Camera Model and Calibration Method for Conventional, Wide-Angle, and Fish-Eye Lenses". Since it is a generic model, it should also work for ordinary cameras; in other words, given any camera, one could calibrate it directly with the fisheye model. This still needs to be verified.
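One way to run that verification is to calibrate the same set of chessboard corners with both models and compare the RMS reprojection errors. Below is a minimal, standalone sketch (not part of the calibration program that follows; it assumes objectv / imagev corner sets and an imgSize collected exactly as in that program):

#include <opencv2/opencv.hpp>
#include <iostream>
#include <vector>

//Calibrate the same corner data with the pinhole model and with the fisheye
//(generic Kannala-Brandt) model, and print both RMS reprojection errors.
void compareModels(const std::vector<std::vector<cv::Point3f> > &objectv,
                   const std::vector<std::vector<cv::Point2f> > &imagev,
                   const cv::Size &imgSize)
{
	cv::Mat Kp, Dp, Kf, Df;
	std::vector<cv::Mat> rvecs, tvecs;

	//standard pinhole + radial/tangential distortion model
	double errPinhole = cv::calibrateCamera(objectv, imagev, imgSize, Kp, Dp, rvecs, tvecs);

	//generic model used by cv::fisheye (4 coefficients k1..k4)
	int flag = cv::fisheye::CALIB_RECOMPUTE_EXTRINSIC | cv::fisheye::CALIB_FIX_SKEW;
	double errFisheye = cv::fisheye::calibrate(objectv, imagev, imgSize, Kf, Df,
			cv::noArray(), cv::noArray(), flag,
			cv::TermCriteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 20, 1e-6));

	std::cout << "pinhole RMS: " << errPinhole
	          << "  fisheye RMS: " << errFisheye << std::endl;
}

If the two errors come out comparably small on a conventional lens, that supports calibrating it directly with the fisheye model.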


#include <opencv2/opencv.hpp>
#include <iostream>
#include <dirent.h>
#include <fstream>

using namespace std;

bool hasEnding (std::string const &fullString, std::string const &ending) {
    if (fullString.length() >= ending.length()) {
        return (0 == fullString.compare (fullString.length() - ending.length(), ending.length(), ending));
    } else {
        return false;
    }
}

// Image file extensions must be .jpg or .png
void getImgNames(const char* imgsFolderL, const char* imgsFolderR, std::vector<std::string> &imgNamesL,
		std::vector<std::string> &imgNamesR)
{
	DIR* dp;
	struct dirent* dirp;
	dp = opendir(imgsFolderL);
	if(dp == NULL)
	{
		std::cout << "open directory error: " << imgsFolderL << std::endl;
		return;
	}

	while((dirp = readdir(dp)) != NULL)
	{
		std::string filename = dirp->d_name;
		if(filename.at(0) == '.')
		{
			continue;
		} else if(!hasEnding(filename, ".jpg") && !hasEnding(filename, ".png")) {	//skip anything that is neither .jpg nor .png
			continue;
		}

		filename = "/" + filename;
		std::cout << (imgsFolderL + filename) << std::endl;
		std::cout << (imgsFolderR + filename) << std::endl;
		imgNamesL.push_back(imgsFolderL + filename);
		imgNamesR.push_back(imgsFolderR + filename);
	}
	closedir(dp);

}

static bool saveCameraParams(const std::string &filename, int &leftHeight, int &leftWidth, 
		cv::Mat &leftD, cv::Mat &leftK, cv::Mat &leftR, cv::Mat leftP,
		int &rightHeight, int &rightWidth, cv::Mat &rightD, cv::Mat &rightK, cv::Mat &rightR, cv::Mat &rightP, cv::Mat &trans, cv::Mat& Trl)
{
	std::ofstream fs(filename.c_str());
	if(!fs.is_open())
	{
		return false;
	}
	/* std::time_t tt; */
	/* std::time(&tt); */
	/* struct tm *t2 = localtime(&tt); */
	/* char buf[1024]; */
	/* std::strftime(buf, sizeof(buf), "%c", t2); */

	/* std::string str = "1.0"; */
	fs << "%YAML:1.0" << std::endl;
	fs << std::endl;
	fs << "#-----------------------------------------" << std::endl;
	fs << "# Camera Parameters. Adjust them!" << std::endl;
	fs << "#-----------------------------------------" << std::endl;
	fs << std::endl;
	/* fs << "#calibration_time " << buf << std::endl; */
	fs << std::endl;
	fs << "Camera.fx: " << leftP.ptr<double>(0)[0] << std::endl;
	fs << "Camera.fy: " << leftP.ptr<double>(1)[1] << std::endl;
	fs << "Camera.cx: " << leftP.ptr<double>(0)[2] << std::endl;
	fs << "Camera.cy: " << leftP.ptr<double>(1)[2] << std::endl;
	fs << std::endl;
	fs << "Camera.k1: " << 0.0 << std::endl;
	fs << "Camera.k2: " << 0.0 << std::endl;
	fs << "Camera.p1: " << 0.0 << std::endl;
	fs << "Camera.p2: " << 0.0 << std::endl;
	fs << std::endl;
	fs << "Camera.width: " << leftWidth << std::endl;
	fs << "Camera.height: " << leftHeight << std::endl;
	fs << std::endl;
	fs << "#Camera frames per second" << std::endl;
	fs << "Camera.fps: " << 20 << std::endl;
	fs << std::endl;
	fs << "#stereo baseline × Camera.fx" << std::endl;
	float bf = std::fabs(trans.ptr<double>(0)[0] * leftP.ptr<double>(0)[0]);
	fs << "Camera.bf: " << bf << std::endl; 
	fs << std::endl;
	fs << "# Color order of image (0 BGR, 1 RGB, It is ignored if images are grayscale)" << std::endl;
	fs << "Camera.RGB: " << 1 << std::endl;
	fs << std::endl;
	fs << "# Close/Far threshold. Baseline times." << std::endl;
	fs << "ThDepth: " << 35 << std::endl;
	fs << std::endl;
	fs << "#-------------------------------------------" << std::endl;
	fs << "#Stereo Rectification. Camera.fx, Camera.fy must be the same as in LEFT.P" << std::endl;
	fs << "#-------------------------------------------" << std::endl;
	fs << std::endl;
	fs << "LEFT.height: " << leftHeight << std::endl;
	fs << "LEFT.width: " << leftWidth << std::endl;
	fs << "LEFT.D: !!opencv-matrix" << std::endl;
	fs << "    rows: " << 1 << std::endl;
	fs << "    cols: " << 5 << std::endl;
	fs << "    dt: " << 'd' << std::endl;
	fs << "    data: " << leftD << std::endl;

	cv::Mat rowLeftK = leftK.reshape(0, 1);
	fs << "LEFT.K: !!opencv-matrix" << std::endl;
	fs << "    rows: " << 3 << std::endl;
	fs << "    cols: " << 3 << std::endl;
	fs << "    dt: " << 'd' << std::endl;
	fs << "    data: " << rowLeftK << std::endl;

	cv::Mat rowLeftR = leftR.reshape(0, 1);
	fs << "LEFT.R: !!opencv-matrix" << std::endl;
	fs << "    rows: " << 3 << std::endl;
	fs << "    cols: " << 3 << std::endl;
	fs << "    dt: " << 'd' << std::endl;
	fs << "    data: " << rowLeftR << std::endl;

	cv::Mat rowLeftP = leftP.reshape(0, 1);
	fs << "LEFT.P: !!opencv-matrix" << std::endl;
	fs << "    rows: " << 3 << std::endl;
	fs << "    cols: " << 4 << std::endl;
	fs << "    dt: " << 'd' << std::endl;
	fs << "    data: " << rowLeftP << std::endl;
	fs << std::endl;
	fs << "RIGHT.height: " << rightHeight << std::endl;
	fs << "RIGHT.width: " << rightWidth << std::endl;
	fs << "RIGHT.D: !!opencv-matrix" << std::endl;
	fs << "    rows: " << 1 << std::endl;
	fs << "    cols: " << 5 << std::endl;
	fs << "    dt: " << 'd' << std::endl;
	fs << "    data: " << rightD << std::endl;

	cv::Mat rowRightK = rightK.reshape(0, 1);
	fs << "RIGHT.K: !!opencv-matrix" << std::endl;
	fs << "    rows: " << 3 << std::endl;
	fs << "    cols: " << 3 << std::endl;
	fs << "    dt: " << 'd' << std::endl;
	fs << "    data: " << rowRightK << std::endl;

	cv::Mat rowRightR = rightR.reshape(0, 1);
	fs << "RIGHT.R: !!opencv-matrix" << std::endl;
	fs << "    rows: " << 3 << std::endl;
	fs << "    cols: " << 3 << std::endl;
	fs << "    dt: " << 'd' << std::endl;
	fs << "    data: " << rowRightR << std::endl;

	cv::Mat rowRightP = rightP.reshape(0, 1);
	fs << "RIGHT.P: !!opencv-matrix" << std::endl;
	fs << "    rows: " << 3 << std::endl;
	fs << "    cols: " << 4 << std::endl;
	fs << "    dt: " << 'd' << std::endl;
	fs << "    data: " << rowRightP << std::endl;
	fs << std::endl;
	fs << "#-------------------------------------------" << std::endl;
	fs << "#ORB Parameters" << std::endl;
	fs << "#-------------------------------------------" << std::endl;
	fs << std::endl;
	fs << "ORBextractor.nFeatures: " << 1200 << std::endl;
	fs << "ORBextractor.scaleFactor: " << 1.2 << std::endl;
	fs << "ORBextractor.nLevels: " << 8 << std::endl;
	fs << "ORBextractor.iniThFAST: " << 20 << std::endl;
	fs << "ORBextractor.minThFAST: " << 7 << std::endl;
	fs << std::endl;
	fs << "#-------------------------------------------" << std::endl;
	fs << "#Viewer Parameters" << std::endl;
	fs << "#-------------------------------------------" << std::endl;
	fs << std::endl;
	fs << "bUseViewer: " << 0 << std::endl;
	fs << "Viewer.KeyFrameSize: " << 0.05 << std::endl;
	fs << "Viewer.KeyFrameLineWidth: " << 1 << std::endl;
	fs << "Viewer.GraphLineWidth: " << 0.9 << std::endl;
	fs << "Viewer.PointSize: " << 2 << std::endl;
	fs << "Viewer.CameraSize: " << 0.08 << std::endl;
	fs << "Viewer.CameraLineWidth: " << 3 << std::endl;
	fs << "Viewer.ViewpointX: " << 0 << std::endl;
	fs << "Viewer.ViewpointY: " << -0.7 << std::endl;
	fs << "Viewer.ViewpointZ: " << -1.8 << std::endl;
	fs << "Viewer.ViewpointF: " << 500 << std::endl;

	cv::Mat rowTrl = Trl.reshape(0, 1);
	fs << "############" << std::endl;
	fs << "#the extrinsic params from left camera to right camera !" << std::endl;
	fs << "#Or the extrinsic params from left to the main camera in VIO project!" << std::endl;
	fs << "LEFT.RIGHT: !!opencv-matrix" << std::endl;
	fs << "    rows: " << 4 << std::endl;
	fs << "    cols: " << 4 << std::endl;
	fs << "    dt: " << 'f' << std::endl;
	fs << "    data: " << rowTrl << std::endl;
	fs << std::endl;


	std::cout << "saved the parameters in " << filename << std::endl;
	return true;
}



cv::Mat RPY2mat(double roll, double pitch, double yaw)
{
	cv::Mat m(3, 3, CV_64FC1);

	double cr = std::cos(roll);
	double sr = std::sin(roll);
	double cp = std::cos(pitch);
	double sp = std::sin(pitch);
	double cy = std::cos(yaw);
	double sy = std::sin(yaw);

	m.ptr<double>(0)[0] = cy * cp;
	m.ptr<double>(0)[1] = cy * sp * sr - sy * cr;
	m.ptr<double>(0)[2] = cy * sp * cr + sy * sr;
	m.ptr<double>(1)[0] = sy * cp;
	m.ptr<double>(1)[1] = sy * sp * sr + cy * cr;
	m.ptr<double>(1)[2] = sy * sp * cr - cy * sr;
	m.ptr<double>(2)[0] = - sp;
	m.ptr<double>(2)[1] = cp * sr;
	m.ptr<double>(2)[2] = cp * cr;
	 return m.clone();
}

void mat2RPY(const cv::Mat &m, double& roll, double& pitch, double& yaw)
{
	roll = std::atan2(m.ptr<double>(2)[1], m.ptr<double>(2)[2]);
	pitch = std::atan2(-m.ptr<double>(2)[0], std::sqrt(m.ptr<double>(2)[1] * m.ptr<double>(2)[1] + m.ptr<double>(2)[2] * m.ptr<double>(2)[2]));
	yaw = std::atan2(m.ptr<double>(1)[0], m.ptr<double>(0)[0]);
}
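
//Note: RPY2mat / mat2RPY convert between a rotation matrix and roll/pitch/yaw.
//They are not called in main below, but are handy for sanity-checking the stereo
//rotation R printed after cv::fisheye::stereoCalibrate.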

int main(int argc, char *argv[])
{
	//argv[1]: path to the folder containing the left-camera images
	//argv[2]: path to the folder containing the right-camera images
	//corresponding left and right images must have the same file name
	std::vector<std::string> imgNamesL, imgNamesR;
	getImgNames(argv[1], argv[2], imgNamesL, imgNamesR);

	const int board_w = 7;
	const int board_h = 5;
	const int NPoints = board_w * board_h;
	const float boardSize = 0.034;
	cv::Size chessBoardSize = cv::Size(board_w, board_h);
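	//board_w x board_h are the numbers of inner chessboard corners per row/column;
	//boardSize is the side length of one square (0.034, presumably meters), which
	//determines the unit of the translation t estimated later.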
	std::vector<cv::Point3f> object;
	for( int j = 0; j < NPoints; j++ )
	{
		object.push_back(cv::Point3f((j % board_w) * boardSize, (j / board_w) * boardSize, 0));
	}

	std::vector<std::vector<cv::Point3f> > objectv;
	std::vector<std::vector<cv::Point2f> > imagevL, imagevR;
	int height, width;
	cv::Mat imageL, grayImageL, imageR, grayImageR;
	std::vector<cv::Mat> imgsL, imgsR;
	for( int i = 0; i < (int)imgNamesL.size(); i++ )
	{
		imageL = cv::imread(imgNamesL[i]);
		imageR = cv::imread(imgNamesR[i]);
		if(imageL.empty())
		{
			std::cout << imgNamesL[i] << " is empty!" << std::endl;
			continue;
		}
		if(imageR.empty())
		{
			std::cout << imgNamesR[i] << " is empty!" << std::endl;
			continue;
		}
		imgsL.push_back(imageL);
		imgsR.push_back(imageR);
		height = imageL.rows;
		width = imageL.cols;
		
		if(imageL.channels() == 3)
		{
			cv::cvtColor(imageL, grayImageL, CV_BGR2GRAY);
		}
		if(imageR.channels() == 3)
		{
			cv::cvtColor(imageR, grayImageR, CV_BGR2GRAY);
		}

		IplImage tempgrayL = grayImageL;
		IplImage tempgrayR = grayImageR;

		bool findchessboardL = cvCheckChessboard(&tempgrayL, chessBoardSize);
		bool findchessboardR = cvCheckChessboard(&tempgrayR, chessBoardSize);
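		//cvCheckChessboard (legacy C API, available in OpenCV 3.x; removed together with
		//the rest of the C API in OpenCV 4) is only a fast check that a board is likely
		//visible; the corners themselves are extracted below with cv::findChessboardCorners.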
		std::vector<cv::Point2f> tempCornersL, tempCornersR;
		if(findchessboardL && findchessboardR)
		{
			bool find_corners_resultL = cv::findChessboardCorners(grayImageL, chessBoardSize, tempCornersL, 3);
			bool find_corners_resultR = cv::findChessboardCorners(grayImageR, chessBoardSize, tempCornersR, 3);
			if(find_corners_resultL && find_corners_resultR)
			{
				std::cout << "Capture " << imgNamesL[i] << std::endl;
				cv::cornerSubPix(grayImageL, tempCornersL, cv::Size(5, 5), cv::Size(-1, -1), cv::TermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 30, 0.1));
				cv::cornerSubPix(grayImageR, tempCornersR, cv::Size(5, 5), cv::Size(-1, -1), cv::TermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 30, 0.1));
				cv::drawChessboardCorners(imageL, chessBoardSize, tempCornersL, find_corners_resultL);
				cv::drawChessboardCorners(imageR, chessBoardSize, tempCornersR, find_corners_resultR);
				cv::imshow("L", imageL);
				cv::imshow("R", imageR);
				cv::waitKey();

				objectv.push_back(object);
				imagevL.push_back(tempCornersL);
				imagevR.push_back(tempCornersR);
			}
			else
			{
				std::cout << "find_corners_resultL: " << find_corners_resultL << " find_corners_resultR: " << find_corners_resultR << std::endl;
				cv::imshow("L", imageL);
				cv::imshow("R", imageR);
				cv::waitKey();
			}
		}
		else
		{
			std::cout << "findchessboardL: " << findchessboardL << " findchessboardR: " << findchessboardR << std::endl;
			cv::imshow("L", imageL);
			cv::imshow("R", imageR);
			cv::waitKey();
		}

		tempCornersL.clear();
		tempCornersR.clear();
	}

	//Calibrate the intrinsics of the left and right cameras
	std::vector<cv::Mat> rvecsL, tvecsL, rvecsR, tvecsR;
	cv::Mat intrinsicL, distortionL, intrinsicR, distortionR;
	//double errL = cv::calibrateCamera(objectv, imagevL, cv::Size(width, height), intrinsicL, distortionL, rvecsL, tvecsL);

	int flag = 0;
	flag |= cv::fisheye::CALIB_RECOMPUTE_EXTRINSIC;   //re-estimate extrinsics after each intrinsic optimization step
	//flag |= cv::fisheye::CALIB_CHECK_COND;          //check the condition number and reject ill-conditioned views
	flag |= cv::fisheye::CALIB_FIX_SKEW;              //fix the skew coefficient to zero
	//flag |= cv::fisheye::CALIB_USE_INTRINSIC_GUESS;

	double errL = cv::fisheye::calibrate(objectv, imagevL, cv::Size(width, height), intrinsicL,
			distortionL, cv::noArray(), cv::noArray(), flag, cv::TermCriteria(3, 20, 1e-6));
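	//cv::fisheye::calibrate returns the RMS reprojection error; distortionL now holds
	//the 4 fisheye distortion coefficients (k1, k2, k3, k4)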

	//  cv::Mat mapx, mapy;
    // cv::Mat corrected;
	// cv::Size corrected_size(width, height);
	//  cv::fisheye::initUndistortRectifyMap(intrinsicL, distortionL, cv::Matx33d::eye(), intrinsicL, 
	//  corrected_size, CV_16SC2, mapx, mapy); 


	std::cout << "左相机内参-------------------------" << std::endl;
	std::cout << "errL: " << errL << std::endl;
	std::cout << "intrinsicL: " << std::endl;
	std::cout << intrinsicL << std::endl;
	std::cout << "distortionL: " << std::endl;
	std::cout << distortionL << std::endl;

	std::cout << "右相机内参-------------------------" << std::endl;
	//double errR = cv::calibrateCamera(objectv, imagevR, cv::Size(width, height), intrinsicR, distortionR, rvecsR, tvecsR);

	double errR = cv::fisheye::calibrate(objectv, imagevR, cv::Size(width, height), intrinsicR,
			distortionR, cv::noArray(), cv::noArray(), flag, cv::TermCriteria(3, 20, 1e-6));
	std::cout << "errR: " << errR << std::endl;
	std::cout << "intrinsicR: " << std::endl;
	std::cout << intrinsicR << std::endl;
	std::cout << "distortionR: " << std::endl;
	std::cout << distortionR << std::endl;


	//Calibrate the extrinsics between the left and right cameras
	cv::Mat E, F, R, t;
	cv::Size imgSize(width, height);
	
	//double errLR = cv::stereoCalibrate(objectv, imagevL, imagevR, intrinsicL, distortionL, intrinsicR, distortionR, 
	//imgSize, R, t, E, F);
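	//By default cv::fisheye::stereoCalibrate uses CALIB_FIX_INTRINSIC, i.e. it keeps the
	//intrinsics estimated above fixed and only refines the relative rotation R and
	//translation t that map left-camera coordinates into the right camera frame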
	double errLR = cv::fisheye::stereoCalibrate(objectv, imagevL, imagevR,
			intrinsicL, distortionL, intrinsicR, distortionR, imgSize, R, t);

	std::cout << "外参--------------------------------" << std::endl;
	std::cout << "errLR: " << errLR << std::endl;
	std::cout << "R: " << std::endl;
	std::cout << R << std::endl;
	std::cout << "t: " << std::endl;
	std::cout << t << std::endl;


	//Stereo rectification
	cv::Mat Rl, Rr, Pl, Pr, Q;
	//cv::stereoRectify(intrinsicL, distortionL, intrinsicR, distortionR, imgSize, R, t, Rl, Rr, Pl, Pr, Q);
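	//cv::fisheye::stereoRectify computes the rectification rotations Rl/Rr and the new
	//projection matrices Pl/Pr; CALIB_ZERO_DISPARITY makes the principal points of the
	//two rectified views coincide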
	cv::fisheye::stereoRectify(intrinsicL, distortionL, intrinsicR, distortionR, imgSize,
			R, t, Rl, Rr, Pl, Pr, Q, cv::CALIB_ZERO_DISPARITY, imgSize);

	cv::Mat Trl = cv::Mat::eye(4, 4, CV_64FC1);
	R.copyTo(Trl.rowRange(0, 3).colRange(0, 3));
	t.copyTo(Trl.rowRange(0, 3).col(3));

	std::string fileName = "stereoRectify.yaml";
	saveCameraParams(fileName, height, width, distortionL, intrinsicL, Rl, Pl, 
			height, width, distortionR, intrinsicR, Rr, Pr, t, Trl);


	cv::Mat lmapx, lmapy, rmapx, rmapy;
	//cv::initUndistortRectifyMap(intrinsicL, distortionL, Rl, Pl, imgSize, CV_32F, lmapx, lmapy);
	//cv::initUndistortRectifyMap(intrinsicR, distortionR, Rr, Pr, imgSize, CV_32F, rmapx, rmapy);
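	//Build the undistort + rectify lookup tables; cv::remap below uses them to
	//produce row-aligned (rectified) image pairs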

	cv::fisheye::initUndistortRectifyMap(intrinsicL, distortionL, Rl, Pl, imgSize, CV_32F, lmapx, lmapy);
	cv::fisheye::initUndistortRectifyMap(intrinsicR, distortionR, Rr, Pr, imgSize, CV_32F, rmapx, rmapy);

	int num = 0;
	cv::Mat imLeftRect, imRightRect;
	while(num < (int)imgsL.size())
	{
		cv::Mat imgL = imgsL[num];
		cv::Mat imgR = imgsR[num];
		if(imgL.empty() || imgR.empty())
		{
			break;
		}
		cv::remap(imgL, imLeftRect, lmapx, lmapy, cv::INTER_LINEAR);
		cv::remap(imgR, imRightRect, rmapx, rmapy, cv::INTER_LINEAR);

		cv::Mat imgRectShow;
		imgRectShow.create(height, width * 2, CV_8UC3);

		imLeftRect.copyTo(imgRectShow.rowRange(0, height).colRange(0, width));
		imRightRect.copyTo(imgRectShow.rowRange(0, height).colRange(width, 2*width));
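		//Draw evenly spaced horizontal lines across the side-by-side pair: after a good
		//rectification, corresponding points in the left and right images lie on the same row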

		int allLineNum = 20;
		for( int lineNum = 0; lineNum < allLineNum; lineNum++ )
		{
			cv::Point2f pt1(0, lineNum * 20);
			cv::Point2f pt2(imgRectShow.cols-1,lineNum * 20);

			cv::line(imgRectShow, pt1, pt2, cv::Scalar(0, 0, 255));
		}
		cv::imshow("imgRectShow", imgRectShow);
		cv::waitKey(0);
		
		num++;
	}
	return 0;
}
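
After a run, the generated stereoRectify.yaml can be sanity-checked by reading it back with cv::FileStorage, the same way ORB-SLAM2-style settings files are loaded. A minimal sketch as a separate little program (it only assumes the keys written by saveCameraParams above):

#include <opencv2/opencv.hpp>
#include <iostream>

int main()
{
	cv::FileStorage fs("stereoRectify.yaml", cv::FileStorage::READ);
	if(!fs.isOpened())
	{
		std::cout << "failed to open stereoRectify.yaml" << std::endl;
		return -1;
	}

	//scalar entries are read via cv::FileNode's conversion operators
	double fx = fs["Camera.fx"];
	double bf = fs["Camera.bf"];

	//!!opencv-matrix entries are read directly into cv::Mat
	cv::Mat K_l, D_l, R_l, P_l;
	fs["LEFT.K"] >> K_l;
	fs["LEFT.D"] >> D_l;
	fs["LEFT.R"] >> R_l;
	fs["LEFT.P"] >> P_l;

	std::cout << "Camera.fx: " << fx << "  Camera.bf: " << bf << std::endl;
	std::cout << "LEFT.K:" << std::endl << K_l << std::endl;
	std::cout << "LEFT.P:" << std::endl << P_l << std::endl;

	fs.release();
	return 0;
}

The Camera.fx/fy/cx/cy values at the top of the file should match the first 3x3 block of LEFT.P, since saveCameraParams takes them from leftP.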
