(图)效果图
【1】写这篇博客主要是为了总结一下以前做的双目标定,方便以后找工作嘛。太惨,我自己之前写的代码都搞乱了,没办法,用两天时间重新搞了一遍,大部分是转的别人代码,有一个是我自己以前调试的。顺带给需要的童鞋们做个参考。
下面代码如没有特殊说明,都是在OPENCV320 能跑通的,有一个是249版本。
【2】双目截取图片代码 参考
#include <opencv2/opencv.hpp>
#include <iostream>
#include <math.h>
#include <string>
using namespace cv;
using namespace std;// //开头我是从教程学的,一般不变,直接用
int main(int argc, char* argv[])
{
    // Grab synchronized snapshots from a stereo camera pair.
    // camera 0 is saved as "rightN.jpg", camera 1 as "leftN.jpg".
    VideoCapture cap(0);
    VideoCapture cap1(1); // open both cameras
    if (!cap.isOpened())
    {
        printf("Could not open camera0...\n");
        return -1;
    }
    if (!cap1.isOpened())
    {
        printf("Could not open camera1...\n");
        return -2;
    }
    Mat frame, frame1;
    int tt = 1;           // snapshot pair index
    const int delay = 30; // preview frame delay in ms
    while (true)
    {
        cap.read(frame);
        cap1.read(frame1);
        if (frame.empty() || frame1.empty())
            break; // a camera stopped delivering frames
        imshow("camera0", frame);
        imshow("camera1", frame1);
        // Any key pauses the preview and saves the current pair once;
        // the next key press resumes live capture.
        if (waitKey(delay) >= 0)
        {
            // BUGFIX: the original wrote "leftN+.jpg" (stray '+') and
            // rewrote both files on EVERY loop iteration; save exactly
            // one pair per key press with consistent names instead.
            imwrite("left" + to_string(tt) + ".jpg", frame1);
            imwrite("right" + to_string(tt) + ".jpg", frame);
            printf("%d", tt);
            tt = tt + 1;
            waitKey(0); // stay paused until the next key press
        }
    }
    cap.release();
    cap1.release(); // release both cameras
    return 0;
}
【3】下面代码用的opencv249
这个是一年前写的代码,实在忘了都参考了哪些博客,抱歉不能一一给出参考了。
#include "opencv2/video/tracking.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <cv.h>
#include <cxmisc.h>
#include <highgui.h>
#include <cvaux.h>
#include <iostream>
#include <ctype.h>
//#include <unistd.h>
#include <stdlib.h>
#include <vector>
#include <string>
#include <algorithm>
#include <ctype.h>
#include <stdarg.h>
#include <string.h>
#include <iostream>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/opencv.hpp>
#include <stdio.h>
using namespace cv;
using namespace std;
//vector<Point2f> point1, point2;
bool left_mouse = false;           // true while the left mouse button is held
Point2f point;
int pic_info[4];                   // [0] = x, [1] = y of the last left-click
Mat gray, prevGray, image, image1; // current left/right frames (BGR)
const Scalar GREEN = Scalar(0, 255, 0);
int rect_width = 0, rect_height = 0;
Point tmpPoint;
int num = 0;
int m_frameWidth = 640;            // expected camera frame size
int m_frameHeight = 480;
bool m_Calib_Data_Loaded;          // whether the calibration data loaded successfully
cv::Mat m_Calib_Mat_Q;             // reprojection matrix Q
cv::Mat m_Calib_Mat_Remap_X_L;     // left view undistort/rectify map, X coordinates
cv::Mat m_Calib_Mat_Remap_Y_L;     // left view undistort/rectify map, Y coordinates
cv::Mat m_Calib_Mat_Remap_X_R;     // right view undistort/rectify map, X coordinates
cv::Mat m_Calib_Mat_Remap_Y_R;     // right view undistort/rectify map, Y coordinates
cv::Mat m_Calib_Mat_Mask_Roi;      // mask of the valid rectified left-view area
cv::Rect m_Calib_Roi_L;            // valid rectangle of the rectified left view
cv::Rect m_Calib_Roi_R;            // valid rectangle of the rectified right view
double m_FL;                       // left camera focal length fx
//CvStereoBMState *BMState = cvCreateStereoBMState();
// BUGFIX: m_numberOfDisparies was declared twice in the original, which
// is a redefinition compile error; keep a single definition.
int m_numberOfDisparies;           // disparity search range
cv::StereoBM m_BM;                 // OpenCV 2.4 block-matching stereo matcher
CvMat* vdisp = cvCreateMat(480, 640, CV_8U);
cv::Mat img1, img2, img1p, img2p, disp, disp8u, pointClouds, imageLeft, imageRight, disparityImage, imaget1;
static IplImage *framet1 = NULL;
static IplImage *framet2 = NULL;
static IplImage *framet3 = NULL;
static IplImage *framet = NULL;
static void onMouse(int event, int x, int y, int /*flags*/, void* /*param*/){
    // Mouse callback for the "disparitycolor" window: records the pixel
    // the user clicked so detectDistance() can report its depth.
    Mat mouse_show;
    image.copyTo(mouse_show); // scratch copy of the current left frame
    switch (event)
    {
    case CV_EVENT_LBUTTONDOWN:
        // Remember the click position; detectDistance() reads pic_info.
        pic_info[0] = x;
        pic_info[1] = y;
        cout << "x:" << pic_info[0] << "y:" << pic_info[1] << endl;
        left_mouse = true;
        break;
    case CV_EVENT_LBUTTONUP:
        left_mouse = false;
        break;
    case CV_EVENT_MOUSEMOVE:
        if (left_mouse)
        {
            // dragging: intentionally a no-op
        }
        break;
    }
}
int loadCalibData()
{
    // Load the stereo calibration data (Q matrix, valid ROIs, remap tables,
    // left intrinsics) from "calib_paras(BM5).xml".
    // Returns 1 on success, 0 if the file cannot be opened, -2 if the
    // rectification method is not BOUGUET, -3 on malformed ROI data, and
    // -99 on any other error. m_Calib_Data_Loaded is true only on success.
    m_Calib_Data_Loaded = false;
    try
    {
        cv::FileStorage fs("calib_paras(BM5).xml", cv::FileStorage::READ);
        cout << fs.isOpened() << endl;
        if (!fs.isOpened())
        {
            return (0);
        }
        cv::Size imageSize;
        cv::FileNodeIterator it = fs["imageSize"].begin();
        it >> imageSize.width >> imageSize.height;
        // if (imageSize.width != m_frameWidth || imageSize.height != m_frameHeight) { return (-1); }
        vector<int> roiVal1;
        vector<int> roiVal2;
        fs["leftValidArea"] >> roiVal1;
        fs["rightValidArea"] >> roiVal2;
        // BUGFIX: validate the vector sizes before indexing — the original
        // read roiVal1[0..3] unconditionally (undefined behaviour on a
        // malformed file).
        if (roiVal1.size() < 4 || roiVal2.size() < 4)
        {
            return (-3);
        }
        m_Calib_Roi_L = cv::Rect(roiVal1[0], roiVal1[1], roiVal1[2], roiVal1[3]);
        m_Calib_Roi_R = cv::Rect(roiVal2[0], roiVal2[1], roiVal2[2], roiVal2[3]);
        fs["QMatrix"] >> m_Calib_Mat_Q;
        fs["remapX1"] >> m_Calib_Mat_Remap_X_L;
        fs["remapY1"] >> m_Calib_Mat_Remap_Y_L;
        fs["remapX2"] >> m_Calib_Mat_Remap_X_R;
        fs["remapY2"] >> m_Calib_Mat_Remap_Y_R;
        cv::Mat lfCamMat;
        fs["leftCameraMatrix"] >> lfCamMat;
        m_FL = lfCamMat.at<double>(0, 0); // fx of the left camera
        // Flip the sign of Q(3,2) so reprojected depths come out with the
        // expected sign for this rig.
        m_Calib_Mat_Q.at<double>(3, 2) = -m_Calib_Mat_Q.at<double>(3, 2);
        // Mask covering the valid rectified left-view region.
        m_Calib_Mat_Mask_Roi = cv::Mat::zeros(m_frameHeight, m_frameWidth, CV_8UC1);
        cv::rectangle(m_Calib_Mat_Mask_Roi, m_Calib_Roi_L, cv::Scalar(255), -1);
        m_BM.state->roi1 = m_Calib_Roi_L;
        m_BM.state->roi2 = m_Calib_Roi_R;
        string method;
        fs["rectifyMethod"] >> method;
        if (method != "BOUGUET")
        {
            // BUGFIX: the original had already set m_Calib_Data_Loaded to
            // true before this check, so a failed load still looked loaded.
            return (-2);
        }
        m_Calib_Data_Loaded = true;
    }
    catch (std::exception&)
    {
        m_Calib_Data_Loaded = false;
        return (-99);
    }
    return 1;
}
void updatebm()
{
m_BM.state->preFilterCap = 31;
m_BM.state->SADWindowSize = 19;
m_BM.state->minDisparity = 0;
m_BM.state->numberOfDisparities = 96;
m_BM.state->textureThreshold = 10;
m_BM.state->uniquenessRatio = 25;
m_BM.state->speckleWindowSize = 100;
m_BM.state->speckleRange = 32;
m_BM.state->disp12MaxDiff = -1;
}
int bmMatch(cv::Mat& frameLeft, cv::Mat& frameRight, cv::Mat& disparity, cv::Mat& imageLeft, cv::Mat& imageRight)
{
    // Compute a BM disparity map for one stereo frame pair.
    //  frameLeft/frameRight: input BGR frames.
    //  disparity: output disparity, masked to the valid left ROI.
    //  imageLeft/imageRight: rectified display copies with the valid ROI drawn.
    // Returns 1 on success, 0 on bad input.
    if (frameLeft.empty() || frameRight.empty())
    {
        disparity = cv::Scalar(0);
        return 0;
    }
    if (m_frameWidth == 0 || m_frameHeight == 0)
    {
        return 0;
    }
    // convert to grayscale (BM works on single-channel input)
    cv::Mat img1proc, img2proc;
    cvtColor(frameLeft, img1proc, CV_BGR2GRAY);
    cvtColor(frameRight, img2proc, CV_BGR2GRAY);
    // rectify so the two views are row-aligned
    cv::Mat img1remap, img2remap;
    if (m_Calib_Data_Loaded)
    {
        remap(img1proc, img1remap, m_Calib_Mat_Remap_X_L, m_Calib_Mat_Remap_Y_L, cv::INTER_LINEAR);
        remap(img2proc, img2remap, m_Calib_Mat_Remap_X_R, m_Calib_Mat_Remap_Y_R, cv::INTER_LINEAR);
    }
    else
    {
        img1remap = img1proc;
        img2remap = img2proc;
    }
    // Pad the left edge so the valid disparity region keeps the original
    // image width (BM cannot match the leftmost numberOfDisparities columns).
    cv::Mat img1border, img2border;
    if (m_numberOfDisparies != m_BM.state->numberOfDisparities)
        m_numberOfDisparies = m_BM.state->numberOfDisparities;
    copyMakeBorder(img1remap, img1border, 0, 0, m_BM.state->numberOfDisparities, 0, IPL_BORDER_REPLICATE);
    copyMakeBorder(img2remap, img2border, 0, 0, m_BM.state->numberOfDisparities, 0, IPL_BORDER_REPLICATE);
    // run block matching
    cv::Mat dispBorder;
    m_BM(img1border, img2border, dispBorder);
    // crop the padded columns back off
    cv::Mat disp;
    disp = dispBorder.colRange(m_BM.state->numberOfDisparities, img1border.cols);
    disp.copyTo(disparity, m_Calib_Mat_Mask_Roi);
    // produce the rectified display images
    if (m_Calib_Data_Loaded)
    {
        remap(frameLeft, imageLeft, m_Calib_Mat_Remap_X_L, m_Calib_Mat_Remap_Y_L, cv::INTER_LINEAR);
        rectangle(imageLeft, m_Calib_Roi_L, CV_RGB(0, 0, 255), 3);
    }
    else
        frameLeft.copyTo(imageLeft);
    if (m_Calib_Data_Loaded)
    {
        remap(frameRight, imageRight, m_Calib_Mat_Remap_X_R, m_Calib_Mat_Remap_Y_R, cv::INTER_LINEAR);
        // BUGFIX: draw the right ROI only when calibration data exists,
        // mirroring the left-view handling — the original drew an
        // uninitialised ROI on uncalibrated frames.
        rectangle(imageRight, m_Calib_Roi_R, CV_RGB(0, 0, 255), 3);
    }
    else
        frameRight.copyTo(imageRight);
    return 1;
}
int getDisparityImage(cv::Mat& disparity, cv::Mat& disparityImage, bool isColor)
{
    // Convert a raw disparity map to an 8-bit image for display.
    //  isColor = true  -> pseudo-colour (red = far, blue = near)
    //  isColor = false -> plain grayscale
    // Returns 1.
    cv::Mat disp8u;
    if (disparity.depth() != CV_8U)
    {
        if (disparity.depth() == CV_8S)
        {
            disparity.convertTo(disp8u, CV_8U);
        }
        else
        {
            // BUGFIX: guard against division by zero when the disparity
            // range has not been initialised (m_numberOfDisparies == 0).
            const int ndisp = m_numberOfDisparies > 0 ? m_numberOfDisparies : 1;
            // raw disparity is fixed-point (x16), hence the *16 factor
            disparity.convertTo(disp8u, CV_8U, 255 / (ndisp * 16.));
        }
    }
    else
    {
        disp8u = disparity;
    }
    // map to pseudo-colour or copy through as grayscale
    if (isColor)
    {
        if (disparityImage.empty() || disparityImage.type() != CV_8UC3 || disparityImage.size() != disparity.size())
        {
            disparityImage = cv::Mat::zeros(disparity.rows, disparity.cols, CV_8UC3);
        }
        for (int y = 0; y < disparity.rows; y++)
        {
            for (int x = 0; x < disparity.cols; x++)
            {
                uchar val = disp8u.at<uchar>(y, x);
                uchar r, g, b;
                if (val == 0)
                    r = g = b = 0; // no disparity -> black
                else
                {
                    r = 255 - val; // small disparity (far) -> red
                    g = val < 128 ? val * 2 : (uchar)((255 - val) * 2);
                    b = val;       // large disparity (near) -> blue
                }
                disparityImage.at<cv::Vec3b>(y, x) = cv::Vec3b(r, g, b);
            }
        }
    }
    else
    {
        disp8u.copyTo(disparityImage);
    }
    return 1;
}
int getPointClouds(cv::Mat& disparity, cv::Mat& pointClouds)
{
    // Reproject the disparity map to a 3-D point cloud using Q, apply the
    // rig's scale factor, and mirror the Y axis.
    // Returns 1 on success, 0 when the disparity map is empty.
    if (disparity.empty())
        return 0;
    reprojectImageTo3D(disparity, pointClouds, m_Calib_Mat_Q, true);
    // empirical scale factor — presumably tuned for this rig; TODO confirm
    pointClouds *= 1.6;
    // flip Y so the cloud uses the opposite vertical convention
    for (int row = 0; row < pointClouds.rows; ++row)
    {
        for (int col = 0; col < pointClouds.cols; ++col)
        {
            cv::Point3f p = pointClouds.at<cv::Point3f>(row, col);
            p.y = -p.y;
            pointClouds.at<cv::Point3f>(row, col) = p;
        }
    }
    return 1;
}
void detectDistance(cv::Mat& pointCloud)
{
    // Print the depth (Z) at the pixel last clicked in the disparity
    // window (pic_info is filled by onMouse).
    if (pointCloud.empty())
    {
        return;
    }
    // split off the Z channel as the depth image
    vector<cv::Mat> xyzSet;
    split(pointCloud, xyzSet);
    cv::Mat depth;
    xyzSet[2].copyTo(depth);
    // binarise around 1.5x the nearest depth (input for the commented-out
    // denoising step below)
    double maxVal = 0, minVal = 0;
    cv::Mat depthThresh = cv::Mat::zeros(depth.rows, depth.cols, CV_8UC1);
    cv::minMaxLoc(depth, &minVal, &maxVal);
    double thrVal = minVal * 1.5;
    threshold(depth, depthThresh, thrVal, 255, CV_THRESH_BINARY_INV);
    depthThresh.convertTo(depthThresh, CV_8UC1);
    //imageDenoising(depthThresh, 3);
    // BUGFIX: Mat::at takes (row, col) == (y, x); the original passed
    // (x, y) and read the wrong pixel. Also reject clicks outside the map.
    const int col = pic_info[0];
    const int row = pic_info[1];
    if (row < 0 || row >= depth.rows || col < 0 || col >= depth.cols)
    {
        return;
    }
    double distance = depth.at<float>(row, col);
    cout << "distance:" << distance << endl;
}
int main(int argc, char** argv)
{
    // Open both cameras, load the stereo calibration data, then loop:
    // grab a frame pair, compute the BM disparity, display the colour
    // disparity map, and print the depth at the last clicked pixel.
    VideoCapture cap(0);
    VideoCapture cap1(1);
    if (!cap.isOpened())
    {
        cout << "error happened while open cam 1" << endl;
        return -1;
    }
    if (!cap1.isOpened())
    {
        cout << "error happened while open cam 2" << endl;
        return -1;
    }
    namedWindow("left", 1);
    namedWindow("right", 1);
    namedWindow("disparitycolor", 1);
    setMouseCallback("disparitycolor", onMouse, 0); // records click position
    loadCalibData();
    cout << m_Calib_Data_Loaded << endl; // 1 if calibration data loaded
    while (true)
    {
        Mat frame;
        Mat frame1;
        cap.read(frame);
        cap1.read(frame1);
        if (frame.empty()) break;  // stop when a camera stops delivering
        if (frame1.empty()) break;
        frame.copyTo(image);       // kept for the mouse callback
        frame1.copyTo(image1);
        // NOTE(review): updatebm() sets constant parameters, so it could
        // be hoisted out of the loop.
        updatebm();
        bmMatch(frame, frame1, disp, imageLeft, imageRight);
        imshow("left", imageLeft);
        imshow("right", imageRight);
        getDisparityImage(disp, disparityImage, true); // colourise for display
        // framet2 = &IplImage(disparityImage);
        // cvShowImage("disparitycolor", framet2);
        getPointClouds(disp, pointClouds);             // 3-D cloud for distance
        imshow("disparitycolor", disparityImage);
        detectDistance(pointClouds);                   // print clicked depth
        waitKey(100);
    }
    //std::swap(point2, point1);
    // cv::swap(prevGray, gray);
    return 0;
}
【4】下面代码用的opencv320
代码参考https://blog.csdn.net/xiao__run/article/details/78900652
/******************************/
/* 立体匹配和测距 */
/******************************/
#include <opencv2/opencv.hpp>
#include <iostream>
using namespace std;
using namespace cv;
const int imageWidth = 800;  // camera resolution
const int imageHeight = 600;
Size imageSize = Size(imageWidth, imageHeight);
Mat rgbImageL, grayImageL;
Mat rgbImageR, grayImageR;
Mat rectifyImageL, rectifyImageR;
Rect validROIL; // rectification crops the image; validROI is the kept region
Rect validROIR;
Mat mapLx, mapLy, mapRx, mapRy; // undistort/rectify maps
Mat Rl, Rr, Pl, Pr, Q; // rectification rotations R*, projection matrices P*, reprojection matrix Q
Mat xyz; // reprojected 3-D coordinates
Point origin; // mouse-down start point
Rect selection; // selection rectangle being dragged
bool selectObject = false; // whether a selection drag is in progress
int blockSize = 0, uniquenessRatio = 0, numDisparities = 0; // trackbar-controlled BM parameters
Ptr<StereoBM> bm = StereoBM::create(16, 9);
/*
Previously calibrated camera intrinsics:
fx 0 cx
0 fy cy
0 0 1
*/
Mat cameraMatrixL = (Mat_<double>(3, 3) << 682.55880, 0, 384.13666,
0, 682.24569, 311.19558,
0, 0, 1);
// left camera matrix from the MATLAB calibration
Mat distCoeffL = (Mat_<double>(5, 1) << -0.51614, 0.36098, 0.00523, -0.00225, 0.00000);
// left camera distortion coefficients from MATLAB
Mat cameraMatrixR = (Mat_<double>(3, 3) << 685.03817, 0, 397.39092,
0, 682.54282, 272.04875,
0, 0, 1);
// right camera matrix from the MATLAB calibration
Mat distCoeffR = (Mat_<double>(5, 1) << -0.46640, 0.22148, 0.00947, -0.00242, 0.00000);
// right camera distortion coefficients from MATLAB
Mat T = (Mat_<double>(3, 1) << -61.34485, 2.89570, -4.76870);// translation vector T from MATLAB
// (MATLAB "T" output)
Mat rec = (Mat_<double>(3, 1) << -0.00306, -0.03207, 0.00206);// rotation vector (MATLAB "om" output)
Mat R;// rotation matrix (filled by Rodrigues)
/*****立体匹配*****/
void stereo_match(int, void*)
{
    // Trackbar callback: (re)configure the BM matcher from the slider
    // values, compute the disparity map, reproject it to 3-D (xyz, used
    // by the mouse callback), and display the 8-bit disparity.
    bm->setBlockSize(2 * blockSize + 5); // SAD window size; 5..21 is reasonable
    bm->setROI1(validROIL);
    bm->setROI2(validROIR);
    bm->setPreFilterCap(31);
    bm->setMinDisparity(0); // minimum disparity; default 0, may be negative
    bm->setNumDisparities(numDisparities * 16 + 16);// disparity range (max - min); must be a multiple of 16
    bm->setTextureThreshold(10);
    bm->setUniquenessRatio(uniquenessRatio);// higher values suppress false matches
    bm->setSpeckleWindowSize(100);
    bm->setSpeckleRange(32);
    bm->setDisp12MaxDiff(-1);
    Mat disp, disp8;
    bm->compute(rectifyImageL, rectifyImageR, disp);// inputs must be grayscale
    disp.convertTo(disp8, CV_8U, 255 / ((numDisparities * 16 + 16)*16.));// raw disparity is CV_16S fixed point (x16)
    reprojectImageTo3D(disp, xyz, Q, true); // X/W, Y/W, Z/W must be multiplied by 16 for real coordinates
    xyz = xyz * 16;
    imshow("disparity", disp8);
}
/*****描述:鼠标操作回调*****/
static void onMouse(int event, int x, int y, int, void*)
{
    // Mouse callback for the "disparity" window: drag to update the
    // selection rectangle; a left click prints the clicked pixel's 3-D
    // world coordinate taken from the reprojected cloud (xyz).
    if (selectObject)
    {
        // keep the selection rectangle in sync with the drag
        selection.x = MIN(x, origin.x);
        selection.y = MIN(y, origin.y);
        selection.width = std::abs(x - origin.x);
        selection.height = std::abs(y - origin.y);
    }
    if (event == EVENT_LBUTTONDOWN)
    {
        // start a new selection and report the 3-D coordinate of the click
        origin = Point(x, y);
        selection = Rect(x, y, 0, 0);
        selectObject = true;
        cout << origin << "in world coordinate is: " << xyz.at<Vec3f>(origin) << endl;
    }
    else if (event == EVENT_LBUTTONUP)
    {
        selectObject = false;
    }
}
/*****主函数*****/
int main()
{
/*
立体校正
*/
Rodrigues(rec, R); //Rodrigues变换
stereoRectify(cameraMatrixL, distCoeffL, cameraMatrixR, distCoeffR, imageSize, R, T, Rl, Rr, Pl, Pr, Q, CALIB_ZERO_DISPARITY,
0, imageSize, &validROIL, &validROIR);
initUndistortRectifyMap(cameraMatrixL, distCoeffL, Rl, Pr, imageSize, CV_32FC1, mapLx, mapLy);
initUndistortRectifyMap(cameraMatrixR, distCoeffR, Rr, Pr, imageSize, CV_32FC1, mapRx, mapRy);
/*
读取图片
*/
rgbImageL = imread("left58.jpg", CV_LOAD_IMAGE_COLOR);
cvtColor(rgbImageL, grayImageL, CV_BGR2GRAY);
rgbImageR = imread("right58.jpg", CV_LOAD_IMAGE_COLOR);
cvtColor(rgbImageR, grayImageR, CV_BGR2GRAY);
imshow("ImageL Before Rectify", grayImageL);
imshow("ImageR Before Rectify", grayImageR);
/*
经过remap之后,左右相机的图像已经共面并且行对准了
*/
remap(grayImageL, rectifyImageL, mapLx, mapLy, INTER_LINEAR);
remap(grayImageR, rectifyImageR, mapRx, mapRy, INTER_LINEAR);
/*
把校正结果显示出来
*/
Mat rgbRectifyImageL, rgbRectifyImageR;
cvtColor(rectifyImageL, rgbRectifyImageL, CV_GRAY2BGR); //伪彩色图
cvtColor(rectifyImageR, rgbRectifyImageR, CV_GRAY2BGR);
//单独显示
//rectangle(rgbRectifyImageL, validROIL, Scalar(0, 0, 255), 3, 8);
//rectangle(rgbRectifyImageR, validROIR, Scalar(0, 0, 255), 3, 8);
imshow("ImageL After Rectify", rgbRectifyImageL);
imshow("ImageR After Rectify", rgbRectifyImageR);
//显示在同一张图上
Mat canvas;
double sf;
int w, h;
sf = 600. / MAX(imageSize.width, imageSize.height);
w = cvRound(imageSize.width * sf);
h = cvRound(imageSize.height * sf);
canvas.create(h, w * 2, CV_8UC3); //注意通道
//左图像画到画布上
Mat canvasPart = canvas(Rect(w * 0, 0, w, h)); //得到画布的一部分
resize(rgbRectifyImageL, canvasPart, canvasPart.size(), 0, 0, INTER_AREA); //把图像缩放到跟canvasPart一样大小
Rect vroiL(cvRound(validROIL.x*sf), cvRound(validROIL.y*sf), //获得被截取的区域
cvRound(validROIL.width*sf), cvRound(validROIL.height*sf));
//rectangle(canvasPart, vroiL, Scalar(0, 0, 255), 3, 8); //画上一个矩形
cout << "Painted ImageL" << endl;
//右图像画到画布上
canvasPart = canvas(Rect(w, 0, w, h)); //获得画布的另一部分
resize(rgbRectifyImageR, canvasPart, canvasPart.size(), 0, 0, INTER_LINEAR);
Rect vroiR(cvRound(validROIR.x * sf), cvRound(validROIR.y*sf),
cvRound(validROIR.width * sf), cvRound(validROIR.height * sf));
//rectangle(canvasPart, vroiR, Scalar(0, 0, 255), 3, 8);
cout << "Painted ImageR" << endl;
//画上对应的线条
for (int i = 0; i < canvas.rows; i += 16)
line(canvas, Point(0, i), Point(canvas.cols, i), Scalar(0, 255, 0), 1, 8);
imshow("rectified", canvas);
/*
立体匹配
*/
namedWindow("disparity", CV_WINDOW_AUTOSIZE);
// 创建SAD窗口 Trackbar
createTrackbar("BlockSize:\n", "disparity", &blockSize, 8, stereo_match);
// 创建视差唯一性百分比窗口 Trackbar
createTrackbar("UniquenessRatio:\n", "disparity", &uniquenessRatio, 50, stereo_match);
// 创建视差窗口 Trackbar
createTrackbar("NumDisparities:\n", "disparity", &numDisparities, 16, stereo_match);
//鼠标响应函数setMouseCallback(窗口名称, 鼠标回调函数, 传给回调函数的参数,一般取0)
setMouseCallback("disparity", onMouse, 0);
stereo_match(0, 0);
waitKey(0);
return 0;
}
【5】 双目标定----单目
程序参考博客 https://blog.csdn.net/xiao__run/article/details/78900652
#include <iostream>
#include <sstream>
#include <time.h>
#include <stdio.h>
#include <fstream>
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <opencv2/highgui/highgui.hpp>
using namespace cv;
using namespace std;
#define calibration
int main()
{
#ifdef calibration
    // Single-camera calibration: detect chessboard corners in every image
    // listed in left_img.txt, refine them, run calibrateCamera, evaluate
    // the reprojection error, save the results, and write undistorted
    // copies of all images.
    ifstream fin("left_img.txt");                  /* list of calibration image paths */
    ofstream fout("caliberation_result_left.txt"); /* calibration results are written here */
    int image_count = 0;              /* number of images processed */
    Size image_size;                  /* image dimensions */
    Size board_size = Size(7, 5);     /* inner corners per chessboard row/column */
    vector<Point2f> image_points_buf; /* corners detected in the current image */
    vector<vector<Point2f>> image_points_seq; /* corners of all images */
    string filename;
    vector<string> filenames;
    while (getline(fin, filename))
    {
        ++image_count;
        Mat imageInput = imread(filename);
        // BUGFIX: guard against unreadable files — the original went on to
        // use an empty Mat.
        if (imageInput.empty())
        {
            cout << "**" << filename << "** can not be read!\n";
            exit(1);
        }
        filenames.push_back(filename);
        // take the image size from the first image
        if (image_count == 1)
        {
            image_size.width = imageInput.cols;
            image_size.height = imageInput.rows;
        }
        /* corner extraction */
        if (0 == findChessboardCorners(imageInput, board_size, image_points_buf))
        {
            cout << "**" << filename << "** can not find chessboard corners!\n";
            exit(1);
        }
        else
        {
            Mat view_gray;
            cvtColor(imageInput, view_gray, CV_RGB2GRAY); // grayscale for cornerSubPix
            /* sub-pixel refinement:
               Size(5,5)    search-window size
               (-1,-1)      no dead zone
               TermCriteria stop after 30 iterations or 0.1 accuracy */
            cornerSubPix(view_gray, image_points_buf, Size(5, 5), Size(-1, -1), TermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 30, 0.1));
            image_points_seq.push_back(image_points_buf); // keep the refined corners
            /* show the detected corners */
            drawChessboardCorners(view_gray, board_size, image_points_buf, false);
            imshow("Camera Calibration", view_gray);
            waitKey(500); // pause 0.5 s
        }
    }
    int CornerNum = board_size.width * board_size.height; // corners per image
    //-------------- camera calibration --------------
    /* chessboard geometry */
    Size square_size = Size(60, 60); /* measured size of one chessboard square */
    vector<vector<Point3f>> object_points; /* 3-D corner coordinates */
    /* intrinsic and extrinsic parameters */
    Mat cameraMatrix = Mat(3, 3, CV_32FC1, Scalar::all(0)); /* intrinsic matrix */
    vector<int> point_counts; // corners per image
    Mat distCoeffs = Mat(1, 5, CV_32FC1, Scalar::all(0)); /* distortion coefficients k1,k2,p1,p2,k3 */
    // BUGFIX: the original comments had these two swapped — calibrateCamera
    // fills rvecsMat with ROTATION vectors and tvecsMat with TRANSLATION
    // vectors (in that argument order).
    vector<Mat> rvecsMat; /* rotation vector of each image */
    vector<Mat> tvecsMat; /* translation vector of each image */
    /* initialise the 3-D chessboard corner coordinates */
    int i, j, t;
    for (t = 0; t < image_count; t++)
    {
        vector<Point3f> tempPointSet;
        for (i = 0; i < board_size.height; i++)
        {
            for (j = 0; j < board_size.width; j++)
            {
                Point3f realPoint;
                /* the board is assumed to lie in the z = 0 world plane */
                realPoint.x = i * square_size.width;
                realPoint.y = j * square_size.height;
                realPoint.z = 0;
                tempPointSet.push_back(realPoint);
            }
        }
        object_points.push_back(tempPointSet);
    }
    /* assume the full board is visible in every image */
    for (i = 0; i < image_count; i++)
    {
        point_counts.push_back(board_size.width * board_size.height);
    }
    /* run the calibration:
       object_points    3-D corner coordinates (world frame)
       image_points_seq detected 2-D corners
       image_size       image size in pixels
       cameraMatrix     out: intrinsic matrix
       distCoeffs       out: distortion coefficients
       rvecsMat         out: rotation vectors
       tvecsMat         out: translation vectors
       0                calibration flags */
    calibrateCamera(object_points, image_points_seq, image_size, cameraMatrix, distCoeffs, rvecsMat, tvecsMat, 0);
    //------------------------ calibration done ------------------------
    // ------------------- evaluate the calibration --------------------
    double total_err = 0.0; /* sum of the per-image mean errors */
    double err = 0.0;       /* mean error of one image */
    vector<Point2f> image_points2; /* reprojected corner positions */
    fout << "每幅图像的标定误差:\n";
    for (i = 0; i < image_count; i++)
    {
        vector<Point3f> tempPointSet = object_points[i];
        /* reproject the 3-D corners with the recovered parameters */
        projectPoints(tempPointSet, rvecsMat[i], tvecsMat[i], cameraMatrix, distCoeffs, image_points2);
        /* compare against the originally detected corners */
        vector<Point2f> tempImagePoint = image_points_seq[i];
        Mat tempImagePointMat = Mat(1, tempImagePoint.size(), CV_32FC2);
        Mat image_points2Mat = Mat(1, image_points2.size(), CV_32FC2);
        for (int j = 0; j < tempImagePoint.size(); j++)
        {
            image_points2Mat.at<Vec2f>(0, j) = Vec2f(image_points2[j].x, image_points2[j].y);
            tempImagePointMat.at<Vec2f>(0, j) = Vec2f(tempImagePoint[j].x, tempImagePoint[j].y);
        }
        err = norm(image_points2Mat, tempImagePointMat, NORM_L2);
        total_err += err /= point_counts[i];
        fout << "第" << i + 1 << "幅图像的平均误差:" << err << "像素" << endl;
    }
    fout << "总体平均误差:" << total_err / image_count << "像素" << endl << endl;
    //------------------------- evaluation done -------------------------
    //----------------------- save the results --------------------------
    Mat rotation_matrix = Mat(3, 3, CV_32FC1, Scalar::all(0)); /* per-image rotation matrix */
    fout << "相机内参数矩阵:" << endl;
    fout << cameraMatrix << endl << endl;
    fout << "畸变系数:\n";
    fout << distCoeffs << endl << endl << endl;
    for (int i = 0; i < image_count; i++)
    {
        // BUGFIX: the original wrote tvecsMat (translations) under the
        // "rotation vector" label and ran Rodrigues on it, and wrote
        // rvecsMat under the "translation vector" label.
        fout << "第" << i + 1 << "幅图像的旋转向量:" << endl;
        fout << rvecsMat[i] << endl;
        /* convert the rotation vector to the corresponding matrix */
        Rodrigues(rvecsMat[i], rotation_matrix);
        fout << "第" << i + 1 << "幅图像的旋转矩阵:" << endl;
        fout << rotation_matrix << endl;
        fout << "第" << i + 1 << "幅图像的平移向量:" << endl;
        fout << tvecsMat[i] << endl << endl;
    }
    fout << endl;
    //-------------------- results saved --------------------------------
    //---------------------- show the undistorted images ----------------
    Mat mapx = Mat(image_size, CV_32FC1);
    Mat mapy = Mat(image_size, CV_32FC1);
    Mat R = Mat::eye(3, 3, CV_32F);
    string imageFileName;
    std::stringstream StrStm;
    // The undistortion maps depend only on the intrinsics, so build them
    // once (the original recomputed them inside the loop).
    initUndistortRectifyMap(cameraMatrix, distCoeffs, R, cameraMatrix, image_size, CV_32FC1, mapx, mapy);
    for (int i = 0; i != image_count; i++)
    {
        Mat imageSource = imread(filenames[i]);
        Mat newimage = imageSource.clone();
        remap(imageSource, newimage, mapx, mapy, INTER_LINEAR);
        StrStm.clear();
        imageFileName.clear();
        StrStm << i + 1;
        StrStm >> imageFileName;
        imageFileName += "_d.jpg";
        imwrite(imageFileName, newimage);
    }
    fin.close();
    fout.close();
#else
    /// Undistort a single image using previously saved intrinsics.
    Mat src = imread("F:\\lane_line_detection\\left_img\\1.jpg");
    Mat distortion = src.clone();
    Mat camera_matrix = Mat(3, 3, CV_32FC1);
    Mat distortion_coefficients;
    // load the intrinsic matrix and distortion coefficients
    FileStorage file_storage("F:\\lane_line_detection\\left_img\\Intrinsic.xml", FileStorage::READ);
    file_storage["CameraMatrix"] >> camera_matrix;
    file_storage["Dist"] >> distortion_coefficients;
    file_storage.release();
    // undistort and show the result
    cv::undistort(src, distortion, camera_matrix, distortion_coefficients);
    cv::imshow("img", src);
    cv::imshow("undistort", distortion);
    cv::imwrite("undistort.jpg", distortion);
    cv::waitKey(0);
#endif // DEBUG
    return 0;
}
【6】这个程序是不是需要单独标定单目,没仔细看 ----------标定和测距在一起的
/*
* m_stereo_match.cpp
* Created by 杨帮杰 on 7/30/18.
* Right to use this code in any way you want without warranty,
* support or any guarantee of it working
* E-mail: yangbangjie1998@qq.com
*/
#include <iostream>
#include <iomanip>
#include <vector>
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/features2d.hpp>
#include "opencv2/imgproc.hpp"
#include "opencv2/calib3d.hpp"
#define LEFT_CAM_PICS_FILE_PATH "left" // file-name prefix of the left images
#define RIGHT_CAM_PICS_FILE_PATH "right" // file-name prefix of the right images
#define PICS_NUMBER 20 // number of calibration image pairs
const int imageWidth = 640; // camera resolution
const int imageHeight = 480;
const int boardWidth = 7; // inner corners per chessboard row
const int boardHeight = 5; // inner corners per chessboard column
const float squareSize = 3.00; // chessboard square size, in cm
const cv::Size imageSize = cv::Size(imageWidth, imageHeight);
const cv::Size boardSize = cv::Size(boardWidth, boardHeight); // inner-corner layout of the board
cv::Mat Rl, Rr, Pl, Pr, Q; // rectification rotations R*, projection matrices P*, reprojection matrix Q
cv::Mat mapLx, mapLy, mapRx, mapRy; // undistort/rectify maps
cv::Rect validROIL, validROIR; // valid regions left after rectification cropping
cv::Mat xyz; // reprojected 3-D point cloud
int goodFrameCount = 0; // number of usable image pairs found so far
cv::Mat R, T, E, F; // rotation, translation, essential matrix, fundamental matrix
std::vector<cv::Mat> rvecs; // rotation vectors
std::vector<cv::Mat> tvecs; // translation vectors
std::vector<std::vector<cv::Point2f>> imagePointL; // detected corners of all left images
std::vector<std::vector<cv::Point2f>> imagePointR; // detected corners of all right images
std::vector<std::vector<cv::Point3f>> objRealPoint; // physical corner coordinates per image
// intrinsic matrices and distortion coefficients from single-camera calibration
cv::Mat cameraMatrixL = (cv::Mat_<double>(3, 3) <<
570.853, 0, 163.936,
0, 565.62, 142.756,
0, 0, 1);
cv::Mat distCoeffL = (cv::Mat_<double>(5, 1) << -0.1464597668354846, -6.154543533838482,
-0.002589887217588616, 0.005985159261180101, 58.40123386205326);
cv::Mat cameraMatrixR = (cv::Mat_<double>(3, 3) <<
568.373, 0, 158.748,
0, 562.243, 114.268,
0, 0, 1);
cv::Mat distCoeffR = (cv::Mat_<double>(5, 1) << -0.2883413485650786, -1.10075802161073,
-0.00209556234492967, 0.007351217947355803, 6.544712063275942);
/* Build the physical (world-frame) coordinates of the chessboard corners. */
void calRealPoint(std::vector<std::vector<cv::Point3f>>& obj, int boardwidth, int boardheight, int imgNumber, float squaresize)
{
    // One template board in the z = 0 plane, replicated once per image.
    // BUGFIX: the original used float loop counters; integer indices
    // express the intent and avoid floating-point loop pitfalls.
    std::vector<cv::Point3f> imgpoint;
    for (int rowIndex = 0; rowIndex < boardheight; rowIndex++)
    {
        for (int colIndex = 0; colIndex < boardwidth; colIndex++)
        {
            imgpoint.push_back(cv::Point3f(rowIndex * squaresize, colIndex * squaresize, 0));
        }
    }
    for (int imgIndex = 0; imgIndex < imgNumber; imgIndex++)
    {
        obj.push_back(imgpoint);
    }
}
void onMouse(int event, int x, int y, int, void*)
{
    // Mouse callback for the "disparity8" window: prints the 3-D world
    // coordinate of the clicked pixel, read from the shared cloud (xyz).
    cv::Point origin;
    switch (event)
    {
    case cv::EVENT_LBUTTONDOWN: // left button pressed
        origin = cv::Point(x, y);
        // NOTE(review): this permanently adds 2 to the Z value of the
        // clicked point in the shared xyz cloud — looks like a leftover
        // debugging tweak; confirm whether the offset is intended.
        xyz.at<cv::Vec3f>(origin)[2] += 2;
        std::cout << origin << "in world coordinate is: " << xyz.at<cv::Vec3f>(origin) << std::endl;
        break;
    }
}
int main()
{
    // Stereo calibration pipeline: detect chessboard corners in
    // PICS_NUMBER image pairs, run stereoCalibrate (refining the given
    // intrinsics), rectify, then validate with an SGBM disparity map and
    // 3-D reprojection; clicking the disparity window prints the world
    // coordinate of the clicked pixel.
    std::vector<std::string> filelistL;
    std::vector<std::string> filelistR;
    // build the left-camera file list: "left1.jpg" .. "leftN.jpg"
    for (int i = 1; i <= PICS_NUMBER ; i++) {
        std::stringstream str;
        str << LEFT_CAM_PICS_FILE_PATH << i << ".jpg";//<< std::setfill('0') << std::setw(2)
        std::cout << str.str() << std::endl;
        filelistL.push_back(str.str());
    }
    // build the right-camera file list
    for (int i = 1; i <= PICS_NUMBER ; i++) {
        std::stringstream str;
        str << RIGHT_CAM_PICS_FILE_PATH << i << ".jpg";//<< std::setfill('0') std::setw(2) <<
        std::cout << str.str() << std::endl;
        filelistR.push_back(str.str());
    }
    cv::Mat imageL;
    cv::Mat imageR;
    // NOTE(review): if any pair fails corner detection this loop never
    // terminates, because goodFrameCount only advances on success.
    while (goodFrameCount < PICS_NUMBER )
    {
        std::vector<cv::Point2f> imageCornersL;
        std::vector<cv::Point2f> imageCornersR;
        /* read the left image (grayscale) */
        imageL = cv::imread(filelistL[goodFrameCount], 0);
        /* read the right image (grayscale) */
        imageR = cv::imread(filelistR[goodFrameCount], 0);
        bool isFindL, isFindR;
        isFindL = cv::findChessboardCorners(imageL, boardSize, imageCornersL);
        isFindR = cv::findChessboardCorners(imageR, boardSize, imageCornersR);
        if (isFindL == true && isFindR == true) // the pair is usable only if all corners were found in both views
        {
            /*
            Size(5,5)      half size of the sub-pixel search window
            Size(-1,-1)    half size of the dead zone (none)
            TermCriteria   iteration stop: 20 iterations or 0.1 accuracy
            */
            cv::cornerSubPix(imageL, imageCornersL, cv::Size(5, 5), cv::Size(-1, -1), cv::TermCriteria(CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 20, 0.1));
            cv::drawChessboardCorners(imageL, boardSize, imageCornersL, isFindL);
            // cv::imshow("chessboardL", imageL);
            imagePointL.push_back(imageCornersL);
            cv::cornerSubPix(imageR, imageCornersR, cv::Size(5, 5), cv::Size(-1, -1), cv::TermCriteria(CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 20, 0.1));
            cv::drawChessboardCorners(imageR, boardSize, imageCornersR, isFindR);
            // cv::imshow("chessboardR", imageR);
            imagePointR.push_back(imageCornersR);
            goodFrameCount++;
            std::cout << "The image" << goodFrameCount << " is good" << std::endl;
        }
        else
        {
            std::cout << "The image is bad please try again" << std::endl;
            std::cout << "left image " << isFindL << std::endl;
            std::cout << "right image " << isFindR << std::endl;
        }
    }
    // physical corner coordinates for every image pair
    calRealPoint(objRealPoint, boardWidth, boardHeight, PICS_NUMBER , squareSize);
    std::cout << "calculate success" << std::endl;
    // stereo calibration, refining the single-camera intrinsics given above
    double rms = cv::stereoCalibrate(objRealPoint, imagePointL, imagePointR,
        cameraMatrixL, distCoeffL,
        cameraMatrixR, distCoeffR,
        cv::Size(imageWidth, imageHeight),
        R, T, E, F, cv::CALIB_USE_INTRINSIC_GUESS,
        cv::TermCriteria(cv::TermCriteria::COUNT
            + cv::TermCriteria::EPS, 100, 1e-5));
    std::cout << "Stereo Calibration done with RMS error = " << rms << std::endl;
    /*
    Stereo rectification requires the two views to be coplanar and
    row-aligned so matching is reliable. stereoRectify computes the
    per-camera rotations Rl/Rr that project each image plane onto a common
    plane: after applying Rl (left) and Rr (right) the views are coplanar
    and row-aligned. Pl/Pr are the projection matrices mapping 3-D points
    to 2-D pixels (P*[X Y Z 1]' = [x y w]); Q is the reprojection matrix
    lifting an image point plus disparity back to 3-D
    (Q*[x y d 1] = [X Y Z W]).
    */
    // rectify using the calibration result
    cv::stereoRectify(cameraMatrixL, distCoeffL, cameraMatrixR, distCoeffR, imageSize, R, T, Rl, Rr, Pl, Pr, Q,
        cv::CALIB_ZERO_DISPARITY, -1, imageSize, &validROIL, &validROIR);
    /*
    Build the remap tables mapx/mapy from the R and P computed above;
    remap() uses them to make the two views coplanar and row-aligned.
    initUndistortRectifyMap's newCameraMatrix argument is the rectified
    camera matrix, which OpenCV returns embedded in the projection matrix
    P — hence P is passed here.
    */
    // camera rectification maps (slightly enlarged output canvas)
    cv::Size newSize(static_cast<int>(imageL.cols*1.2), static_cast<int>(imageL.rows*1.2));
    cv::initUndistortRectifyMap(cameraMatrixL, distCoeffL, Rl, Pl, newSize,
        CV_32FC1, mapLx, mapLy);
    cv::initUndistortRectifyMap(cameraMatrixR, distCoeffR, Rr, Pr, newSize,
        CV_32FC1, mapRx, mapRy);
    std::cout << "---------------cameraMatrixL & distCoeffL ----------------- " << std::endl;
    std::cout << "cameraMatrixL" << std::endl << cameraMatrixL << std::endl;
    std::cout << "distCoeffL" << std::endl << distCoeffL << std::endl;
    std::cout << "---------------cameraMatrixR & distCoeffR ----------------- " << std::endl;
    std::cout << "cameraMatrixR" << std::endl << cameraMatrixR << std::endl;
    std::cout << "distCoeffR " << std::endl << distCoeffR << std::endl;
    std::cout << "---------------R & T ----------------- " << std::endl;
    std::cout << "R " << std::endl << R << std::endl;
    std::cout << "T " << std::endl << T << std::endl;
    std::cout << "---------------Pl & Pr ----------------- " << std::endl;
    std::cout << "Pl " << std::endl << Pl << std::endl;
    std::cout << "Pr " << std::endl << Pr << std::endl;
    std::cout << "---------------Rl & Rr ----------------- " << std::endl;
    std::cout << "Rl " << std::endl << Rl << std::endl;
    std::cout << "Rr " << std::endl << Rr << std::endl;
    std::cout << "--------------- Q ----------------- " << std::endl;
    std::cout << "Q " << std::endl << Q << std::endl;
    /************* validate the result with the SGBM matcher **************/
#if 0
    imageL = cv::imread(filelistL[13], 0);
    imageR = cv::imread(filelistR[13], 0);
#else
    imageL = cv::imread("left1.jpg", 0);
    imageR = cv::imread("right1.jpg", 0);
#endif
    cv::Mat rectifyImageL, rectifyImageR;
    cv::remap(imageL, rectifyImageL, mapLx, mapLy, cv::INTER_LINEAR);
    cv::remap(imageR, rectifyImageR, mapRx, mapRy, cv::INTER_LINEAR);
    // cv::imshow("rectifyImageL", rectifyImageL);
    // cv::imshow("rectifyImageR", rectifyImageR);
    cv::Ptr<cv::StereoSGBM> sgbm = cv::StereoSGBM::create(0, 16, 3);
    int sgbmWinSize = 3;
    int cn = imageL.channels();
    int numberOfDisparities = ((imageSize.width / 8) + 15) & -16; // round up to a multiple of 16
    sgbm->setPreFilterCap(63);
    sgbm->setBlockSize(sgbmWinSize);
    sgbm->setP1(8 * cn*sgbmWinSize*sgbmWinSize);
    sgbm->setP2(32 * cn*sgbmWinSize*sgbmWinSize);
    sgbm->setMinDisparity(0);
    sgbm->setNumDisparities(numberOfDisparities);
    sgbm->setUniquenessRatio(10);
    sgbm->setSpeckleWindowSize(50);
    sgbm->setSpeckleRange(32);
    sgbm->setDisp12MaxDiff(1);
    sgbm->setMode(cv::StereoSGBM::MODE_SGBM);
    cv::Mat disp, disp8;
    sgbm->compute(rectifyImageL, rectifyImageR, disp);
    disp.convertTo(disp8, CV_8U, 255 / (numberOfDisparities*16.)); // raw disparity is CV_16S fixed point (x16)
    cv::imshow("disparity8", disp8);
    cv::reprojectImageTo3D(disp, xyz, Q, true);
    xyz = xyz * 16; // xyz = [X/W Y/W Z/W]; multiply by 16 for real coordinates
    cv::setMouseCallback("disparity8", onMouse, 0);
    // overlay horizontal lines to visually check the row alignment
    for (int i = 20; i < rectifyImageL.rows; i += 20)
    {
        cv::line(rectifyImageL, cv::Point(0, i), cv::Point(rectifyImageL.cols, i), cv::Scalar(255, 255, 255));
        cv::line(rectifyImageR, cv::Point(0, i), cv::Point(rectifyImageL.cols, i), cv::Scalar(255, 255, 255));
    }
    cv::Mat imageMatches;
    // drawMatches with empty keypoint/match lists is used here only to
    // place the two rectified views side by side on one canvas
    cv::drawMatches(rectifyImageL, std::vector<cv::KeyPoint>(), // 1st image
        rectifyImageR, std::vector<cv::KeyPoint>(), // 2nd image
        std::vector<cv::DMatch>(),
        imageMatches, // the image produced
        cv::Scalar(255, 255, 255),
        cv::Scalar(255, 255, 255),
        std::vector<char>(),
        2);
    cv::imshow("imageMatches", imageMatches);
    cv::waitKey();
    return 0;
}
【7】这个是包含完整标定流程的双目测距程序,测距结果相对其他博客的实现还不错。
代码转自 https://blog.csdn.net/SweetWind1996/article/details/90415237
#include "opencv2/core/core.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <iostream>
#include <fstream>
using namespace cv;
using namespace std;
const char* imageName_L = "left1.jpg"; // left image of the pair used for the depth test
const char* imageName_R = "right1.jpg"; // right image of the pair used for the depth test
const char* imageList_L = "caliberationpics_L.txt"; // list of left-camera calibration image names
const char* imageList_R = "caliberationpics_R.txt"; // list of right-camera calibration image names
const char* singleCalibrate_result_L = "calibrationresults_L.txt"; // left-camera calibration results
const char* singleCalibrate_result_R = "calibrationresults_R.txt"; // right-camera calibration results
const char* stereoCalibrate_result_L = "stereocalibrateresult_L.txt"; // stereo calibration results
const char* stereoCalibrate_result_R = "stereocalibrateresult_R.txt";
const char* stereoRectifyParams = "stereoRectifyParams.txt"; // stereo rectification parameters
vector<vector<Point2f>> corners_seq_L; // detected corner coordinates, one vector per view
vector<vector<Point2f>> corners_seq_R;
vector<vector<Point3f>> objectPoints_L; // corresponding 3-D board coordinates
vector<vector<Point3f>> objectPoints_R;
Mat cameraMatrix_L = Mat(3, 3, CV_32FC1, Scalar::all(0)); // camera intrinsic matrix (zero-initialised)
Mat cameraMatrix_R = Mat(3, 3, CV_32FC1, Scalar::all(0)); // camera intrinsic matrix (zero-initialised)
Mat distCoeffs_L = Mat(1, 5, CV_32FC1, Scalar::all(0)); // camera distortion coefficients (zero-initialised)
Mat distCoeffs_R = Mat(1, 5, CV_32FC1, Scalar::all(0)); // camera distortion coefficients (zero-initialised)
Mat R, T, E, F; // stereo calibration outputs: rotation, translation, essential, fundamental
Mat R1, R2, P1, P2, Q; // stereo rectification outputs
Mat mapl1, mapl2, mapr1, mapr2; // image remap tables
Mat img1_rectified, img2_rectified, disparity, result3DImage; // rectified images / disparity map / depth map
Size patternSize = Size(7, 5); // inner-corner count per row / per column
Size chessboardSize = Size(30, 30); // each chessboard square is 30 mm
Size imageSize; // image size
Rect validRoi[2]; // valid regions of the two rectified images
/*
单目标定
参数:
imageList 存放标定图片名称的txt
singleCalibrateResult 存放标定结果的txt
objectPoints 世界坐标系中点的坐标
corners_seq 存放图像中的角点,用于立体标定
cameraMatrix 相机的内参数矩阵
distCoeffs 相机的畸变系数
imageSize 输入图像的尺寸(像素)
patternSize 标定板每行的角点个数, 标定板每列的角点个数 (9, 6)
chessboardSize 棋盘上每个方格的边长(mm)
注意:亚像素精确化时,允许输入单通道,8位或者浮点型图像。由于输入图像的类型不同,下面用作标定函数参数的内参数矩阵和畸变系数矩阵在初始化时也要数据注意类型。
*/
bool singleCameraCalibrate(const char* imageList, const char* singleCalibrateResult, vector<vector<Point3f>>& objectPoints,
vector<vector<Point2f>>& corners_seq, Mat& cameraMatrix, Mat& distCoeffs, Size& imageSize, Size patternSize, Size chessboardSize)
{
int n_boards = 0;
ifstream imageStore(imageList); // 打开存放标定图片名称的txt
ofstream resultStore(singleCalibrateResult); // 保存标定结果的txt
// 开始提取角点坐标
vector<Point2f> corners; // 存放一张图片的角点坐标
string imageName; // 读取的标定图片的名称
printf("1---while");
while (getline(imageStore, imageName)) // 读取txt的每一行(每一行存放了一张标定图片的名称)
{
n_boards++;
Mat imageInput = imread(imageName);
cvtColor(imageInput, imageInput, CV_RGB2GRAY);
imageSize.width = imageInput.cols; // 获取图片的宽度
imageSize.height = imageInput.rows; // 获取图片的高度
// 查找标定板的角点
bool found = findChessboardCorners(imageInput, patternSize, corners); // 最后一个参数int flags的缺省值为:CALIB_CB_ADAPTIVE_THRESH | CALIB_CB_NORMALIZE_IMAGE
// 亚像素精确化。在findChessboardCorners中自动调用了cornerSubPix,为了更加精细化,我们自己再调用一次。
printf("2---if");
if (found) // 当所有的角点都被找到
{
TermCriteria criteria = TermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 40, 0.001); // 终止标准,迭代40次或者达到0.001的像素精度
cornerSubPix(imageInput, corners, Size(11, 11), Size(-1, -1), criteria);// 由于我们的图像只存较大,将搜索窗口调大一些,(11, 11)为真实窗口的一半,真实大小为(11*2+1, 11*2+1)--(23, 23)
corners_seq.push_back(corners); // 存入角点序列
// 绘制角点
drawChessboardCorners(imageInput, patternSize, corners, true);
imshow("cornersframe", imageInput);
waitKey(500); // 暂停0.5s
printf("3---500");
}
}
//destroyWindow("cornersframe");
// 进行相机标定
// 计算角点对应的三维坐标
printf("4---500");
int pic, i, j;
for (pic = 0; pic < n_boards; pic++)
{
vector<Point3f> realPointSet;
for (i = 0; i < patternSize.height; i++)
{
for (j = 0; j < patternSize.width; j++)
{
Point3f realPoint;
// 假设标定板位于世界坐标系Z=0的平面
realPoint.x = j * chessboardSize.width;
realPoint.y = i * chessboardSize.height;
realPoint.z = 0;
realPointSet.push_back(realPoint);
}
}
objectPoints.push_back(realPointSet);
}
// 执行标定程序
vector<Mat> rvec; // 旋转向量
vector<Mat> tvec; // 平移向量
calibrateCamera(objectPoints, corners_seq, imageSize, cameraMatrix, distCoeffs, rvec, tvec, 0);
// 保存标定结果
resultStore << "相机内参数矩阵" << endl;
resultStore << cameraMatrix << endl << endl;
resultStore << "相机畸变系数" << endl;
resultStore << distCoeffs << endl << endl;
// 计算重投影点,与原图角点比较,得到误差
double errPerImage = 0.; // 每张图像的误差
double errAverage = 0.; // 所有图像的平均误差
double totalErr = 0.; // 误差总和
vector<Point2f> projectImagePoints; // 重投影点
for (i = 0; i < n_boards; i++)
{
vector<Point3f> tempObjectPoints = objectPoints[i]; // 临时三维点
// 计算重投影点
projectPoints(tempObjectPoints, rvec[i], tvec[i], cameraMatrix, distCoeffs, projectImagePoints);
// 计算新的投影点与旧的投影点之间的误差
vector<Point2f> tempCornersPoints = corners_seq[i];// 临时存放旧投影点
Mat tempCornersPointsMat = Mat(1, tempCornersPoints.size(), CV_32FC2); // 定义成两个通道的Mat是为了计算误差
Mat projectImagePointsMat = Mat(1, projectImagePoints.size(), CV_32FC2);
// 赋值
for (int j = 0; j < tempCornersPoints.size(); j++)
{
projectImagePointsMat.at<Vec2f>(0, j) = Vec2f(projectImagePoints[j].x, projectImagePoints[j].y);
tempCornersPointsMat.at<Vec2f>(0, j) = Vec2f(tempCornersPoints[j].x, tempCornersPoints[j].y);
}
// opencv里的norm函数其实把这里的两个通道分别分开来计算的(X1-X2)^2的值,然后统一求和,最后进行根号
errPerImage = norm(tempCornersPointsMat, projectImagePointsMat, NORM_L2) / (patternSize.width * patternSize.height);
totalErr += errPerImage;
resultStore << "第" << i + 1 << "张图像的平均误差为:" << errPerImage << endl;
}
resultStore << "全局平局误差为:" << totalErr / n_boards << endl;
imageStore.close();
resultStore.close();
return true;
}
/*
双目标定:计算两摄像机相对旋转矩阵 R,平移向量 T, 本征矩阵E, 基础矩阵F
参数:
stereoCalibrateResult 存放立体标定结果的txt
objectPoints 三维点
imagePoints 二维图像上的点
cameraMatrix 相机内参数
distCoeffs 相机畸变系数
imageSize 图像尺寸
R 左右相机相对的旋转矩阵
T 左右相机相对的平移向量
E 本征矩阵
F 基础矩阵
*/
bool stereoCalibrate(const char* stereoCalibrateResult, vector<vector<Point3f>> objectPoints, vector<vector<Point2f>> imagePoints1, vector<vector<Point2f>> imagePoints2,
Mat& cameraMatrix1, Mat& distCoeffs1, Mat& cameraMatrix2, Mat& distCoeffs2, Size& imageSize, Mat& R, Mat& T, Mat& E, Mat& F)
{
ofstream stereoStore(stereoCalibrateResult);
TermCriteria criteria = TermCriteria(TermCriteria::COUNT | TermCriteria::EPS, 30, 1e-6); // 终止条件
stereoCalibrate(objectPoints, imagePoints1, imagePoints2, cameraMatrix1, distCoeffs1,
cameraMatrix2, distCoeffs2, imageSize, R, T, E, F, CALIB_FIX_INTRINSIC, criteria); // 注意参数顺序,可以到保存的文件中查看,避免返回时出错
stereoStore << "左相机内参数:" << endl;
stereoStore << cameraMatrix1 << endl;
stereoStore << "右相机内参数:" << endl;
stereoStore << cameraMatrix2 << endl;
stereoStore << "左相机畸变系数:" << endl;
stereoStore << distCoeffs1 << endl;
stereoStore << "右相机畸变系数:" << endl;
stereoStore << distCoeffs2 << endl;
stereoStore << "旋转矩阵:" << endl;
stereoStore << R << endl;
stereoStore << "平移向量:" << endl;
stereoStore << T << endl;
stereoStore << "本征矩阵:" << endl;
stereoStore << E << endl;
stereoStore << "基础矩阵:" << endl;
stereoStore << F << endl;
stereoStore.close();
return true;
}
/*
 Stereo rectification.
 Parameters:
   stereoRectifyParams  txt file that receives the rectification parameters
   cameraMatrix1/2      camera intrinsics
   distCoeffs1/2        camera distortion coefficients
   imageSize            image size
   R, T                 inter-camera rotation / translation
   R1, R2               row-alignment rectification rotations, output
   P1, P2               left / right projection matrices, output
   Q                    reprojection (disparity-to-depth) matrix, output
   mapl1/2, mapr1/2     remap tables for the left / right images, output
 Side effect: fills the file-scope validRoi[2] with the valid regions of both
 rectified images. (The original kept a local array that shadowed the global one
 and ended with `return validRoi[0], validRoi[1];` — the comma operator — so the
 left ROI was silently thrown away.)
 Returns the right-image ROI, the same value the old comma expression produced.
*/
Rect stereoRectification(const char* stereoRectifyParams, Mat& cameraMatrix1, Mat& distCoeffs1, Mat& cameraMatrix2, Mat& distCoeffs2,
    Size& imageSize, Mat& R, Mat& T, Mat& R1, Mat& R2, Mat& P1, Mat& P2, Mat& Q, Mat& mapl1, Mat& mapl2, Mat& mapr1, Mat& mapr2)
{
    ofstream stereoStore(stereoRectifyParams);
    // write the ROIs straight into the global validRoi so callers can use both
    stereoRectify(cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, imageSize,
        R, T, R1, R2, P1, P2, Q, 0, -1, imageSize, &validRoi[0], &validRoi[1]);
    stereoStore << "R1:" << endl;
    stereoStore << R1 << endl;
    stereoStore << "R2:" << endl;
    stereoStore << R2 << endl;
    stereoStore << "P1:" << endl;
    stereoStore << P1 << endl;
    stereoStore << "P2:" << endl;
    stereoStore << P2 << endl;
    stereoStore << "Q:" << endl;
    stereoStore << Q << endl;
    stereoStore.close();
    cout << "R1:" << endl;
    cout << R1 << endl;
    cout << "R2:" << endl;
    cout << R2 << endl;
    cout << "P1:" << endl;
    cout << P1 << endl;
    cout << "P2:" << endl;
    cout << P2 << endl;
    cout << "Q:" << endl;
    cout << Q << endl;
    // compute the remap tables for both cameras
    initUndistortRectifyMap(cameraMatrix1, distCoeffs1, R1, P1, imageSize, CV_32FC1, mapl1, mapl2);
    initUndistortRectifyMap(cameraMatrix2, distCoeffs2, R2, P2, imageSize, CV_32FC1, mapr1, mapr2);
    return validRoi[1];
}
/*
计算视差图
参数:
imageName1 左相机拍摄的图像
imageName2 右相机拍摄的图像
img1_rectified 重映射后的左侧相机图像
img2_rectified 重映射后的右侧相机图像
map 重投影映射表
*/
bool computeDisparityImage(const char* imageName1, const char* imageName2, Mat& img1_rectified,
Mat& img2_rectified, Mat& mapl1, Mat& mapl2, Mat& mapr1, Mat& mapr2, Rect validRoi[2], Mat& disparity)
{
// 首先,对左右相机的两张图片进行重构
Mat img1 = imread(imageName1);
Mat img2 = imread(imageName2);
if (img1.empty() | img2.empty())
{
cout << "图像为空" << endl;
}
Mat gray_img1, gray_img2;
cvtColor(img1, gray_img1, COLOR_BGR2GRAY);
cvtColor(img2, gray_img2, COLOR_BGR2GRAY);
Mat canvas(imageSize.height, imageSize.width * 2, CV_8UC1); // 注意数据类型
Mat canLeft = canvas(Rect(0, 0, imageSize.width, imageSize.height));
Mat canRight = canvas(Rect(imageSize.width, 0, imageSize.width, imageSize.height));
gray_img1.copyTo(canLeft);
gray_img2.copyTo(canRight);
imwrite("校正前左右相机图像.jpg", canvas);
remap(gray_img1, img1_rectified, mapl1, mapl2, INTER_LINEAR);
remap(gray_img2, img2_rectified, mapr1, mapr2, INTER_LINEAR);
imwrite("左相机校正图像.jpg", img1_rectified);
imwrite("右相机校正图像.jpg", img2_rectified);
img1_rectified.copyTo(canLeft);
img2_rectified.copyTo(canRight);
rectangle(canLeft, validRoi[0], Scalar(255, 255, 255), 5, 8);
rectangle(canRight, validRoi[1], Scalar(255, 255, 255), 5, 8);
for (int j = 0; j <= canvas.rows; j += 16)
line(canvas, Point(0, j), Point(canvas.cols, j), Scalar(0, 255, 0), 1, 8);
imwrite("校正后左右相机图像.jpg", canvas);
// 进行立体匹配
Ptr<StereoBM> bm = StereoBM::create(16, 9); // Ptr<>是一个智能指针
bm->compute(img1_rectified, img2_rectified, disparity); // 计算视差图
disparity.convertTo(disparity, CV_32F, 1.0 / 16);
// 归一化视差映射
normalize(disparity, disparity, 0, 256, NORM_MINMAX, -1);
return true;
}
// Mouse callback: a left click on the disparity window prints the 3-D point
// stored in result3DImage at the clicked pixel.
void onMouse(int event, int x, int y, int flags, void *param)
{
    if (event != EVENT_LBUTTONDOWN)
        return;
    cout << result3DImage.at<Vec3f>(Point(x, y)) << endl;
}
// Pipeline: calibrate each camera, stereo-calibrate the pair, rectify, compute
// the disparity map, then let the user click the disparity window to print 3-D
// coordinates (see onMouse).
int main()
{
    singleCameraCalibrate(imageList_L, singleCalibrate_result_L, objectPoints_L, corners_seq_L, cameraMatrix_L,
        distCoeffs_L, imageSize, patternSize, chessboardSize);
    cout << "已完成左相机的标定!" << endl;
    singleCameraCalibrate(imageList_R, singleCalibrate_result_R, objectPoints_R, corners_seq_R, cameraMatrix_R,
        distCoeffs_R, imageSize, patternSize, chessboardSize);
    cout << "已完成右相机的标定!" << endl;
    stereoCalibrate(stereoCalibrate_result_L, objectPoints_L, corners_seq_L, corners_seq_R, cameraMatrix_L, distCoeffs_L,
        cameraMatrix_R, distCoeffs_R, imageSize, R, T, E, F);
    cout << "相机立体标定完成!" << endl;
    // The original `validRoi[0], validRoi[1] = stereoRectification(...)` used the
    // comma operator: only validRoi[1] ever received the return value. Keep that
    // single assignment explicit. NOTE(review): validRoi[0] is not set by this
    // statement — confirm stereoRectification fills the global array itself.
    validRoi[1] = stereoRectification(stereoRectifyParams, cameraMatrix_L, distCoeffs_L, cameraMatrix_R, distCoeffs_R,
        imageSize, R, T, R1, R2, P1, P2, Q, mapl1, mapl2, mapr1, mapr2);
    cout << "已创建图像重投影映射表!" << endl;
    computeDisparityImage(imageName_L, imageName_R, img1_rectified, img2_rectified, mapl1, mapl2, mapr1, mapr2, validRoi, disparity);
    cout << "视差图建立完成!" << endl;
    // NOTE(review): `disparity` was divided by 16 and then min-max normalised for
    // display, so the depths produced here are not metric — reproject the raw
    // matcher output if real distances are needed.
    reprojectImageTo3D(disparity, result3DImage, Q);
    imshow("视差图", disparity);
    setMouseCallback("视差图", onMouse);
    waitKey(0);
    return 0;
}
文件目录如图:
(图)图片列表
(图)图片位置,注意图片名一定要和上图一一对应,不多不少。
(图)最后效果图