// This program merges the previous steps: it produces both the grayscale and the
// pseudo-color (rainbow) disparity maps. The side-by-side canvas display follows
// the book "Learning OpenCV". Read through the code for the details of each step.
#include "opencv2/opencv.hpp"
#include <highgui.h>
#include <cv.h>
#include <cxcore.h>
#include <iostream>
using namespace std;
using namespace cv;
int main()
{
VideoCapture inputVideo1(0);
VideoCapture inputVideo2(1);
Mat img1;
Mat img2;
Mat imgCalibration1;
Mat imgCalibration2;
inputVideo1 >> img1;
inputVideo2 >> img2;
Mat cameraMatrixLeft = Mat::eye(3, 3, CV_64F);//左摄像头畸变参数(我就说怎么少了两个东西,原来双目标定的平移向量和旋转向量没求出来- -)
cameraMatrixLeft.at<double>(0, 0) = 7.97747742e+02;
cameraMatrixLeft.at<double>(0, 1) = 0;
cameraMatrixLeft.at<double>(0, 2) = 3.38423920e+02;
cameraMatrixLeft.at<double>(1, 1) = 7.95846191e+02;
cameraMatrixLeft.at<double>(1, 2) = 2.38578918e+02;
Mat cameraMatrixRight = Mat::eye(3, 3, CV_64F);//右摄像头畸变参数
cameraMatrixRight.at<double>(0, 0) = 8.03603271e+02;
cameraMatrixRight.at<double>(0, 1) = 0;
cameraMatrixRight.at<double>(0, 2) = 3.31736206e+02;
cameraMatrixRight.at<double>(1, 1) = 8.01632874e+02;
cameraMatrixRight.at<double>(1, 2) = 2.66235107e+02;
Mat distCoeffsLeft = Mat::zeros(5, 1, CV_64F);
distCoeffsLeft.at<double>(0, 0) = -1.19046479e-01;
distCoeffsLeft.at<double>(1, 0) = -3.16432804e-01;
distCoeffsLeft.at<double>(2, 0) = 1.25176529e-03;
distCoeffsLeft.at<double>(3, 0) = -1.88818539e-03;
distCoeffsLeft.at<double>(4, 0) = 0;
Mat distCoeffsRight = Mat::zeros(5, 1, CV_64F);
distCoeffsRight.at<double>(0, 0) = -6.44572973e-02;
distCoeffsRight.at<double>(1, 0) = -1.03351343e+00;
distCoeffsRight.at<double>(2, 0) = -3.31324612e-04;
distCoeffsRight.at<double>(3, 0) = -6.24597212e-03;
distCoeffsRight.at<double>(4, 0) = 0;
Mat view1, rview1, map1, map2;
Mat view2, rview2, map3, map4;
Rect validROIL,validROIR;//validROI是指裁剪之后的区域
Size imageSize1;
Size imageSize2;
imageSize1 = img1.size();
imageSize2 = img2.size();
initUndistortRectifyMap(cameraMatrixLeft, distCoeffsLeft, Mat(),
getOptimalNewCameraMatrix(cameraMatrixLeft, distCoeffsLeft, imageSize1, 1, imageSize1, 0),
imageSize1, CV_16SC2, map1, map2);
initUndistortRectifyMap(cameraMatrixRight, distCoeffsRight, Mat(),
getOptimalNewCameraMatrix(cameraMatrixRight, distCoeffsRight, imageSize2, 1, imageSize2, 0),
imageSize2, CV_16SC2, map3, map4);
while(1)
{
inputVideo1 >> img1;
inputVideo2 >> img2;
if (img1.empty()||img2.empty())
break;
remap(img1, imgCalibration1, map1, map2, INTER_LINEAR);
remap(img2, imgCalibration2, map3, map4, INTER_LINEAR);
// imshow("left", img1);
// imshow("right", img2);
// imshow("Calibration1", imgCalibration1);
// imshow("Calibration2", imgCalibration2);
cv::StereoSGBM sgbm;//立体匹配算法
int SADWindowSize = 11;
sgbm.preFilterCap = 63;
int cn=1;
sgbm.SADWindowSize = SADWindowSize > 0 ? SADWindowSize : 3;
sgbm.P1 = 4*cn*sgbm.SADWindowSize*sgbm.SADWindowSize;
sgbm.P2 = 32*cn*sgbm.SADWindowSize*sgbm.SADWindowSize;
sgbm.minDisparity = 0;
sgbm.numberOfDisparities = 32;
sgbm.uniquenessRatio = 10;
sgbm.speckleWindowSize = 100;
sgbm.speckleRange = 32;
sgbm.disp12MaxDiff = 1;
Mat disp, img;
int64 t = getTickCount();
sgbm((Mat)imgCalibration1, (Mat)imgCalibration2, disp);
disp.convertTo(img, CV_8U, 255/(32*16.));
imshow("disparity", img);
Mat img_color(img.rows, img.cols, CV_8UC3);//构造RGB图像
#define IMG_B(img,y,x) img.at<Vec3b>(y,x)[0]
#define IMG_G(img,y,x) img.at<Vec3b>(y,x)[1]
#define IMG_R(img,y,x) img.at<Vec3b>(y,x)[2]
uchar tmp2=0;
for (int y=0;y<img.rows;y++)//转为彩虹图的具体算法,主要思路是把灰度图对应的0~255的数值分别转换成彩虹色:红、橙、黄、绿、青、蓝。
{
for (int x=0;x<img.cols;x++)
{
tmp2 = img.at<uchar>(y,x);
if (tmp2 <= 51)
{
IMG_B(img_color,y,x) = 255;
IMG_G(img_color,y,x) = tmp2*5;
IMG_R(img_color,y,x) = 0;
}
else if (tmp2 <= 102)
{
tmp2-=51;
IMG_B(img_color,y,x) = 255-tmp2*5;
IMG_G(img_color,y,x) = 255;
IMG_R(img_color,y,x) = 0;
}
else if (tmp2 <= 153)
{
tmp2-=102;
IMG_B(img_color,y,x) = 0;
IMG_G(img_color,y,x) = 255;
IMG_R(img_color,y,x) = tmp2*5;
}
else if (tmp2 <= 204)
{
tmp2-=153;
IMG_B(img_color,y,x) = 0;
IMG_G(img_color,y,x) = 255-uchar(128.0*tmp2/51.0+0.5);
IMG_R(img_color,y,x) = 255;
}
else
{
tmp2-=204;
IMG_B(img_color,y,x) = 0;
IMG_G(img_color,y,x) = 127-uchar(127.0*tmp2/51.0+0.5);
IMG_R(img_color,y,x) = 255;
}
}
}
namedWindow("img_ rainbowcolor");
imshow("img_ rainbowcolor", img_color);
Mat canvas;//画布程序
double sf;
int w,h;
Size imageSize;
imageSize = img.size();
sf=600./MAX(imageSize.width,imageSize.height);
w=cvRound(imageSize.width*sf);
h=cvRound(imageSize.height*sf);
canvas.create(h,w*2,CV_8UC3);
Mat canvasPart=canvas(Rect(w*0,0,w,h));
resize(imgCalibration1,canvasPart,canvasPart.size(),0,0,INTER_AREA);
Rect vroiL(cvRound(validROIL.x*sf), cvRound(validROIL.y*sf), //获得被截取的区域
cvRound(validROIL.width*sf), cvRound(validROIL.height*sf));
rectangle(canvasPart, vroiL, Scalar(0, 0, 255), 3, 8); //画上一个矩形
canvasPart=canvas(Rect(w,0,w,h));
resize(imgCalibration2,canvasPart,canvasPart.size(),0,0,INTER_LINEAR);
Rect vroiR(cvRound(validROIR.x*sf),cvRound(validROIR.y*sf),
cvRound(validROIR.width*sf),cvRound(validROIR.height*sf));
rectangle(canvasPart,vroiR,Scalar(0,255,0),3,8);
for(int i=0;i<canvas.rows;i+=16)
line(canvas,Point(0,i),Point(canvas.cols,i),Scalar(0,255,0),1,8);
imshow("rectified",canvas);
char key = waitKey(1);
if (key == 27 || key == 'q' || key == 'Q')
break;
}
cvDestroyAllWindows();
return 0;
}