最近对双目视觉的应用挺感兴趣的,之前又完成了双目的标定,刚好可以把数据拿过来使用,继续做相关实验,实验里的代码很多都是参考大神的,这里也分享下这些链接:
https://blog.csdn.net/weixin_39449570/article/details/79033314
https://blog.csdn.net/Loser__Wang/article/details/52836042
由于opencv用的版本是2.4.13,使用上跟3.0以上版本还是有所区别的,2.4.13可参考
https://blog.csdn.net/chentravelling/article/details/53672578
3.0以上版本StereoBM等定义为纯虚类,不能直接实例化,可参考
https://blog.csdn.net/chentravelling/article/details/70254682
下面简单说下视差(双目测距)的原理:
其中,Tx为两相机光心间的距离(基线),P(Xc,Yc,Zc)为左相机坐标系下的相机坐标,xl为P在左相机像平面X轴坐标,xr为P在右相机像平面X轴坐标(这里只考虑相机在同一水平位置且焦距相同),根据成像原理(相似三角形)有 xl = f·Xc/Zc,xr = f·(Xc−Tx)/Zc(f 为焦距)
将右相机转换到左相机坐标系下,则P在相机2的坐标转换到相机1后的坐标为P(Xc-Tx,Yc,Zc),模型如下:
视差:d = xl − xr = f·Tx/Zc,整理可得 Zc = f·Tx/d
所以在这种简单的模型下,只要知道视差d就可以求得距离Zc
接下来就是上代码了,在使用代码前需要进行双目标定,将相机的内外参数求出来:
#include <stdio.h>
#include <iostream>
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/contrib/contrib.hpp"
using namespace std;
using namespace cv;
// Matching-algorithm selector values (match the OpenCV 2.4 sample naming).
enum { STEREO_BM = 0,
STEREO_SGBM = 1,
STEREO_HH = 2,
STEREO_VAR = 3 };
Size imageSize = Size(640, 480); // expected input size — must match the calibration images
Point origin; // mouse-down start point of the drag selection
Rect selection; // rectangular selection being dragged with the mouse
bool selectObject = false; // true while the left mouse button is held down
Mat xyz; // per-pixel 3-D coordinates produced by reprojectImageTo3D in main()
/* Dump the 3-D point cloud to a whitespace-separated text file (one
 * "X Y Z" triple per line).
 * filename : output path
 * mat      : CV_32FC3 matrix of (X,Y,Z) points from reprojectImageTo3D
 * Points whose Z is missing (== max_z sentinel) or out of range are skipped. */
static void saveXYZ(const char* filename, const Mat& mat)
{
    const double max_z = 1.0e4; // reprojectImageTo3D marks unmatched pixels with a huge Z
    FILE* fp = fopen(filename, "wt");
    if (!fp) // bug fix: the original dereferenced a NULL FILE* when the open failed
    {
        fprintf(stderr, "saveXYZ: cannot open %s for writing\n", filename);
        return;
    }
    for (int y = 0; y < mat.rows; y++)
    {
        for (int x = 0; x < mat.cols; x++)
        {
            Vec3f point = mat.at<Vec3f>(y, x);
            // skip invalid / out-of-range depths
            if (fabs(point[2] - max_z) < FLT_EPSILON || fabs(point[2]) > max_z) continue;
            fprintf(fp, "%f %f %f\n", point[0], point[1], point[2]);
        }
    }
    fclose(fp);
}
/* Colorize an 8-bit disparity map with a KITTI-style false-color ramp.
 * src  : CV_8UC1 disparity (assumed continuous in memory — TODO confirm,
 *        pixels are addressed as src.data[v*width + u])
 * disp : pre-allocated CV_8UC3 output of the same size (BGR order) */
void GenerateFalseMap(cv::Mat &src, cv::Mat &disp)
{
    // color map rows: { r, g, b, bin weight }
    float max_val = 255.0f;
    float map[8][4] = { { 0, 0, 0, 114 }, { 0, 0, 1, 185 }, { 1, 0, 0, 114 }, { 1, 0, 1, 174 },
    { 0, 1, 0, 114 }, { 0, 1, 1, 185 }, { 1, 1, 0, 114 }, { 1, 1, 1, 0 } };
    float sum = 0;
    for (int i = 0; i < 8; i++)
        sum += map[i][3];
    float weights[8]; // relative weights
    float cumsum[8]; // cumulative weights
    cumsum[0] = 0;
    for (int i = 0; i < 7; i++) {
        weights[i] = sum / map[i][3];
        cumsum[i + 1] = cumsum[i] + map[i][3] / sum;
    }
    int height_ = src.rows;
    int width_ = src.cols;
    // for all pixels do
    for (int v = 0; v < height_; v++) {
        for (int u = 0; u < width_; u++) {
            // normalized disparity value in [0, 1]
            float val = std::min(std::max(src.data[v*width_ + u] / max_val, 0.0f), 1.0f);
            // find the color bin
            int i;
            for (i = 0; i < 7; i++)
                if (val < cumsum[i + 1])
                    break;
            // bug fix: for val == 1.0 (pixel 255) the search fell through with
            // i == 7, reading uninitialized weights[7] and out-of-bounds map[8]
            if (i > 6) i = 6;
            // interpolate between bin i and bin i+1
            float w = 1.0 - (val - cumsum[i])*weights[i];
            uchar r = (uchar)((w*map[i][0] + (1.0 - w)*map[i + 1][0]) * 255.0);
            uchar g = (uchar)((w*map[i][1] + (1.0 - w)*map[i + 1][1]) * 255.0);
            uchar b = (uchar)((w*map[i][2] + (1.0 - w)*map[i + 1][2]) * 255.0);
            // BGR triplets are stored contiguously in memory
            disp.data[v*width_ * 3 + 3 * u + 0] = b;
            disp.data[v*width_ * 3 + 3 * u + 1] = g;
            disp.data[v*width_ * 3 + 3 * u + 2] = r;
        }
    }
}
/* Mouse callback: lets the user drag a rectangle on the disparity window and
 * prints the 3-D world coordinate (from the global xyz map) of a clicked pixel. */
static void onMouse(int event, int x, int y, int, void*)
{
    if (selectObject)
    {
        // while dragging, keep the selection spanning origin..(x, y)
        selection.x = MIN(x, origin.x);
        selection.y = MIN(y, origin.y);
        selection.width = std::abs(x - origin.x);
        selection.height = std::abs(y - origin.y);
    }
    switch (event)
    {
    case EVENT_LBUTTONDOWN: // left button pressed: start a new selection
        origin = Point(x, y);
        selection = Rect(x, y, 0, 0);
        selectObject = true;
        // bug fix: the callback is installed before xyz is computed, so guard
        // against an empty map and against clicks outside its bounds
        if (!xyz.empty() && x >= 0 && y >= 0 && x < xyz.cols && y < xyz.rows)
            cout << origin << "in world coordinate is: " << xyz.at<Vec3f>(origin) << endl;
        break;
    case EVENT_LBUTTONUP: // left button released: finish the selection
        selectObject = false;
        if (selection.width > 0 && selection.height > 0)
            break;
    }
}
/* Load the stereo calibration result from an OpenCV FileStorage XML file.
 * filename : path to the calibration file (e.g. "calibrateResult.xml")
 * outputs  : left/right intrinsics and distortion, rotation R and translation T
 *            between the cameras. On failure the outputs are left untouched. */
void readCamParam(string& filename, Mat& camL_Matrix, Mat& camL_distcoeff,
    Mat& camR_Matrix, Mat& camR_distcoeff, Mat& R, Mat& T)
{
    FileStorage fs(filename, FileStorage::READ);
    if (!fs.isOpened())
    {
        cout << "there is not the param file!" << endl;
        return; // fix: bail out instead of re-testing isOpened() below
    }
    // note: operator>> (re)allocates the destination Mats itself, so the
    // original Mat(3, 3, CV_64F) pre-allocations were dead stores — removed
    fs["cam1_Matrix"] >> camL_Matrix;
    fs["cam1_distcoeff"] >> camL_distcoeff;
    fs["cam2_Matrix"] >> camR_Matrix;
    fs["cam2_distcoeff"] >> camR_distcoeff;
    fs["R"] >> R;
    fs["T"] >> T;
}
/* Draw both rectified images side by side on one canvas, outline each valid
 * rectification ROI in red, and overlay horizontal green lines: after a good
 * rectification, corresponding points lie on the same scanline in both halves. */
void images2one(Size& imageSize, Mat& rectifyImageL, Rect& validROIL,
    Mat& rectifyImageR, Rect& validROIR)
{
    Mat canvas;
    double sf;
    int w, h;
    sf = 700. / MAX(imageSize.width, imageSize.height); // scale the longer side to 700 px
    w = cvRound(imageSize.width * sf); // cvRound: round a double to the nearest int
    h = cvRound(imageSize.height * sf);
    canvas.create(h, w * 2, CV_8UC3); // 3 channels — the rectified images are color
    // left image onto the left half of the canvas
    Mat canvasPart = canvas(Rect(w * 0, 0, w, h)); // view into the canvas
    resize(rectifyImageL, canvasPart, canvasPart.size(), 0, 0, INTER_AREA); // scale into the view
    Rect vroiL(cvRound(validROIL.x*sf), cvRound(validROIL.y*sf), // ROI scaled to canvas coords
        cvRound(validROIL.width*sf), cvRound(validROIL.height*sf));
    rectangle(canvasPart, vroiL, Scalar(0, 0, 255), 3, 8); // red outline
    cout << "Painted ImageL" << endl;
    // right image onto the right half of the canvas
    canvasPart = canvas(Rect(w, 0, w, h));
    // consistency fix: use INTER_AREA like the left half (the original mixed
    // INTER_AREA / INTER_LINEAR for two identical downscales)
    resize(rectifyImageR, canvasPart, canvasPart.size(), 0, 0, INTER_AREA);
    Rect vroiR(cvRound(validROIR.x * sf), cvRound(validROIR.y*sf),
        cvRound(validROIR.width * sf), cvRound(validROIR.height * sf));
    rectangle(canvasPart, vroiR, Scalar(0, 0, 255), 3, 8);
    cout << "Painted ImageR" << endl;
    // epipolar guide lines every 16 canvas rows
    for (int i = 0; i < canvas.rows; i += 16)
        line(canvas, Point(0, i), Point(canvas.cols, i), Scalar(0, 255, 0), 1, 8);
    imshow("rectified", canvas);
}
/* Compute a disparity map for a (rectified) stereo pair, OpenCV 2.4 API.
 * alg        : STEREO_BM / STEREO_SGBM / STEREO_HH / STEREO_VAR
 * imgL, imgR : left/right input images (BM requires 8-bit single channel)
 * disp8      : 8-bit disparity scaled for display
 * dispf      : raw disparity (fixed-point, x16 for BM/SGBM), cropped back to
 *              the input width */
void stereoSGBM_match(int alg, Mat& imgL, Mat& imgR, Mat& disp8, Mat& dispf)
{
    int SADWindowSize = 0, numberOfDisparities = 0;
    StereoBM bm;
    StereoSGBM sgbm;
    StereoVar var;
    Size img_size = imgL.size();
    Rect roi1, roi2; // NOTE(review): left empty here; ideally filled from stereoRectify
    // round the disparity search range up to a multiple of 16
    numberOfDisparities = numberOfDisparities > 0 ? numberOfDisparities : ((img_size.width / 8) + 15) & -16;
    bm.state->roi1 = roi1;
    bm.state->roi2 = roi2;
    bm.state->preFilterCap = 31;
    bm.state->SADWindowSize = SADWindowSize > 0 ? SADWindowSize : 9;
    bm.state->minDisparity = 0;
    bm.state->numberOfDisparities = numberOfDisparities;
    bm.state->textureThreshold = 10;
    bm.state->uniquenessRatio = 15;
    bm.state->speckleWindowSize = 100;
    bm.state->speckleRange = 32;
    bm.state->disp12MaxDiff = 1;
    sgbm.preFilterCap = 63;
    sgbm.SADWindowSize = SADWindowSize > 0 ? SADWindowSize : 3;
    int cn = imgL.channels();
    // P1/P2 smoothness penalties as recommended by the OpenCV sample
    sgbm.P1 = 8 * cn*sgbm.SADWindowSize*sgbm.SADWindowSize;
    sgbm.P2 = 32 * cn*sgbm.SADWindowSize*sgbm.SADWindowSize;
    sgbm.minDisparity = 0;
    sgbm.numberOfDisparities = numberOfDisparities;
    sgbm.uniquenessRatio = 10;
    sgbm.speckleWindowSize = bm.state->speckleWindowSize;
    sgbm.speckleRange = bm.state->speckleRange;
    sgbm.disp12MaxDiff = 1;
    sgbm.fullDP = alg == STEREO_HH; // HH = full two-pass dynamic programming
    var.levels = 3; // ignored with USE_AUTO_PARAMS
    var.pyrScale = 0.5; // ignored with USE_AUTO_PARAMS
    var.nIt = 25;
    var.minDisp = -numberOfDisparities;
    var.maxDisp = 0;
    var.poly_n = 3;
    var.poly_sigma = 0.0;
    var.fi = 15.0f;
    var.lambda = 0.03f;
    var.penalization = var.PENALIZATION_TICHONOV; // ignored with USE_AUTO_PARAMS
    var.cycle = var.CYCLE_V; // ignored with USE_AUTO_PARAMS
    var.flags = var.USE_SMART_ID | var.USE_AUTO_PARAMS | var.USE_INITIAL_DISPARITY | var.USE_MEDIAN_FILTERING;
    Mat disp;
    // pad the left edge so the leftmost numberOfDisparities columns (where no
    // match can exist — the "black band") can be cropped away afterwards
    Mat img1p, img2p;
    copyMakeBorder(imgL, img1p, 0, 0, numberOfDisparities, 0, IPL_BORDER_REPLICATE);
    copyMakeBorder(imgR, img2p, 0, 0, numberOfDisparities, 0, IPL_BORDER_REPLICATE);
    imshow("img1p", img1p);
    imshow("img2p", img2p);
    int64 t = getTickCount();
    // bug fix: match on the PADDED images (the original padded them but then
    // matched the unpadded pair, so the output lost numberOfDisparities columns)
    if (alg == STEREO_BM)
        bm(img1p, img2p, disp);
    else if (alg == STEREO_VAR)
        var(img1p, img2p, disp);
    else if (alg == STEREO_SGBM || alg == STEREO_HH)
        sgbm(img1p, img2p, disp);
    t = getTickCount() - t;
    printf("Time elapsed: %fms\n", t * 1000 / getTickFrequency());
    // crop the padding: dispf now has exactly the input width again
    dispf = disp.colRange(numberOfDisparities, img1p.cols);
    // BM/SGBM output fixed-point disparity scaled by 16; normalize for display
    if (alg != STEREO_VAR)
        dispf.convertTo(disp8, CV_8U, 255 / (numberOfDisparities*16.));
    else
        dispf.convertTo(disp8, CV_8U);
}
/* Pipeline: load calibration -> rectify the stereo pair -> compute disparity
 * -> reproject to 3-D -> display and save the point cloud. */
int main()
{
    int alg = STEREO_SGBM;
    // BM needs grayscale input (flag 0); the other algorithms can take color (-1)
    int color_mode = alg == STEREO_BM ? 0 : -1;
    string filename = "calibrateResult.xml";
    Mat camL_Matrix, camR_Matrix, camL_distcoeff, camR_distcoeff, R, T;
    Mat Rl, Rr, Pl, Pr, Q; // rectification rotations R*, projections P*, reprojection matrix Q
    Rect validROIL, validROIR;
    readCamParam(filename, camL_Matrix, camL_distcoeff, camR_Matrix, camR_distcoeff, R, T);
    stereoRectify(camL_Matrix, camL_distcoeff, camR_Matrix, camR_distcoeff, imageSize, R, T, Rl, Rr, Pl, Pr, Q, CALIB_ZERO_DISPARITY,
        0, imageSize, &validROIL, &validROIR);
    Mat mapLx, mapLy, mapRx, mapRy; // undistort + rectify lookup tables
    initUndistortRectifyMap(camL_Matrix, camL_distcoeff, Rl, Pl, imageSize, CV_16SC2, mapLx, mapLy);
    initUndistortRectifyMap(camR_Matrix, camR_distcoeff, Rr, Pr, imageSize, CV_16SC2, mapRx, mapRy);
    Mat imgL = imread("L.bmp", color_mode);
    Mat imgR = imread("R.bmp", color_mode);
    // robustness fix: fail early instead of crashing inside remap on a missing file
    if (imgL.empty() || imgR.empty())
    {
        cout << "could not load L.bmp / R.bmp" << endl;
        return -1;
    }
    Mat rectifyImageL, rectifyImageR;
    remap(imgL, rectifyImageL, mapLx, mapLy, INTER_LINEAR);
    remap(imgR, rectifyImageR, mapRx, mapRy, INTER_LINEAR);
    imshow("rectifyImageL", rectifyImageL);
    imshow("rectifyImageR", rectifyImageR);
    // show both rectified images on one canvas with epipolar guide lines
    images2one(imageSize, rectifyImageL, validROIL, rectifyImageR, validROIR);
    namedWindow("color_disparity", CV_WINDOW_NORMAL);
    setMouseCallback("color_disparity", onMouse, 0);
    bool no_display = false;
    Mat disp8, dispf;
    // bug fix: match on the RECTIFIED pair — the original passed the raw
    // images, so the disparity did not correspond to Q from stereoRectify
    stereoSGBM_match(alg, rectifyImageL, rectifyImageR, disp8, dispf);
    // disparity is fixed-point (scaled by 16), so X/W, Y/W, Z/W from
    // reprojectImageTo3D must be multiplied by 16 for correct 3-D coordinates
    reprojectImageTo3D(dispf, xyz, Q, true);
    xyz = xyz * 16;
    if (!no_display)
    {
        namedWindow("left", 1);
        imshow("left", imgL);
        namedWindow("right", 1);
        imshow("right", imgR);
        namedWindow("disparity", 0);
        imshow("disparity", disp8);
        Mat color(dispf.size(), CV_8UC3);
        GenerateFalseMap(disp8, color); // false-color rendering of the disparity
        imshow("color_disparity", color);
        waitKey(500);
        printf("press any key to continue...");
        fflush(stdout);
        waitKey();
        printf("\n");
    }
    saveXYZ("xyz.xls", xyz); // dump the 3-D point cloud
    return 0;
}
下面为代码的链接:
https://download.csdn.net/download/logan_lin/10345775