接上一篇,得到了stero.xml之后,我们就可以进行双目重建了。
其实也没啥,都是直接调用库函数的。
首先打开两个相机,读出参数,然后计算视差,然后计算坐标,就完了。
完整代码如下:
#include <cv.h>
#include <highgui.h>
#include <iostream>
#include<opencv2\opencv.hpp>
using namespace std;
using namespace cv;
int i = 0; Mat frame;                                   // i: frame/save counter; frame: 8-bit depth (z) visualization written by main, saved by onMouse
Mat Left, Right,pointClouds;                            // raw left/right camera frames; pointClouds: CV_32FC3 output of reprojectImageTo3D
Mat mx0, my0, mx1, my1,Q;                               // rectification remap tables for cam0/cam1 and the 4x4 reprojection matrix Q (loaded from stero.xml)
Rect rect0, rect1;                                      // valid rectified ROIs for the left/right views (from calibration)
cv::Ptr<cv::StereoBM> m_BM = cv::StereoBM::create();    // block-matching stereo matcher, configured in LoadData()
void onMouse(int event, int x, int y, int flags, void* userdata)
{
if (event == 1)
imwrite(to_string(i++) + ".jpg", frame);
}
void LoadData(void)
{
FileStorage fs("stero.xml", cv::FileStorage::READ);
fs ["mx0"] >> mx0;
fs [ "my0"] >> my0;
fs [ "mx1"] >> mx1;
fs [ "my1"] >> my1;
fs["Q"] >> Q;
fs [ "rect0"] >> rect0;
fs [ "rect1"] >> rect1;
fs.release();
//m_BM.state->roi1 = rect0;
//m_BM.state->roi2 = rect1;
//m_BM.state->preFilterCap = 31;
//m_BM.state->SADWindowSize = 9;
//m_BM.state->minDisparity = 0;
//m_BM.state->numberOfDisparities = 16;
//m_BM.state->textureThreshold = 10;
//m_BM.state->uniquenessRatio = 25;
//m_BM.state->speckleWindowSize = 100;
//m_BM.state->speckleRange = 200;
//m_BM.state->disp12MaxDiff = -1;
int unitDisparity = 15;//40
int numberOfDisparities = unitDisparity * 16;
m_BM->setROI1(rect0);
m_BM->setROI2(rect1);
m_BM->setPreFilterCap(31);
m_BM->setBlockSize(9); //SAD窗口大小
m_BM->setMinDisparity(0); //确定匹配搜索从哪里开始 默认值是0
m_BM->setNumDisparities(16); //在该数值确定的视差范围内进行搜索,视差窗口
// 即最大视差值与最小视差值之差, 大小必须是16的整数倍
m_BM->setTextureThreshold(10); //保证有足够的纹理以克服噪声
m_BM->setUniquenessRatio(25); //使用匹配功能模式
m_BM->setSpeckleWindowSize(100); //检查视差连通区域变化度的窗口大小, 值为0时取消 speckle 检查
m_BM->setSpeckleRange(200); // 视差变化阈值,当窗口内视差变化大于阈值时,该窗口内的视差清零
m_BM->setDisp12MaxDiff(-1);
}
void saveDisp(Mat mat)
{
FILE* fp = fopen("a.txt", "wt");
fprintf(fp, "%02d\n", mat.rows);
fprintf(fp, "%02d\n", mat.cols);
for(int y = 0; y < mat.rows; y++)
{
for(int x = 0; x < mat.cols; x++)
{
int disp = (int)mat.at<float>(y, x); // 这里视差矩阵是CV_16S 格式的,故用 short 类型读取
fprintf(fp, "%d\n", disp); // 若视差矩阵是 CV_32F 格式,则用 float 类型读取
}
//fprintf(fp, "\n");
}
fclose(fp);
}
// Capture from two cameras, compute a StereoBM disparity map, reproject
// it to a 3D point cloud with Q, and display the z channel as grayscale.
int main()
{
	VideoCapture cap_left(0);
	if (!cap_left.isOpened()){ cout << "left unopen"; return -1; }
	VideoCapture cap_right(1);
	if (!cap_right.isOpened()){ cout << "right unopen"; return -1; }
	cap_left.set(CV_CAP_PROP_FRAME_WIDTH, 1920);
	cap_left.set(CV_CAP_PROP_FRAME_HEIGHT, 1080);
	cap_right.set(CV_CAP_PROP_FRAME_WIDTH, 1920);
	cap_right.set(CV_CAP_PROP_FRAME_HEIGHT, 1080);
	LoadData();
	namedWindow("points", 0);
	namedWindow("left", 0);
	namedWindow("right", 0);
	// NOTE(review): onMouse is defined above but never registered via
	// setMouseCallback, so clicking never saves frames — confirm intent.
	while (true)
	{
		// Grab both frames first so the two views are captured as close
		// together in time as possible, then decode.
		cap_left.grab();
		cap_right.grab();
		cap_left.retrieve(Left);
		cap_right.retrieve(Right);
		cv::Mat img1proc, img2proc;
		cvtColor(Left, img1proc, CV_BGR2GRAY);
		cvtColor(Right, img2proc, CV_BGR2GRAY);
		// Pad the left edge of both views by numDisparities so the whole
		// original width gets a valid disparity.
		cv::Mat img1border, img2border;
		copyMakeBorder(img1proc, img1border, 0, 0, m_BM->getNumDisparities(), 0, IPL_BORDER_REPLICATE);
		// BUG FIX: the second border image must come from the RIGHT view.
		// The original padded img1proc twice, so BM matched the left image
		// against itself and the disparity map was meaningless.
		copyMakeBorder(img2proc, img2border, 0, 0, m_BM->getNumDisparities(), 0, IPL_BORDER_REPLICATE);
		// StereoBM outputs CV_16S fixed-point disparity (value * 16);
		// convert to float for saving/reprojection.
		cv::Mat dispBorder, disp;
		m_BM->compute(img1border, img2border, dispBorder);
		dispBorder.convertTo(dispBorder, CV_32F);
		// 8-bit scaled copy for display (scale assumes numDisparities == 16).
		dispBorder.convertTo(disp, CV_8U, 255 / (16 * 16.));
		// BUG FIX: show the scaled 8-bit image; the original displayed the
		// raw float matrix, which imshow renders saturated white.
		imshow("disp", disp);
		saveDisp(dispBorder);
		waitKey(20);
		rectangle(Left, rect0, CV_RGB(0, 0, 255), 3);
		rectangle(Right, rect1, CV_RGB(0, 0, 255), 3);
		// handleMissingValues=true maps invalid disparities to a large z.
		reprojectImageTo3D(dispBorder, pointClouds, Q, true);
		// Flip y so the cloud uses a y-up convention.
		for (int y = 0; y < pointClouds.rows; ++y)
		{
			for (int x = 0; x < pointClouds.cols; ++x)
			{
				cv::Point3f& point = pointClouds.at<cv::Point3f>(y, x);
				point.y = -point.y;
			}
		}
		// Visualize z as 8-bit grayscale. NOTE(review): the float-to-uchar
		// assignment truncates values outside 0..255 — consider
		// saturate_cast<uchar> if the depth range matters.
		frame = Mat::zeros(pointClouds.rows, pointClouds.cols, CV_8UC1);
		for (int y = 0; y < pointClouds.rows; y++)
		{
			for (int x = 0; x < pointClouds.cols; x++)
			{
				frame.at<uchar>(y, x) = pointClouds.at<cv::Point3f>(y, x).z;
			}
		}
		imshow("left", Left);
		imshow("right", Right);
		imshow("points", frame);
		waitKey(20);
		cout << i++ << endl;
	}
	return 0;
}
但是,精度受到立体匹配的影响,误差很大,上面的匹配方法是 BM 局部匹配法,参数有待改进;若改用全局/半全局匹配(如 SGBM、图割)虽然质量更好,但计算代价很大,比较慢。
下面我使用了一种简单的方法,对空间中的棋盘进行识别,只匹配棋盘上的一点,计算其视差和坐标,得到的效果还是比较好的。
#include <cv.h>
#include <highgui.h>
#include <iostream>
using namespace std;
using namespace cv;
#define A 1                              // enable method A: triangulatePoints with P0/P1
#define B 1                              // enable method B: Q * [x, y, disparity, 1]^T
Mat frame(1080, 1920 * 2, CV_8UC3);      // side-by-side canvas: left view | right view
Size grids(9, 6);                        // chessboard inner-corner count (cols x rows)
Mat Left, Right;                         // raw frames from the two cameras
vector<Point2f> corner1, corner2;        // detected chessboard corners, left/right view
int main()
{
Mat Q, out, dist, P0, P1,out2;
//Q.create(4, 4,CV_32FC1);
out.create(4, 1, CV_32FC1);
dist.create(4, 1, CV_32FC1);
FileStorage q("stero.xml", FileStorage::READ);
q["Q"] >> Q; q["P0"] >> P0; q["P1"] >> P1;
cout << Q<<endl;
cout << P0 << endl;
cout << P1 << endl;
//Q.at<double>(3, 2) = -Q.at<double>(3, 2);
Q.convertTo(Q, CV_32FC1);
P0.convertTo(P0, CV_32FC1);
P1.convertTo(P1, CV_32FC1);
q.release();
Mat show(1080/2,1920,CV_8UC3);
VideoCapture cap_left(2);
if (!cap_left.isOpened()){ cout << "left unopen"; return -1; }
VideoCapture cap_right(0);
if (!cap_right.isOpened()){ cout << "right unopen"; return -1; }
cap_left.set(CV_CAP_PROP_FRAME_WIDTH, 1920);
cap_left.set(CV_CAP_PROP_FRAME_HEIGHT, 1080);
cap_right.set(CV_CAP_PROP_FRAME_WIDTH, 1920);
cap_right.set(CV_CAP_PROP_FRAME_HEIGHT, 1080);
char key;
namedWindow("frame", 0);
while (1)
{
cap_left.grab();
cap_right.grab();
cap_left.retrieve(Left);
cap_right.retrieve(Right);
bool a=findChessboardCorners(Left, grids, corner1, CALIB_CB_ADAPTIVE_THRESH + CALIB_CB_NORMALIZE_IMAGE + CALIB_CB_FAST_CHECK);
bool b=findChessboardCorners(Right, grids, corner2, CALIB_CB_ADAPTIVE_THRESH + CALIB_CB_NORMALIZE_IMAGE + CALIB_CB_FAST_CHECK);
if (a&&b)
{
circle(Left, corner1[0], 50, { 255, 0, 0 }, 2);
circle(Right, corner2[0], 50, { 255, 0, 0 }, 2);
if(A)
{
triangulatePoints(P0, P1, corner1, corner2, out2);
//out = out2.colRange(0, 0);
putText(Left, "X:" + to_string(out2.at<float>(0, 0) / out2.at<float>(3, 0)), { 10, 50 }, 0, 1.0f, CV_RGB(255, 0, 0), 2);
putText(Left, "Y:" + to_string(out2.at<float>(1, 0) / out2.at<float>(3, 0)), { 10, 100 }, 0, 1.0f, CV_RGB(0, 255, 0), 2);
putText(Left, "Z:" + to_string(out2.at<float>(2, 0) / out2.at<float>(3, 0)), { 10, 150 }, 0, 1.0f, CV_RGB(0, 0, 255), 2);
}
if (B)
{
dist.at<float>(0, 0) = corner1[0].x; dist.at<float>(1, 0) = corner1[0].y; dist.at<float>(2, 0) = corner1[0].x - corner2[0].x; dist.at<float>(3, 0) = 1;
cout << dist << endl;
out = Q*dist;
cout << out << endl;
out = out / out.at<float>(3,0);
cout << out << endl;
if (out.isContinuous())
{
putText(Left, "X:" + to_string(((float*)out.data)[0]), { 10, 200 }, 0, 1.0f, CV_RGB(255, 0, 0), 2);
putText(Left, "Y:" + to_string(((float*)out.data)[1]), { 10, 250 }, 0, 1.0f, CV_RGB(0, 255, 0), 2);
putText(Left, "Z:" + to_string(((float*)out.data)[2]), { 10, 300 }, 0, 1.0f, CV_RGB(0, 0, 255), 2);
}
}
if (C)
{
dist.at<float>(0, 0) = corner1[0].x; dist.at<float>(1, 0) = corner1[0].y; dist.at<float>(2, 0) = corner1[0].x - corner2[0].x; dist.at<float>(3, 0) = 1;
//cout << dist << endl;
tempQ.at<uchar>(0, 0) = (uchar)dist.at<float>(2, 0);
reprojectImageTo3D(tempQ, tempQ2, Q);
//out = out / out.data[3];
putText(Left, "X:" + to_string(tempQ2.at<Vec3f>(0, 0)[0]), { 10, 200 + 150 }, 0, 1.0f, CV_RGB(255, 0, 0), 2);
putText(Left, "Y:" + to_string(tempQ2.at<Vec3f>(0, 0)[1]), { 10, 250 + 150 }, 0, 1.0f, CV_RGB(0, 255, 0), 2);
putText(Left, "Z:" + to_string(tempQ2.at<Vec3f>(0, 0)[2]), { 10, 300 + 150 }, 0, 1.0f, CV_RGB(0, 0, 255), 2);
}
}
Left.copyTo(frame(Rect(0, 0, 1920, 1080)));
Right.copyTo(frame(Rect(1920, 0, 1920, 1080)));
resize(frame,show,Size(1920 , 1080/2 ));
key = waitKey(20);
if (key == ' ')return 0;
imshow("frame", show);
// imshow("frame", Left);
// imshow("frame2", Right);
}
return 0;
}
上面使用了 A、B、C 三种方法对坐标进行解算,其根本原理都是 B 所示的操作(用重投影矩阵 Q 把像素坐标和视差变换为三维坐标)。这里就不详细展开了。