1 双目视觉
何为双目视觉?
双目视觉是模拟人类视觉原理,使用计算机被动感知距离的方法。从两个或者多个点观察一个物体,获取在不同视觉下的图像,根据图像之间像素的匹配关系,通过三角测量原理计算出像素之间的偏移来获取物体的三维信息。得到了物体的景深信息,就可以计算出物体与相机之间的实际距离,物体3维大小,两点之间实际距离。目前也有很多研究机构进行3维物体识别,来解决2D算法无法处理遮挡,姿态变化的问题,提高物体的识别率。
2 实验环境
VS2013
OpenCV3.2.0
双目摄像头 直接购买两个普通的usb摄像头或购买双目摄像头
标定板 淘宝购买或自行打印
参考 http://blog.csdn.net/lonelyrains/article/details/46874723
或者 http://blog.csdn.net/loser__wang/article/details/51811347
3 双目摄像机读取
双目摄像机读取代码很简单, 但针对不同的摄像头可能需要稍微调试一下,遇到问题可以参考一下http://blog.csdn.net/vampireshj/article/details/53535724
,然而我的双目摄像头使用上文中的两种方法都没有成功,以下是我修改后的代码。我把两个摄像头的分辨率都改了一下。这个得看具体摄像头的支持程度,有的无法改,你改了也没效果。
#include "opencv2/opencv.hpp"
#include "opencv2/highgui/highgui.hpp"

using namespace std;
using namespace cv;

// Opens the two USB cameras, requests MJPG at 320x240 (honoured only if the
// hardware supports it) and shows the live left/right streams.
// NOTE: the original listing was missing every statement-terminating
// semicolon and would not compile; they are restored here.
int main(){
    Mat frame_l, frame_r;
    VideoCapture camera_l, camera_r;
    int cont = 0;
    // Cheap UVC cameras often need several grabs (and sometimes a re-open)
    // before they deliver a real frame, hence the retry loops below.
    while (frame_l.rows < 2){
        camera_l.open(0);
        // 'GPJM' is the byte-reversed spelling of the MJPG FOURCC.
        camera_l.set(CV_CAP_PROP_FOURCC, 'GPJM');
        camera_l.set(CV_CAP_PROP_FRAME_WIDTH, 320);
        camera_l.set(CV_CAP_PROP_FRAME_HEIGHT, 240);
        cont = 0;
        while (frame_l.rows < 2 && cont < 5){
            camera_l >> frame_l;
            cont++;
        }
    }
    // Same warm-up dance for the right camera.
    while (frame_r.rows < 2){
        camera_r.open(1);
        camera_r.set(CV_CAP_PROP_FOURCC, 'GPJM');
        camera_r.set(CV_CAP_PROP_FRAME_WIDTH, 320);
        camera_r.set(CV_CAP_PROP_FRAME_HEIGHT, 240);
        cont = 0;
        while (frame_r.rows < 2 && cont < 5){
            camera_r >> frame_r;
            cont++;
        }
    }
    // Live preview loop; ~60 ms between frames.
    while (true)
    {
        camera_l >> frame_l;
        camera_r >> frame_r;
        imshow("camera_l", frame_l);
        imshow("camera_r", frame_r);
        waitKey(60);
    }
    return 0;
}
- 1
- 2
- 3
- 4
- 5
- 6
- 7
- 8
- 9
- 10
- 11
- 12
- 13
- 14
- 15
- 16
- 17
- 18
- 19
- 20
- 21
- 22
- 23
- 24
- 25
- 26
- 27
- 28
- 29
- 30
- 31
- 32
- 33
- 34
- 35
- 36
- 37
- 38
- 39
- 40
- 41
- 42
- 43
- 44
- 45
实验效果如下:
注意到,右摄像头的图像相对于左摄像头的图像有点“左移”。这点请自己分析一下原因。这很重要:如果不是这样,你下面的工作会白做,因为匹配算法就是遵循这种“左移”的。
4 标定环节
基本上的流程就是读取左右摄像头,分别检测棋盘的角点,当同时都检测到完整的角点之后,进行精细化处理,得到更精确的角点并存储。攒够一定数量之后(20-30)之后进行参数计算。并将参数进行存储。还是直接上代码。并说明一些实现的细节部分。
ChessboardStable是用来检测棋盘格是否稳定的。
方案一:如果你的双目摄像头是用手拿着的,或多或少会有一些抖动,这样如果只是检测是否存在角点,可能会通过不是很清晰稳定的图像进行分析,这样会带来比较大的误差,如果通过一个队列判断是否稳定,则可以避免这种误差。 我是简单粗暴的使用vector代替队列的。
后面的部分需要注意的就是 boardSize, squareSize需要设置为你的标定板对应的尺寸,我拿A4纸简单的打印一份,每个格子的大小经过测量是26mm ,你可以根据自己的标定板进行相应的设置。
#include <string>
#include <stdio.h>
#include <iostream>
#include "opencv2/opencv.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
using namespace std;
using namespace cv;
// Sliding history of the last 10 corner detections per camera, used by
// ChessboardStable() to decide whether the board is being held still.
vector<vector<Point2f> >corners_l_array, corners_r_array;
int array_index = 0;
// Reports whether the checkerboard has been (nearly) motionless long enough
// to take a calibration sample. The first 10 calls only fill the history and
// report "not stable"; afterwards each call overwrites the oldest entry
// (ring buffer) and sums the L1 displacement of every current corner against
// every buffered frame. Below an empirical threshold of 1000 the board
// counts as stable, the history is reset, and true is returned.
bool ChessboardStable(vector<Point2f>corners_l, vector<Point2f>corners_r){
    // Warm-up phase: just buffer the detection until 10 samples exist.
    if (corners_l_array.size() < 10){
        corners_l_array.push_back(corners_l);
        corners_r_array.push_back(corners_r);
        return false;
    }
    // Ring-buffer overwrite of the oldest stored sample.
    int slot = array_index % 10;
    corners_l_array[slot] = corners_l;
    corners_r_array[slot] = corners_r;
    array_index++;
    // Accumulate the total L1 motion of the current corners w.r.t. history.
    double motion = 0.0;
    for (size_t i = 0; i < corners_l_array.size(); ++i){
        const vector<Point2f>& hist_l = corners_l_array[i];
        const vector<Point2f>& hist_r = corners_r_array[i];
        for (size_t j = 0; j < hist_l.size(); ++j){
            motion += abs(corners_l[j].x - hist_l[j].x) + abs(corners_l[j].y - hist_l[j].y);
            motion += abs(corners_r[j].x - hist_r[j].x) + abs(corners_r[j].y - hist_r[j].y);
        }
    }
    if (motion >= 1000)
        return false;
    // Stable: wipe the history so the next sample starts from scratch.
    corners_l_array.clear();
    corners_r_array.clear();
    array_index = 0;
    return true;
}
// Stereo calibration tool: grabs synchronized checkerboard views from two
// USB cameras, collects up to 30 stable detections, runs stereoCalibrate()
// and stores the resulting parameters in intrinsics.yml / extrinsics.yml.
int main(){
VideoCapture camera_l, camera_r;
Mat frame_l, frame_r;
int cont = 0;
// Open the left camera; cheap UVC cameras may need several grabs (and a
// re-open) before they deliver a valid frame, hence the retry loops.
while (frame_l.rows < 2){
camera_l.open(0);
// 'GPJM' is the byte-reversed MJPG FOURCC.
camera_l.set(CV_CAP_PROP_FOURCC, 'GPJM');
camera_l.set(CV_CAP_PROP_FRAME_WIDTH, 320);
camera_l.set(CV_CAP_PROP_FRAME_HEIGHT, 240);
cont = 0;
while (frame_l.rows < 2 && cont < 5){
camera_l >> frame_l;
cont++;
}
}
// Same warm-up for the right camera.
while (frame_r.rows < 2){
camera_r.open(1);
camera_r.set(CV_CAP_PROP_FOURCC, 'GPJM');
camera_r.set(CV_CAP_PROP_FRAME_WIDTH, 320);
camera_r.set(CV_CAP_PROP_FRAME_HEIGHT, 240);
cont = 0;
while (frame_r.rows < 2 && cont < 5){
camera_r >> frame_r;
cont++;
}
}
// Board geometry: 9x6 inner corners; square edge in millimetres.
// Adjust both to match your printed board.
Size boardSize(9, 6);
const float squareSize = 26.f;
vector<vector<Point2f> > imagePoints_l;
vector<vector<Point2f> > imagePoints_r;
int nimages = 0;
// Acquisition loop: accept a sample only when BOTH views see the complete
// board AND the board has been steady for a while (ChessboardStable).
while (true)
{
camera_l >> frame_l;
camera_r >> frame_r;
bool found_l = false, found_r = false;
vector<Point2f> corners_l, corners_r;
found_l = findChessboardCorners(frame_l, boardSize, corners_l, CALIB_CB_ADAPTIVE_THRESH | CALIB_CB_NORMALIZE_IMAGE);
found_r = findChessboardCorners(frame_r, boardSize, corners_r, CALIB_CB_ADAPTIVE_THRESH | CALIB_CB_NORMALIZE_IMAGE);
if (found_l && found_r && ChessboardStable(corners_l, corners_r)) {
Mat viewGray;
// Refine both corner sets to sub-pixel accuracy on grayscale images.
cvtColor(frame_l, viewGray, COLOR_BGR2GRAY);
cornerSubPix(viewGray, corners_l, Size(11, 11),
Size(-1, -1), TermCriteria(TermCriteria::EPS + TermCriteria::COUNT, 30, 0.1));
cvtColor(frame_r, viewGray, COLOR_BGR2GRAY);
cornerSubPix(viewGray, corners_r, Size(11, 11),
Size(-1, -1), TermCriteria(TermCriteria::EPS + TermCriteria::COUNT, 30, 0.1));
imagePoints_l.push_back(corners_l);
imagePoints_r.push_back(corners_r);
++nimages;
// Brighten the frames so an accepted capture flashes visibly on screen.
frame_l += 100;
frame_r += 100;
drawChessboardCorners(frame_l, boardSize, corners_l, found_l);
drawChessboardCorners(frame_r, boardSize, corners_r, found_r);
putText(frame_l, to_string(nimages), Point(20, 20), 1, 1, Scalar(0, 0, 255));
putText(frame_r, to_string(nimages), Point(20, 20), 1, 1, Scalar(0, 0, 255));
imshow("Left Camera", frame_l);
imshow("Right Camera", frame_r);
// Pause so the operator can reposition the board; ESC/q aborts.
char c = (char)waitKey(500);
if (c == 27 || c == 'q' || c == 'Q')
exit(-1);
if (nimages >= 30)
break;
}else{
// No (stable) detection: show the live feed with any partial corners.
drawChessboardCorners(frame_l, boardSize, corners_l, found_l);
drawChessboardCorners(frame_r, boardSize, corners_r, found_r);
putText(frame_l, to_string(nimages), Point(20, 20), 1, 1, Scalar(0, 0, 255));
putText(frame_r, to_string(nimages), Point(20, 20), 1, 1, Scalar(0, 0, 255));
imshow("Left Camera", frame_l);
imshow("Right Camera", frame_r);
char key = waitKey(1);
if (key == 27)
break;
}
}
if (nimages < 20){ cout << "Not enough" << endl; return -1;}
vector<vector<Point2f> > imagePoints[2] = { imagePoints_l, imagePoints_r };
// Build the 3-D board model: one planar grid of (x, y, 0) points per view.
vector<vector<Point3f> > objectPoints;
objectPoints.resize(nimages);
for(int i = 0; i < nimages; i++)
{
for (int j = 0; j < boardSize.height; j++)
for (int k = 0; k < boardSize.width; k++)
objectPoints[i].push_back(Point3f(k*squareSize, j*squareSize, 0));
}
cout << "Running stereo calibration ..." << endl;
Size imageSize(320, 240);
Mat cameraMatrix[2], distCoeffs[2];
// Seed each camera matrix from a mono estimate before joint optimisation
// (required by CV_CALIB_USE_INTRINSIC_GUESS below).
cameraMatrix[0] = initCameraMatrix2D(objectPoints, imagePoints_l, imageSize, 0);
cameraMatrix[1] = initCameraMatrix2D(objectPoints, imagePoints_r, imageSize, 0);
Mat R, T, E, F;
double rms = stereoCalibrate(objectPoints, imagePoints_l, imagePoints_r,
cameraMatrix[0], distCoeffs[0],
cameraMatrix[1], distCoeffs[1],
imageSize, R, T, E, F,
CV_CALIB_FIX_ASPECT_RATIO +
CV_CALIB_ZERO_TANGENT_DIST +
CV_CALIB_USE_INTRINSIC_GUESS +
CV_CALIB_SAME_FOCAL_LENGTH +
CV_CALIB_RATIONAL_MODEL +
CV_CALIB_FIX_K3 + CV_CALIB_FIX_K4 + CV_CALIB_FIX_K5,
TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 100, 1e-5));
cout << "done with RMS error=" << rms << endl;
// Quality check: average distance of each point to the epipolar line
// derived from its partner view (should be well below one pixel).
double err = 0;
int npoints = 0;
vector<Vec3f> lines[2];
for (int i = 0; i < nimages; i++)
{
int npt = (int)imagePoints_l[i].size();
Mat imgpt[2];
imgpt[0] = Mat(imagePoints_l[i]);
undistortPoints(imgpt[0], imgpt[0], cameraMatrix[0], distCoeffs[0], Mat(), cameraMatrix[0]);
computeCorrespondEpilines(imgpt[0], 0 + 1, F, lines[0]);
imgpt[1] = Mat(imagePoints_r[i]);
undistortPoints(imgpt[1], imgpt[1], cameraMatrix[1], distCoeffs[1], Mat(), cameraMatrix[1]);
computeCorrespondEpilines(imgpt[1], 1 + 1, F, lines[1]);
for (int j = 0; j < npt; j++)
{
// Point-to-epiline distances, left point vs right line and vice versa.
double errij = fabs(imagePoints[0][i][j].x*lines[1][j][0] +
imagePoints[0][i][j].y*lines[1][j][1] + lines[1][j][2]) +
fabs(imagePoints[1][i][j].x*lines[0][j][0] +
imagePoints[1][i][j].y*lines[0][j][1] + lines[0][j][2]);
err += errij;
}
npoints += npt;
}
cout << "average epipolar err = " << err / npoints << endl;
// Persist the intrinsics (camera matrices + distortion coefficients).
FileStorage fs("intrinsics.yml", FileStorage::WRITE);
if (fs.isOpened())
{
fs << "M1" << cameraMatrix[0] << "D1" << distCoeffs[0] <<
"M2" << cameraMatrix[1] << "D2" << distCoeffs[1];
fs.release();
}
else
cout << "Error: can not save the intrinsic parameters\n";
// Rectification transforms (R1/R2, P1/P2) and disparity-to-depth matrix Q.
Mat R1, R2, P1, P2, Q;
Rect validRoi[2];
stereoRectify(cameraMatrix[0], distCoeffs[0],
cameraMatrix[1], distCoeffs[1],
imageSize, R, T, R1, R2, P1, P2, Q,
CALIB_ZERO_DISPARITY, 1, imageSize, &validRoi[0], &validRoi[1]);
fs.open("extrinsics.yml", FileStorage::WRITE);
if (fs.isOpened())
{
fs << "R" << R << "T" << T << "R1" << R1 << "R2" << R2 << "P1" << P1 << "P2" << P2 << "Q" << Q;
fs.release();
}
else
cout << "Error: can not save the extrinsic parameters\n";
return 0;
}
- 1
- 2
- 3
- 4
- 5
- 6
- 7
- 8
- 9
- 10
- 11
- 12
- 13
- 14
- 15
- 16
- 17
- 18
- 19
- 20
- 21
- 22
- 23
- 24
- 25
- 26
- 27
- 28
- 29
- 30
- 31
- 32
- 33
- 34
- 35
- 36
- 37
- 38
- 39
- 40
- 41
- 42
- 43
- 44
- 45
- 46
- 47
- 48
- 49
- 50
- 51
- 52
- 53
- 54
- 55
- 56
- 57
- 58
- 59
- 60
- 61
- 62
- 63
- 64
- 65
- 66
- 67
- 68
- 69
- 70
- 71
- 72
- 73
- 74
- 75
- 76
- 77
- 78
- 79
- 80
- 81
- 82
- 83
- 84
- 85
- 86
- 87
- 88
- 89
- 90
- 91
- 92
- 93
- 94
- 95
- 96
- 97
- 98
- 99
- 100
- 101
- 102
- 103
- 104
- 105
- 106
- 107
- 108
- 109
- 110
- 111
- 112
- 113
- 114
- 115
- 116
- 117
- 118
- 119
- 120
- 121
- 122
- 123
- 124
- 125
- 126
- 127
- 128
- 129
- 130
- 131
- 132
- 133
- 134
- 135
- 136
- 137
- 138
- 139
- 140
- 141
- 142
- 143
- 144
- 145
- 146
- 147
- 148
- 149
- 150
- 151
- 152
- 153
- 154
- 155
- 156
- 157
- 158
- 159
- 160
- 161
- 162
- 163
- 164
- 165
- 166
- 167
- 168
- 169
- 170
- 171
- 172
- 173
- 174
- 175
- 176
- 177
- 178
- 179
- 180
- 181
- 182
- 183
- 184
- 185
- 186
- 187
- 188
- 189
- 190
- 191
- 192
- 193
- 194
- 195
- 196
- 197
- 198
- 199
- 200
- 201
- 202
- 203
- 204
- 205
- 206
- 207
- 208
- 209
- 210
- 211
- 212
- 213
- 214
- 215
- 216
- 217
- 218
- 219
- 220
- 221
- 222
- 223
- 224
- 225
- 226
- 227
- 228
- 229
- 230
- 231
注意1:该程序我在x64 debug模式下会出现assert错误,release下没有问题。有人解决请赐教!
注意2:标定的时候把各个方向,大小都照顾到。
重要函数说明
(1) findChessboardCorners()函数 在图像中找到指定大小的棋盘图案
函数原型:
CV_EXPORTS_W bool findChessboardCorners( InputArray image, Size patternSize,
OutputArray corners,
int flags=CALIB_CB_ADAPTIVE_THRESH+CALIB_CB_NORMALIZE_IMAGE );
image 输入的棋盘图,必须是8位的灰度或者彩色图像。
pattern_size 棋盘图中每行和每列角点的个数。
corners 检测到的角点
flags 各种操作标志,可以是0或者下面值的组合:
CV_CALIB_CB_ADAPTIVE_THRESH -使用自适应阈值(通过平均图像亮度计算得到)将图像转换为黑白图,而不是一个固定的阈值。
CV_CALIB_CB_NORMALIZE_IMAGE -在利用固定阈值或者自适应的阈值进行二值化之前,先使用cvNormalizeHist来均衡化图像亮度。
CV_CALIB_CB_FILTER_QUADS -使用其他的准则(如轮廓面积,周长,方形形状)来去除在轮廓检测阶段检测到的错误方块。
补充说明
函数cvFindChessboardCorners试图确定输入图像是否是棋盘模式,并确定角点的位置。如果所有角点都被检测到且它们都被以一定顺序排布,函数返回非零值,否则在函数不能发现所有角点或者无法记录它们的情况下,函数返回0。例如一个正常的棋盘图有8x8个方块和7x7个内角点,内角点是黑色方块相互联通的位置。这个函数检测到的坐标只是一个大约的值,如果要精确地确定它们的位置,可以使用函数cvFindCornerSubPix。
(2) cornerSubPix()函数 角点检测中精确化角点位置,从而取得亚像素级别的角点检测效果。
函数原型:
//! adjusts the corner locations with sub-pixel accuracy to maximize the certain cornerness criteria
CV_EXPORTS_W void cornerSubPix( InputArray image, InputOutputArray corners,
Size winSize, Size zeroZone,
TermCriteria criteria );
image:输入图像(8位或32位单通道图)
corners:检测到的角点,即是输入也是输出
winSize:计算亚像素角点时考虑的搜索区域的大小,区域为N×N,其中N=(winSize*2+1)。
zeroZone:作用类似于winSize,但是总是具有较小的范围,通常忽略(即Size(-1, -1))。
criteria:用于表示计算亚像素时停止迭代的标准,可选的值有cv::TermCriteria::MAX_ITER 、cv::TermCriteria::EPS(可以是两者其一,或两者均选),前者表示迭代次数达到了最大次数时停止,后者表示角点位置变化的最小值已经达到最小时停止迭代。二者均使用cv::TermCriteria()构造函数进行指定。
(3) drawChessboardCorners()函数 棋盘格角点的绘制
CV_EXPORTS_W void drawChessboardCorners( InputOutputArray image, Size patternSize,
InputArray corners, bool patternWasFound );
image:棋盘格图像(8UC3)
patternSize:棋盘格内部角点的行、列,和cv::findChessboardCorners()指定的相同
corners:检测到的棋盘格角点
patternWasFound:cv::findChessboardCorners()的返回值
(4) stereoCalibrate()函数 找到立体相机的内在和外在参数
Similarly to calibrateCamera, the function minimizes the total re-projection error for all the points in all the available views from both cameras. The function returns the final value of the re-projection error.
CV_EXPORTS_W double stereoCalibrate( InputArrayOfArrays objectPoints,
InputArrayOfArrays imagePoints1, InputArrayOfArrays imagePoints2,
InputOutputArray cameraMatrix1, InputOutputArray distCoeffs1,
InputOutputArray cameraMatrix2, InputOutputArray distCoeffs2,
Size imageSize, OutputArray R,OutputArray T, OutputArray E, OutputArray F,
int flags = CALIB_FIX_INTRINSIC,
TermCriteria criteria = TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 30, 1e-6) );
objectPoints:校正的图像点向量组
imagePoints1:通过第一台相机观测到的图像上面的向量组.
imagePoints2:通过第二台相机观测到的图像上面的向量组.
cameraMatrix1:输入或者输出第一个相机的内参数矩阵
distCoeffs1:输入/输出第一个相机的畸变系数向量
cameraMatrix2:输入或者输出第二个相机的内参数矩阵
distCoeffs2:输入/输出第二个相机的畸变系数向量
imageSize:图像文件的大小——只用于初始化相机内参数矩阵
R:输出第一和第二相机坐标系之间的旋转矩阵。
T:输出第一和第二相机坐标系之间的平移向量
E:输出本征矩阵
F:输出基础矩阵
flags:不同的FLAG,可能是零或以下值的结合,参考http://www.baike.com/wiki/stereoCalibrate
criteria:迭代优化算法终止的标准
5 立体匹配
此处直接上完整代码
#include <string>
#include <stdio.h>
#include <iostream>
#include "opencv2/opencv.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/core/core.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui/highgui.hpp"
using namespace std;
using namespace cv;
// Sliding history of the last 10 corner detections per camera, used by
// ChessboardStable() to decide whether the board is being held still.
vector<vector<Point2f> >corners_l_array, corners_r_array;
int array_index = 0;
// Reports whether the checkerboard has been (nearly) motionless long enough
// to take a calibration sample. The first 10 calls only fill the history and
// report "not stable"; afterwards each call overwrites the oldest entry
// (ring buffer) and sums the L1 displacement of every current corner against
// every buffered frame. Below an empirical threshold of 1000 the board
// counts as stable, the history is reset, and true is returned.
bool ChessboardStable(vector<Point2f>corners_l, vector<Point2f>corners_r){
    // Warm-up phase: just buffer the detection until 10 samples exist.
    if (corners_l_array.size() < 10){
        corners_l_array.push_back(corners_l);
        corners_r_array.push_back(corners_r);
        return false;
    }
    // Ring-buffer overwrite of the oldest stored sample.
    int slot = array_index % 10;
    corners_l_array[slot] = corners_l;
    corners_r_array[slot] = corners_r;
    array_index++;
    // Accumulate the total L1 motion of the current corners w.r.t. history.
    double motion = 0.0;
    for (size_t i = 0; i < corners_l_array.size(); ++i){
        const vector<Point2f>& hist_l = corners_l_array[i];
        const vector<Point2f>& hist_r = corners_r_array[i];
        for (size_t j = 0; j < hist_l.size(); ++j){
            motion += abs(corners_l[j].x - hist_l[j].x) + abs(corners_l[j].y - hist_l[j].y);
            motion += abs(corners_r[j].x - hist_r[j].x) + abs(corners_r[j].y - hist_r[j].y);
        }
    }
    if (motion >= 1000)
        return false;
    // Stable: wipe the history so the next sample starts from scratch.
    corners_l_array.clear();
    corners_r_array.clear();
    array_index = 0;
    return true;
}
// End-to-end stereo demo: calibrates the rig from live checkerboard views,
// writes intrinsics/extrinsics to YAML, rectifies both streams, then shows
// BM and SGBM disparity maps in real time.
int main(){
VideoCapture camera_l, camera_r;
Mat frame_l, frame_r;
int cont = 0;
// Camera warm-up loops: retry open/grab until a real frame arrives.
while (frame_l.rows < 2){
camera_l.open(0);
// 'GPJM' is the byte-reversed MJPG FOURCC.
camera_l.set(CV_CAP_PROP_FOURCC, 'GPJM');
camera_l.set(CV_CAP_PROP_FRAME_WIDTH, 320);
camera_l.set(CV_CAP_PROP_FRAME_HEIGHT, 240);
cont = 0;
while (frame_l.rows < 2 && cont < 5){
camera_l >> frame_l;
cont++;
}
}
while (frame_r.rows < 2){
camera_r.open(1);
camera_r.set(CV_CAP_PROP_FOURCC, 'GPJM');
camera_r.set(CV_CAP_PROP_FRAME_WIDTH, 320);
camera_r.set(CV_CAP_PROP_FRAME_HEIGHT, 240);
cont = 0;
while (frame_r.rows < 2 && cont < 5){
camera_r >> frame_r;
cont++;
}
}
// Board geometry: 9x6 inner corners, 26 mm squares (match your print).
Size boardSize(9, 6);
const float squareSize = 26.f;
vector<Mat> goodFrame_l;
vector<Mat> goodFrame_r;
vector<vector<Point2f> > imagePoints_l;
vector<vector<Point2f> > imagePoints_r;
vector<vector<Point3f> > objectPoints;
int nimages = 0;
// --- Capture phase: collect up to 30 stable checkerboard detections. ---
while (true){
camera_l >> frame_l;
camera_r >> frame_r;
bool found_l = false, found_r = false;
vector<Point2f>corners_l, corners_r;
found_l = findChessboardCorners(frame_l, boardSize, corners_l,
CALIB_CB_ADAPTIVE_THRESH | CALIB_CB_NORMALIZE_IMAGE);
found_r = findChessboardCorners(frame_r, boardSize, corners_r,
CALIB_CB_ADAPTIVE_THRESH | CALIB_CB_NORMALIZE_IMAGE);
if (found_l && found_r &&ChessboardStable(corners_l, corners_r)){
// Keep the raw frames of the accepted sample as well.
goodFrame_l.push_back(frame_l);
goodFrame_r.push_back(frame_r);
Mat viewGray;
// Refine both corner sets to sub-pixel accuracy on grayscale images.
cvtColor(frame_l, viewGray, COLOR_BGR2GRAY);
cornerSubPix(viewGray, corners_l, Size(11, 11),
Size(-1, -1), TermCriteria(TermCriteria::EPS + TermCriteria::COUNT, 30, 0.1));
cvtColor(frame_r, viewGray, COLOR_BGR2GRAY);
cornerSubPix(viewGray, corners_r, Size(11, 11),
Size(-1, -1), TermCriteria(TermCriteria::EPS + TermCriteria::COUNT, 30, 0.1));
imagePoints_l.push_back(corners_l);
imagePoints_r.push_back(corners_r);
++nimages;
// Brighten the frames so an accepted capture flashes visibly on screen.
frame_l += 100;
frame_r += 100;
drawChessboardCorners(frame_l, boardSize, corners_l, found_l);
drawChessboardCorners(frame_r, boardSize, corners_r, found_r);
putText(frame_l, to_string(nimages), Point(20, 20), 1, 1, Scalar(0, 0, 255));
putText(frame_r, to_string(nimages), Point(20, 20), 1, 1, Scalar(0, 0, 255));
imshow("Left Camera", frame_l);
imshow("Right Camera", frame_r);
// Pause so the operator can reposition the board; ESC/q aborts.
char c = (char)waitKey(500);
if (c == 27 || c == 'q' || c == 'Q')
exit(-1);
if (nimages >= 30)
break;
}
else{
// No (stable) detection: show the live feed with any partial corners.
drawChessboardCorners(frame_l, boardSize, corners_l, found_l);
drawChessboardCorners(frame_r, boardSize, corners_r, found_r);
putText(frame_l, to_string(nimages), Point(20, 20), 1, 1, Scalar(0, 0, 255));
putText(frame_r, to_string(nimages), Point(20, 20), 1, 1, Scalar(0, 0, 255));
imshow("Left Camera", frame_l);
imshow("Right Camera", frame_r);
char key = waitKey(1);
if (key == 27)
break;
}
}
if (nimages < 20){ cout << "Not enough" << endl; return -1; }
vector<vector<Point2f> > imagePoints[2] = { imagePoints_l, imagePoints_r };
// Build the 3-D board model: one planar grid of (x, y, 0) points per view.
objectPoints.resize(nimages);
for (int i = 0; i < nimages; i++)
{
for (int j = 0; j < boardSize.height; j++)
for (int k = 0; k < boardSize.width; k++)
objectPoints[i].push_back(Point3f(k*squareSize, j*squareSize, 0));
}
cout << "Running stereo calibration ..." << endl;
Size imageSize(320, 240);
Mat cameraMatrix[2], distCoeffs[2];
// Seed each camera matrix from a mono estimate before joint optimisation
// (required by CALIB_USE_INTRINSIC_GUESS below).
cameraMatrix[0] = initCameraMatrix2D(objectPoints, imagePoints_l, imageSize, 0);
cameraMatrix[1] = initCameraMatrix2D(objectPoints, imagePoints_r, imageSize, 0);
Mat R, T, E, F;
double rms = stereoCalibrate(objectPoints, imagePoints_l, imagePoints_r,
cameraMatrix[0], distCoeffs[0],
cameraMatrix[1], distCoeffs[1],
imageSize, R, T, E, F,
CALIB_FIX_ASPECT_RATIO +
CALIB_ZERO_TANGENT_DIST +
CALIB_USE_INTRINSIC_GUESS +
CALIB_SAME_FOCAL_LENGTH +
CALIB_RATIONAL_MODEL +
CALIB_FIX_K3 + CALIB_FIX_K4 + CALIB_FIX_K5,
TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 100, 1e-5));
cout << "done with RMS error=" << rms << endl;
// Quality check: average distance of each point to the epipolar line
// derived from its partner view (should be well below one pixel).
double err = 0;
int npoints = 0;
vector<Vec3f> lines[2];
for (int i = 0; i < nimages; i++)
{
int npt = (int)imagePoints_l[i].size();
Mat imgpt[2];
imgpt[0] = Mat(imagePoints_l[i]);
undistortPoints(imgpt[0], imgpt[0], cameraMatrix[0], distCoeffs[0], Mat(), cameraMatrix[0]);
computeCorrespondEpilines(imgpt[0], 0 + 1, F, lines[0]);
imgpt[1] = Mat(imagePoints_r[i]);
undistortPoints(imgpt[1], imgpt[1], cameraMatrix[1], distCoeffs[1], Mat(), cameraMatrix[1]);
computeCorrespondEpilines(imgpt[1], 1 + 1, F, lines[1]);
for (int j = 0; j < npt; j++)
{
// Point-to-epiline distances, left point vs right line and vice versa.
double errij = fabs(imagePoints[0][i][j].x*lines[1][j][0] +
imagePoints[0][i][j].y*lines[1][j][1] + lines[1][j][2]) +
fabs(imagePoints[1][i][j].x*lines[0][j][0] +
imagePoints[1][i][j].y*lines[0][j][1] + lines[0][j][2]);
err += errij;
}
npoints += npt;
}
cout << "average epipolar err = " << err / npoints << endl;
// Persist the intrinsics (camera matrices + distortion coefficients).
FileStorage fs("intrinsics.yml", FileStorage::WRITE);
if (fs.isOpened())
{
fs << "M1" << cameraMatrix[0] << "D1" << distCoeffs[0] <<
"M2" << cameraMatrix[1] << "D2" << distCoeffs[1];
fs.release();
}
else
cout << "Error: can not save the intrinsic parameters\n";
// Rectification transforms (R1/R2, P1/P2) and disparity-to-depth matrix Q.
Mat R1, R2, P1, P2, Q;
Rect validRoi[2];
stereoRectify(cameraMatrix[0], distCoeffs[0],
cameraMatrix[1], distCoeffs[1],
imageSize, R, T, R1, R2, P1, P2, Q,
CALIB_ZERO_DISPARITY, 1, imageSize, &validRoi[0], &validRoi[1]);
fs.open("extrinsics.yml", FileStorage::WRITE);
if (fs.isOpened())
{
fs << "R" << R << "T" << T << "R1" << R1 << "R2" << R2 << "P1" << P1 << "P2" << P2 << "Q" << Q;
fs.release();
}
else
cout << "Error: can not save the extrinsic parameters\n";
// If the translation is mostly vertical the cameras are stacked, which
// changes how the side-by-side preview canvas is laid out below.
bool isVerticalStereo = fabs(P2.at<double>(1, 3)) > fabs(P2.at<double>(0, 3));
// Precompute the per-camera rectification remap tables.
Mat rmap[2][2];
initUndistortRectifyMap(cameraMatrix[0], distCoeffs[0], R1, P1, imageSize, CV_16SC2, rmap[0][0], rmap[0][1]);
initUndistortRectifyMap(cameraMatrix[1], distCoeffs[1], R2, P2, imageSize, CV_16SC2, rmap[1][0], rmap[1][1]);
Mat canvas;
double sf;
int w, h;
// Scale the preview so the longer image edge maps to 600 px (side-by-side)
// or 300 px (stacked).
if (!isVerticalStereo)
{
sf = 600. / MAX(imageSize.width, imageSize.height);
w = cvRound(imageSize.width*sf);
h = cvRound(imageSize.height*sf);
canvas.create(h, w * 2, CV_8UC3);
}
else
{
sf = 300. / MAX(imageSize.width, imageSize.height);
w = cvRound(imageSize.width*sf);
h = cvRound(imageSize.height*sf);
canvas.create(h * 2, w, CV_8UC3);
}
destroyAllWindows();
Mat imgLeft, imgRight;
// --- Matcher setup: block matching (BM) and semi-global matching (SGBM). ---
// ndisparities must be a multiple of 16; SADWindowSize must be odd.
int ndisparities = 16 * 5;
int SADWindowSize = 31;
Ptr<StereoBM> sbm = StereoBM::create(ndisparities, SADWindowSize);
sbm->setMinDisparity(0);
sbm->setTextureThreshold(10);
sbm->setDisp12MaxDiff(-1);
sbm->setPreFilterCap(31);
sbm->setUniquenessRatio(25);
sbm->setSpeckleRange(32);
sbm->setSpeckleWindowSize(100);
// SGBM: P1/P2 smoothness penalties scaled by the 7x7 block size.
Ptr<StereoSGBM> sgbm = StereoSGBM::create(0, 64, 7,
10 * 7 * 7,
40 * 7 * 7,
1, 63, 10, 100, 32, StereoSGBM::MODE_SGBM);
Mat rimg, cimg;
Mat Mask;
// --- Live matching loop: rectify, crop to the common valid ROI, match. ---
while (true)
{
camera_l >> frame_l;
camera_r >> frame_r;
if (frame_l.empty() || frame_r.empty())
continue;
// Rectify the left frame and paste it into the left half of the canvas.
remap(frame_l, rimg, rmap[0][0], rmap[0][1], INTER_LINEAR);
rimg.copyTo(cimg);
Mat canvasPart1 = !isVerticalStereo ? canvas(Rect(w * 0, 0, w, h)) : canvas(Rect(0, h * 0, w, h));
resize(cimg, canvasPart1, canvasPart1.size(), 0, 0, INTER_AREA);
Rect vroi1(cvRound(validRoi[0].x*sf), cvRound(validRoi[0].y*sf),
cvRound(validRoi[0].width*sf), cvRound(validRoi[0].height*sf));
// Same for the right frame into the other half.
remap(frame_r, rimg, rmap[1][0], rmap[1][1], INTER_LINEAR);
rimg.copyTo(cimg);
Mat canvasPart2 = !isVerticalStereo ? canvas(Rect(w * 1, 0, w, h)) : canvas(Rect(0, h * 1, w, h));
resize(cimg, canvasPart2, canvasPart2.size(), 0, 0, INTER_AREA);
Rect vroi2 = Rect(cvRound(validRoi[1].x*sf), cvRound(validRoi[1].y*sf),
cvRound(validRoi[1].width*sf), cvRound(validRoi[1].height*sf));
// Intersection of both valid ROIs: the area the matchers operate on.
Rect vroi = vroi1&vroi2;
imgLeft = canvasPart1(vroi).clone();
imgRight = canvasPart2(vroi).clone();
rectangle(canvasPart1, vroi1, Scalar(0, 0, 255), 3, 8);
rectangle(canvasPart2, vroi2, Scalar(0, 0, 255), 3, 8);
// Draw horizontal (or vertical) guide lines: after rectification,
// corresponding points must lie on the same line in both views.
if (!isVerticalStereo)
for (int j = 0; j < canvas.rows; j += 32)
line(canvas, Point(0, j), Point(canvas.cols, j), Scalar(0, 255, 0), 1, 8);
else
for (int j = 0; j < canvas.cols; j += 32)
line(canvas, Point(j, 0), Point(j, canvas.rows), Scalar(0, 255, 0), 1, 8);
// Both matchers expect single-channel input.
cvtColor(imgLeft, imgLeft, CV_BGR2GRAY);
cvtColor(imgRight, imgRight, CV_BGR2GRAY);
Mat imgDisparity16S = Mat(imgLeft.rows, imgLeft.cols, CV_16S);
Mat imgDisparity8U = Mat(imgLeft.rows, imgLeft.cols, CV_8UC1);
Mat sgbmDisp16S = Mat(imgLeft.rows, imgLeft.cols, CV_16S);
Mat sgbmDisp8U = Mat(imgLeft.rows, imgLeft.cols, CV_8UC1);
if (imgLeft.empty() || imgRight.empty())
{
std::cout << " --(!) Error reading images " << std::endl; return -1;
}
// BM disparity: scale 16S result to 8U, mask out negative (invalid) values,
// then colour-map for display.
sbm->compute(imgLeft, imgRight, imgDisparity16S);
imgDisparity16S.convertTo(imgDisparity8U, CV_8UC1, 255.0 / 1000.0);
cv::compare(imgDisparity16S, 0, Mask, CMP_GE);
applyColorMap(imgDisparity8U, imgDisparity8U, COLORMAP_HSV);
Mat disparityShow;
imgDisparity8U.copyTo(disparityShow, Mask);
// SGBM disparity, same post-processing.
sgbm->compute(imgLeft, imgRight, sgbmDisp16S);
sgbmDisp16S.convertTo(sgbmDisp8U, CV_8UC1, 255.0 / 1000.0);
cv::compare(sgbmDisp16S, 0, Mask, CMP_GE);
applyColorMap(sgbmDisp8U, sgbmDisp8U, COLORMAP_HSV);
Mat sgbmDisparityShow;
sgbmDisp8U.copyTo(sgbmDisparityShow, Mask);
imshow("bmDisparity", disparityShow);
imshow("sgbmDisparity", sgbmDisparityShow);
imshow("rectified", canvas);
char c = (char)waitKey(1);
if (c == 27 || c == 'q' || c == 'Q')
break;
}
return 0;
}
- 1
- 2
- 3
- 4
- 5
- 6
- 7
- 8
- 9
- 10
- 11
- 12
- 13
- 14
- 15
- 16
- 17
- 18
- 19
- 20
- 21
- 22
- 23
- 24
- 25
- 26
- 27
- 28
- 29
- 30
- 31
- 32
- 33
- 34
- 35
- 36
- 37
- 38
- 39
- 40
- 41
- 42
- 43
- 44
- 45
- 46
- 47
- 48
- 49
- 50
- 51
- 52
- 53
- 54
- 55
- 56
- 57
- 58
- 59
- 60
- 61
- 62
- 63
- 64
- 65
- 66
- 67
- 68
- 69
- 70
- 71
- 72
- 73
- 74
- 75
- 76
- 77
- 78
- 79
- 80
- 81
- 82
- 83
- 84
- 85
- 86
- 87
- 88
- 89
- 90
- 91
- 92
- 93
- 94
- 95
- 96
- 97
- 98
- 99
- 100
- 101
- 102
- 103
- 104
- 105
- 106
- 107
- 108
- 109
- 110
- 111
- 112
- 113
- 114
- 115
- 116
- 117
- 118
- 119
- 120
- 121
- 122
- 123
- 124
- 125
- 126
- 127
- 128
- 129
- 130
- 131
- 132
- 133
- 134
- 135
- 136
- 137
- 138
- 139
- 140
- 141
- 142
- 143
- 144
- 145
- 146
- 147
- 148
- 149
- 150
- 151
- 152
- 153
- 154
- 155
- 156
- 157
- 158
- 159
- 160
- 161
- 162
- 163
- 164
- 165
- 166
- 167
- 168
- 169
- 170
- 171
- 172
- 173
- 174
- 175
- 176
- 177
- 178
- 179
- 180
- 181
- 182
- 183
- 184
- 185
- 186
- 187
- 188
- 189
- 190
- 191
- 192
- 193
- 194
- 195
- 196
- 197
- 198
- 199
- 200
- 201
- 202
- 203
- 204
- 205
- 206
- 207
- 208
- 209
- 210
- 211
- 212
- 213
- 214
- 215
- 216
- 217
- 218
- 219
- 220
- 221
- 222
- 223
- 224
- 225
- 226
- 227
- 228
- 229
- 230
- 231
- 232
- 233
- 234
- 235
- 236
- 237
- 238
- 239
- 240
- 241
- 242
- 243
- 244
- 245
- 246
- 247
- 248
- 249
- 250
- 251
- 252
- 253
- 254
- 255
- 256
- 257
- 258
- 259
- 260
- 261
- 262
- 263
- 264
- 265
- 266
- 267
- 268
- 269
- 270
- 271
- 272
- 273
- 274
- 275
- 276
- 277
- 278
- 279
- 280
- 281
- 282
- 283
- 284
- 285
- 286
- 287
- 288
- 289
- 290
- 291
- 292
- 293
- 294
- 295
- 296
- 297
- 298
- 299
- 300
- 301
- 302
- 303
- 304
- 305
- 306
- 307
- 308
- 309
- 310
- 311
- 312
- 313
- 314
- 315
- 316
- 317
- 318
- 319
- 320
- 321
- 322
- 323
- 324
- 325
- 326
- 327
- 328
- 329
- 330
- 331
- 332
- 333
- 334
- 335
- 336
- 337
- 338
- 339
- 340
- 341
- 342
- 343
- 344
- 345
- 346
- 347
- 348
- 349
- 350
- 351
- 352
- 353
- 354
- 355
- 356
- 357
- 358
- 359
- 360
- 361
- 362
- 363
- 364
- 365
- 366
- 367
- 368
- 369
- 370
- 371
- 372
- 373
- 374
- 375
- 376
以下为实验效果:
主要模块探讨:
OpenCV中包括三种立体匹配求视差图算法分别为:StereoBM、StereoSGBM和StereoVar。
三种匹配算法总结:http://blog.csdn.net/wqvbjhc/article/details/6260844
OpenCV中对BM算法的使用进行了更新。 在OpenCV3中,StereoBM算法发生了比较大的变化,StereoBM被定义为纯虚类,因此不能直接实例化,只能用智能指针的形式实例化,也不用StereoBMState类来设置了,而是改成用bm->set…的形式,详细看如下代码。
// OpenCV 3.x style StereoBM configuration: the matcher is created through
// the factory and tuned via setter calls (StereoBMState is gone).
cv::Ptr<cv::StereoBM> bm = cv::StereoBM::create();
int unitDisparity = 15;
int numberOfDisparities = unitDisparity * 16;   // must be a multiple of 16
bm->setROI1(roi1);
bm->setROI2(roi2);
bm->setPreFilterCap(13);
bm->setBlockSize(15);   // fixed: was "bm->setBlockSize = 15;", which does not compile (setBlockSize is a member function, not a data member)
bm->setMinDisparity(0);
bm->setNumDisparities(numberOfDisparities);
bm->setTextureThreshold(1000);
bm->setUniquenessRatio(1);
bm->setSpeckleWindowSize(200);
bm->setSpeckleRange(32);
bm->setDisp12MaxDiff(-1);
- 1
- 2
- 3
- 4
- 5
- 6
- 7
- 8
- 9
- 10
- 11
- 12
- 13
- 14
- 15
- 16
- 17
- 18
- 19
- 20
- 21
- 22
- 23
- 24
- 25
- 26
- 27
- 28
- 29
- 30
- 31
- 32
- 33
- 34
- 35
- 36
- 37
References:
http://blog.csdn.net/vampireshj/article/details/53535724
http://blog.csdn.net/taily_duan/article/details/52165458
http://blog.csdn.net/zilanpotou182/article/details/72329903
http://blog.csdn.net/ailunlee/article/details/70254835
http://blog.sina.com.cn/s/blog_4a540be60102v44s.html
https://www.zhihu.com/question/48747960/answer/112787533
http://blog.csdn.net/h532600610/article/details/51800488
http://blog.csdn.net/guduruyu/article/details/69537083
http://www.baike.com/wiki/stereoCalibrate