opencv:4.1.1
/opencv-4.1.1/modules/calib3d/src/calibration.cpp
cv::stereoCalibrate:参数说明参见 OpenCV 官方文档。该函数的作用是进行参数检查与格式转换,然后调用 cvStereoCalibrateImpl 完成实际的双目标定。
//Stereo calibration for the perspective (pinhole) camera model.
//See the OpenCV documentation for the full parameter semantics. This wrapper
//only validates/converts the arguments, then delegates to cvStereoCalibrateImpl.
double cv::stereoCalibrate( InputArrayOfArrays _objectPoints,
InputArrayOfArrays _imagePoints1,
InputArrayOfArrays _imagePoints2,
InputOutputArray _cameraMatrix1, InputOutputArray _distCoeffs1,
InputOutputArray _cameraMatrix2, InputOutputArray _distCoeffs2,
Size imageSize, InputOutputArray _Rmat, InputOutputArray _Tmat,
OutputArray _Emat, OutputArray _Fmat,
OutputArray _perViewErrors, int flags ,
TermCriteria criteria)
{
int rtype = CV_64F;
//convert the input arrays to Mat form
Mat cameraMatrix1 = _cameraMatrix1.getMat();
Mat cameraMatrix2 = _cameraMatrix2.getMat();
Mat distCoeffs1 = _distCoeffs1.getMat();
Mat distCoeffs2 = _distCoeffs2.getMat();
//when user-supplied, these helpers also validate size/type
cameraMatrix1 = prepareCameraMatrix(cameraMatrix1, rtype, flags);
cameraMatrix2 = prepareCameraMatrix(cameraMatrix2, rtype, flags);
distCoeffs1 = prepareDistCoeffs(distCoeffs1, rtype);
distCoeffs2 = prepareDistCoeffs(distCoeffs2, rtype);
//bitwise-and extracts the flag bits: with none of the extended distortion
//models enabled, only the first 5 coefficients (k1,k2,p1,p2,k3) are kept
if( !(flags & CALIB_RATIONAL_MODEL) &&
(!(flags & CALIB_THIN_PRISM_MODEL)) &&
(!(flags & CALIB_TILTED_MODEL)))
{
distCoeffs1 = distCoeffs1.rows == 1 ? distCoeffs1.colRange(0, 5) : distCoeffs1.rowRange(0, 5);
distCoeffs2 = distCoeffs2.rows == 1 ? distCoeffs2.colRange(0, 5) : distCoeffs2.rowRange(0, 5);
}
//without an extrinsic guess, allocate fresh R (3x3) and T (3x1) outputs
if((flags & CALIB_USE_EXTRINSIC_GUESS) == 0)
{
_Rmat.create(3, 3, rtype);
_Tmat.create(3, 1, rtype);
}
Mat objPt, imgPt, imgPt2, npoints;
//checks that the number of object-space corners matches the number of corners
//detected in each image, then flattens everything into single matrices:
//objPt: (1, total, CV_32FC3) object-space coordinates; imgPt, imgPt2:
//(1, total, CV_32FC2) pixel coordinates; npoints: (1, nimages, CV_32S)
//per-view point counts
collectCalibrationData( _objectPoints, _imagePoints1, _imagePoints2,
objPt, imgPt, &imgPt2, npoints );
//wrap everything in legacy CvMat headers (no data copy; memory is shared)
CvMat c_objPt = cvMat(objPt), c_imgPt = cvMat(imgPt), c_imgPt2 = cvMat(imgPt2), c_npoints = cvMat(npoints);
CvMat c_cameraMatrix1 = cvMat(cameraMatrix1), c_distCoeffs1 = cvMat(distCoeffs1);
CvMat c_cameraMatrix2 = cvMat(cameraMatrix2), c_distCoeffs2 = cvMat(distCoeffs2);
Mat matR_ = _Rmat.getMat(), matT_ = _Tmat.getMat();
CvMat c_matR = cvMat(matR_), c_matT = cvMat(matT_), c_matE, c_matF, c_matErr;
//are E, F and the per-view errors requested by the caller?
bool E_needed = _Emat.needed(), F_needed = _Fmat.needed(), errors_needed = _perViewErrors.needed();
//allocate only the optional outputs that were requested
Mat matE_, matF_, matErr_;
if( E_needed )
{
_Emat.create(3, 3, rtype);
matE_ = _Emat.getMat();
c_matE = cvMat(matE_);
}
if( F_needed )
{
_Fmat.create(3, 3, rtype);
matF_ = _Fmat.getMat();
c_matF = cvMat(matF_);
}
if( errors_needed )
{
int nimages = int(_objectPoints.total());
_perViewErrors.create(nimages, 2, CV_64F);
matErr_ = _perViewErrors.getMat();
c_matErr = cvMat(matErr_);
}
//the actual implementation
double err = cvStereoCalibrateImpl(&c_objPt, &c_imgPt, &c_imgPt2, &c_npoints, &c_cameraMatrix1,
&c_distCoeffs1, &c_cameraMatrix2, &c_distCoeffs2, cvSize(imageSize), &c_matR,
&c_matT, E_needed ? &c_matE : NULL, F_needed ? &c_matF : NULL,
errors_needed ? &c_matErr : NULL, flags, cvTermCriteria(criteria));
//write the (possibly refined) intrinsics back to the caller's arrays
cameraMatrix1.copyTo(_cameraMatrix1);
cameraMatrix2.copyTo(_cameraMatrix2);
distCoeffs1.copyTo(_distCoeffs1);
distCoeffs2.copyTo(_distCoeffs2);
return err;
}
cvStereoCalibrateImpl:参数含义与 cv::stereoCalibrate 一一对应,不再赘述。
功能概述:
1、建立各参数的 double 数组,以及与其共用内存的 CvMat 矩阵头;
2、获取各参数的初值:若内参、畸变没有提供初始值,则先调用单目标定核心函数 cvCalibrateCamera2 对每个相机做单目标定;
3、构建 LM(Levenberg-Marquardt)求解器,并用各视图左右目相对位姿的逐分量中位数初始化相对外参;
4、利用投影(cvProjectPoints2)计算重投影误差及其雅可比,累加正规方程进行迭代优化。
//Main implementation: Levenberg-Marquardt stereo calibration over the legacy C API.
//1) bind raw double arrays to CvMat headers that share the same memory;
//2) obtain initial values for all parameters — when no intrinsic/distortion
//   guess is given, run monocular calibration for each camera;
//3) build the LM solver and seed it (per-view left poses + median relative pose);
//4) iterate: project the 3-D points, accumulate the normal equations from the
//   reprojection residuals and their Jacobians, and optimize.
static double cvStereoCalibrateImpl( const CvMat* _objectPoints, const CvMat* _imagePoints1,
const CvMat* _imagePoints2, const CvMat* _npoints,
CvMat* _cameraMatrix1, CvMat* _distCoeffs1,
CvMat* _cameraMatrix2, CvMat* _distCoeffs2,
CvSize imageSize, CvMat* matR, CvMat* matT,
CvMat* matE, CvMat* matF,
CvMat* perViewErr, int flags,
CvTermCriteria termCrit )
{
//number of intrinsic parameters per camera: 4 (fx,fy,cx,cy) + 14 distortion
const int NINTRINSIC = 18;
//working buffers, released automatically through Ptr
Ptr<CvMat> npoints, imagePoints[2], objectPoints, RT0;
double reprojErr = 0;
//raw storage: camera matrices A and distortion coefficient arrays dk
double A[2][9], dk[2][14]={{0}}, rlr[9];
//CvMat headers K and Dist share memory with A and dk above
CvMat K[2], Dist[2], om_LR, T_LR;
CvMat R_LR = cvMat(3, 3, CV_64F, rlr);
//loop counters and sizes
int i, k, p, ni = 0, ofs, nimages, pointsTotal, maxPoints = 0;
int nparams;
bool recomputeIntrinsics = false;
double aspectRatio[2] = {0};
//argument checks
CV_Assert( CV_IS_MAT(_imagePoints1) && CV_IS_MAT(_imagePoints2) &&
CV_IS_MAT(_objectPoints) && CV_IS_MAT(_npoints) &&
CV_IS_MAT(matR) && CV_IS_MAT(matT) );
CV_Assert( CV_ARE_TYPES_EQ(_imagePoints1, _imagePoints2) &&
CV_ARE_DEPTHS_EQ(_imagePoints1, _objectPoints) );
CV_Assert( (_npoints->cols == 1 || _npoints->rows == 1) &&
CV_MAT_TYPE(_npoints->type) == CV_32SC1 );
//number of views: _npoints is 1xN or Nx1 (asserted above), so rows + cols - 1 == N
nimages = _npoints->cols + _npoints->rows - 1;
npoints.reset(cvCreateMat( _npoints->rows, _npoints->cols, _npoints->type ));
cvCopy( _npoints, npoints );
//scan all views: track the largest per-view point count and the total number of
//points (with every chessboard fully detected this is nimages * corners-per-board)
for( i = 0, pointsTotal = 0; i < nimages; i++ )
{
maxPoints = MAX(maxPoints, npoints->data.i[i]);
pointsTotal += npoints->data.i[i];
}
//convert the object points to CV_64F and reshape to 1 row, 3 channels
objectPoints.reset(cvCreateMat( _objectPoints->rows, _objectPoints->cols,
CV_64FC(CV_MAT_CN(_objectPoints->type))));
cvConvert( _objectPoints, objectPoints );
cvReshape( objectPoints, objectPoints, 3, 1 );
//do the following for both cameras
for( k = 0; k < 2; k++ )
{
//select this camera's inputs
const CvMat* points = k == 0 ? _imagePoints1 : _imagePoints2;
const CvMat* cameraMatrix = k == 0 ? _cameraMatrix1 : _cameraMatrix2;
const CvMat* distCoeffs = k == 0 ? _distCoeffs1 : _distCoeffs2;
//validate the image-point layout
int cn = CV_MAT_CN(_imagePoints1->type);
CV_Assert( (CV_MAT_DEPTH(_imagePoints1->type) == CV_32F ||
CV_MAT_DEPTH(_imagePoints1->type) == CV_64F) &&
((_imagePoints1->rows == pointsTotal && _imagePoints1->cols*cn == 2) ||
(_imagePoints1->rows == 1 && _imagePoints1->cols == pointsTotal && cn == 2)) );
//bind the intrinsic matrix and distortion vector to the raw arrays
K[k] = cvMat(3,3,CV_64F,A[k]);
Dist[k] = cvMat(1,14,CV_64F,dk[k]);
//convert the 2-D points to CV_64F and reshape to 1 row, 2 channels
imagePoints[k].reset(cvCreateMat( points->rows, points->cols, CV_64FC(CV_MAT_CN(points->type))));
cvConvert( points, imagePoints[k] );
cvReshape( imagePoints[k], imagePoints[k], 2, 1 );
//if any of fix-intrinsic / use-intrinsic-guess / fix-aspect-ratio /
//fix-focal-length is set, take the supplied camera matrix as the start value
if( flags & (CALIB_FIX_INTRINSIC|CALIB_USE_INTRINSIC_GUESS|
CALIB_FIX_ASPECT_RATIO|CALIB_FIX_FOCAL_LENGTH) )
cvConvert( cameraMatrix, &K[k] );
//likewise, copy the supplied distortion coefficients when they are (partly)
//fixed or used as a guess
if( flags & (CALIB_FIX_INTRINSIC|CALIB_USE_INTRINSIC_GUESS|
CALIB_FIX_K1|CALIB_FIX_K2|CALIB_FIX_K3|CALIB_FIX_K4|CALIB_FIX_K5|CALIB_FIX_K6|CALIB_FIX_TANGENT_DIST) )
{
CvMat tdist = cvMat( distCoeffs->rows, distCoeffs->cols,
CV_MAKETYPE(CV_64F,CV_MAT_CN(distCoeffs->type)), Dist[k].data.db );
cvConvert( distCoeffs, &tdist );
}
//otherwise (intrinsics neither fixed nor guessed) compute them from scratch
if( !(flags & (CALIB_FIX_INTRINSIC|CALIB_USE_INTRINSIC_GUESS)))
{
//core monocular calibration routine
cvCalibrateCamera2( objectPoints, imagePoints[k],
npoints, imageSize, &K[k], &Dist[k], NULL, NULL, flags );
}
}
//force identical focal lengths for the two cameras: simply average them
if( flags & CALIB_SAME_FOCAL_LENGTH )
{
static const int avg_idx[] = { 0, 4, 2, 5, -1 };
for( k = 0; avg_idx[k] >= 0; k++ )
A[0][avg_idx[k]] = A[1][avg_idx[k]] = (A[0][avg_idx[k]] + A[1][avg_idx[k]])*0.5;
}
//CALIB_FIX_ASPECT_RATIO: record each camera's fx/fy ratio so it stays fixed
if( flags & CALIB_FIX_ASPECT_RATIO )
{
for( k = 0; k < 2; k++ )
aspectRatio[k] = A[k][0]/A[k][4];
}
//intrinsics are re-optimized unless CALIB_FIX_INTRINSIC is set
recomputeIntrinsics = (flags & CALIB_FIX_INTRINSIC) == 0;
//these buffers define the optimization layout; annotator's sketch follows
/*
The problem minimizes the 2-D reprojection error min ||u - u'||. The parameter
vector X has length 6*(nimages+1) + NINTRINSIC*2: the first 6 entries are the
left-to-right relative pose, then 6 pose parameters per view (left camera
w.r.t. the board), then both cameras' intrinsics. Splitting the Jacobian J into
per-view blocks (J_LR: w.r.t. the relative pose, Je: w.r.t. the per-view pose,
Ji: w.r.t. the intrinsics), J =
J_LR1 Je1 0 0 ... 0 Ji1
J_LR2 0 Je2 0 ... 0 Ji2
J_LR3 0 0 Je3 ... 0 Ji3
... ... ... ... ... ... ...
J_LRn 0 0 0 ... Jen Jin
J^T =
J_LR1 J_LR2 J_LR3 ... J_LRn
Je1 0 0 ... 0
0 Je2 0 ... 0
0 0 Je3 ... 0
... ... ... ... ...
Ji1 Ji2 Ji3 ... Jin
JtJ = (symmetric, so only the upper half is written)
sum(J_LRk^T*J_LRk) J_LR1^T*Je1 J_LR1^T*Je2 ... J_LR1^T*Jen sum(J_LRk^T*Jik)
Je1^T*Je1 0 ... 0 Je1^T*Ji1
Je2^T*Je2 ... 0 Je2^T*Ji2
... ... ...
Jen^T*Jen Jen^T*Jin
sum(Jik^T*Jik)
JtErr is the right-hand side of the normal equations: JtErr = J^T * error
sum(J_LRk^T*Jik)
Je1^T*Ji1
Je2^T*Ji2
...
Jen^T*Jin
sum(Jik^T*Jik)
*/
Mat err( maxPoints*2, 1, CV_64F );
Mat Je( maxPoints*2, 6, CV_64F );
Mat J_LR( maxPoints*2, 6, CV_64F );
Mat Ji( maxPoints*2, NINTRINSIC, CV_64F, Scalar(0) );
// we optimize for the inter-camera R(3),t(3), then, optionally,
// for intrinisic parameters of each camera ((fx,fy,cx,cy,k1,k2,p1,p2) ~ 8 parameters).
//6 = 3 rotation DOF + 3 translation DOF; 18 intrinsics per camera:
//fx,fy,cx,cy,k1,k2,p1,p2,k3,k4,k5,k6,s1,s2,s3,s4,taux,tauy
//why nimages+1? the unknowns are each view's pose plus the intrinsics; naively
//that is 2 cameras * 6 DOF * nimages + intrinsics, but the right pose is always
//(left pose) composed with one constant left-to-right transform — which is the
//quantity we actually want — so the parameterization reduces to:
//relative pose (6) + left-camera pose per view (6*nimages) + intrinsics (2*18)
nparams = 6*(nimages+1) + (recomputeIntrinsics ? NINTRINSIC*2 : 0);
//CvLevMarq(#params, #errors — 0 means the caller accumulates JtJ/JtErr itself
//and must therefore supply the Jacobian products, termination criteria)
CvLevMarq solver( nparams, 0, termCrit );
if(flags & CALIB_USE_LU) {
solver.solveMethod = DECOMP_LU;
}
//when the intrinsics are optimized, set up the parameter mask
if( recomputeIntrinsics )
{
// solver.mask flags which parameters are optimized: 0 = frozen, 1 = free;
// it has the same length as solver.param
uchar* imask = solver.mask->data.ptr + nparams - NINTRINSIC*2;
//rational-model terms k4,k5,k6 are only optimized when the model is enabled
if( !(flags & CALIB_RATIONAL_MODEL) )
flags |= CALIB_FIX_K4 | CALIB_FIX_K5 | CALIB_FIX_K6;
//thin-prism terms s1..s4 likewise
if( !(flags & CALIB_THIN_PRISM_MODEL) )
flags |= CALIB_FIX_S1_S2_S3_S4;
//tilted-sensor terms tauX, tauY likewise
if( !(flags & CALIB_TILTED_MODEL) )
flags |= CALIB_FIX_TAUX_TAUY;
//per the flags, freeze fixed parameters — for both cameras at once; with a
//fixed aspect ratio only fy is free and fx follows the recorded ratio
if( flags & CALIB_FIX_ASPECT_RATIO )
imask[0] = imask[NINTRINSIC] = 0;
//fix fx and fy
if( flags & CALIB_FIX_FOCAL_LENGTH )
imask[0] = imask[1] = imask[NINTRINSIC] = imask[NINTRINSIC+1] = 0;
//fix cx and cy
if( flags & CALIB_FIX_PRINCIPAL_POINT )
imask[2] = imask[3] = imask[NINTRINSIC+2] = imask[NINTRINSIC+3] = 0;
//fix the tangential distortion p1,p2
if( flags & (CALIB_ZERO_TANGENT_DIST|CALIB_FIX_TANGENT_DIST) )
imask[6] = imask[7] = imask[NINTRINSIC+6] = imask[NINTRINSIC+7] = 0;
//fix individual radial distortion terms
if( flags & CALIB_FIX_K1 )
imask[4] = imask[NINTRINSIC+4] = 0;
if( flags & CALIB_FIX_K2 )
imask[5] = imask[NINTRINSIC+5] = 0;
if( flags & CALIB_FIX_K3 )
imask[8] = imask[NINTRINSIC+8] = 0;
if( flags & CALIB_FIX_K4 )
imask[9] = imask[NINTRINSIC+9] = 0;
if( flags & CALIB_FIX_K5 )
imask[10] = imask[NINTRINSIC+10] = 0;
if( flags & CALIB_FIX_K6 )
imask[11] = imask[NINTRINSIC+11] = 0;
//fix the thin-prism terms
if( flags & CALIB_FIX_S1_S2_S3_S4 )
{
imask[12] = imask[NINTRINSIC+12] = 0;
imask[13] = imask[NINTRINSIC+13] = 0;
imask[14] = imask[NINTRINSIC+14] = 0;
imask[15] = imask[NINTRINSIC+15] = 0;
}
//fix the tilted-model terms
if( flags & CALIB_FIX_TAUX_TAUY )
{
imask[16] = imask[NINTRINSIC+16] = 0;
imask[17] = imask[NINTRINSIC+17] = 0;
}
}
//table of every view's left-to-right relative pose [om|t], used below to take
//the per-component median as the initial estimate
// storage for initial [om(R){i}|t{i}] (in order to compute the median for each component)
RT0.reset(cvCreateMat( 6, nimages, CV_64F ));
/*
Compute initial estimate of pose
For each image, compute:
R(om) is the rotation matrix of om
om(R) is the rotation vector of R
R_ref = R(om_right) * R(om_left)'
T_ref_list = [T_ref_list; T_right - R_ref * T_left]
om_ref_list = {om_ref_list; om(R_ref)]
om = median(om_ref_list)
T = median(T_ref_list)
*/
//iterate over all views
for( i = ofs = 0; i < nimages; ofs += ni, i++ )
{
//number of points detected in view i
ni = npoints->data.i[i];
CvMat objpt_i;
//om: rotation vectors, r: rotation matrices, t: translations (raw storage)
double _om[2][3], r[2][9], t[2][3];
//CvMat headers over the same storage
CvMat om[2], R[2], T[2], imgpt_i[2];
//object points of this view
objpt_i = cvMat(1, ni, CV_64FC3, objectPoints->data.db + ofs*3);
//for each of the two cameras:
for( k = 0; k < 2; k++ )
{
//bind this view's pixel coordinates and the pose outputs to their buffers
imgpt_i[k] = cvMat(1, ni, CV_64FC2, imagePoints[k]->data.db + ofs*2);
om[k] = cvMat(3, 1, CV_64F, _om[k]);
R[k] = cvMat(3, 3, CV_64F, r[k]);
T[k] = cvMat(3, 1, CV_64F, t[k]);
//solve the PnP problem: rotation vector and translation taking the board
//(world) frame into camera k's frame
cvFindExtrinsicCameraParams2( &objpt_i, &imgpt_i[k], &K[k], &Dist[k], &om[k], &T[k] );
//rotation vector -> rotation matrix
cvRodrigues2( &om[k], &R[k] );
//store the left camera's pose as this view's parameter block; anchoring the
//left pose fixes the gauge, analogous to fixing a vertex in g2o
if( k == 0 )
{
// save initial om_left and T_left
//param holds doubles (db); one 6-vector per view, offset by 6 because the
//first 6 entries are reserved for the relative pose
solver.param->data.db[(i+1)*6] = _om[0][0];
solver.param->data.db[(i+1)*6 + 1] = _om[0][1];
solver.param->data.db[(i+1)*6 + 2] = _om[0][2];
solver.param->data.db[(i+1)*6 + 3] = t[0][0];
solver.param->data.db[(i+1)*6 + 4] = t[0][1];
solver.param->data.db[(i+1)*6 + 5] = t[0][2];
}
}
//generalized matrix multiply: dst = (alpha*src1)*src2 + (beta*src3);
//CV_GEMM_B_T transposes src2. Here: R[0] <- R_right * R_left^T (relative
//rotation), then T[1] <- T_right - R_rel * T_left (relative translation)
cvGEMM( &R[1], &R[0], 1, 0, 0, &R[0], CV_GEMM_B_T );
cvGEMM( &R[0], &T[0], -1, &T[1], 1, &T[1] );
//Rodrigues: relative rotation matrix -> rotation vector
cvRodrigues2( &R[0], &T[0] );
//note: cvGEMM above overwrote T[1] with the relative translation and
//cvRodrigues2 wrote the relative rotation vector into T[0]; since t[] shares
//memory with T[], t now holds this view's left-to-right relative pose, NOT
//the board-to-camera extrinsics
RT0->data.db[i] = t[0][0];
RT0->data.db[i + nimages] = t[0][1];
RT0->data.db[i + nimages*2] = t[0][2];
RT0->data.db[i + nimages*3] = t[1][0];
RT0->data.db[i + nimages*4] = t[1][1];
RT0->data.db[i + nimages*5] = t[1][2];
}
//if an extrinsic guess was supplied, seed R,T from it directly
if(flags & CALIB_USE_EXTRINSIC_GUESS)
{
Vec3d R, T;
cvarrToMat(matT).convertTo(T, CV_64F);
if( matR->rows == 3 && matR->cols == 3 )
Rodrigues(cvarrToMat(matR), R);
else
cvarrToMat(matR).convertTo(R, CV_64F);
solver.param->data.db[0] = R[0];
solver.param->data.db[1] = R[1];
solver.param->data.db[2] = R[2];
solver.param->data.db[3] = T[0];
solver.param->data.db[4] = T[1];
solver.param->data.db[5] = T[2];
}
//no guess: use the per-component median over all views
else
{
// find the medians and save the first 6 parameters
for( i = 0; i < 6; i++ )
{
//qsort(start pointer, element count, element size, comparator)
qsort( RT0->data.db + i*nimages, nimages, CV_ELEM_SIZE(RT0->type), dbCmp );
//odd nimages: middle element; even: average of the two middle elements
solver.param->data.db[i] = nimages % 2 != 0 ? RT0->data.db[i*nimages + nimages/2] :
(RT0->data.db[i*nimages + nimages/2 - 1] + RT0->data.db[i*nimages + nimages/2])*0.5;
}
}
//when intrinsics are optimized, seed their initial values too
if( recomputeIntrinsics )
//for both cameras
for( k = 0; k < 2; k++ )
{
//each camera owns its own slice of the parameter vector
double* iparam = solver.param->data.db + (nimages+1)*6 + k*NINTRINSIC;
if( flags & CALIB_ZERO_TANGENT_DIST )
dk[k][2] = dk[k][3] = 0;
// iparam layout: fx, fy, cx, cy
iparam[0] = A[k][0]; iparam[1] = A[k][4]; iparam[2] = A[k][2]; iparam[3] = A[k][5];
// then the 14 distortion coefficients
iparam[4] = dk[k][0]; iparam[5] = dk[k][1]; iparam[6] = dk[k][2];
iparam[7] = dk[k][3]; iparam[8] = dk[k][4]; iparam[9] = dk[k][5];
iparam[10] = dk[k][6]; iparam[11] = dk[k][7];
iparam[12] = dk[k][8];
iparam[13] = dk[k][9];
iparam[14] = dk[k][10];
iparam[15] = dk[k][11];
iparam[16] = dk[k][12];
iparam[17] = dk[k][13];
}
//views of the relative rotation vector and translation inside the parameter vector
om_LR = cvMat(3, 1, CV_64F, solver.param->data.db);
T_LR = cvMat(3, 1, CV_64F, solver.param->data.db + 3);
//LM iteration loop
for(;;)
{
//per-iteration buffers for the solver interface
const CvMat* param = 0;
CvMat *JtJ = 0, *JtErr = 0;
double *_errNorm = 0;
double _omR[3], _tR[3];
double _dr3dr1[9], _dr3dr2[9], /*_dt3dr1[9],*/ _dt3dr2[9], _dt3dt1[9], _dt3dt2[9];
CvMat dr3dr1 = cvMat(3, 3, CV_64F, _dr3dr1);
CvMat dr3dr2 = cvMat(3, 3, CV_64F, _dr3dr2);
//CvMat dt3dr1 = cvMat(3, 3, CV_64F, _dt3dr1);
CvMat dt3dr2 = cvMat(3, 3, CV_64F, _dt3dr2);
CvMat dt3dt1 = cvMat(3, 3, CV_64F, _dt3dt1);
CvMat dt3dt2 = cvMat(3, 3, CV_64F, _dt3dt2);
CvMat om[2], T[2], imgpt_i[2];
//updateAlt returns false once the termination criteria are met; JtJ/JtErr are
//the normal-equation left- and right-hand sides, _errNorm gets the new error
if( !solver.updateAlt( param, JtJ, JtErr, _errNorm ))
break;
reprojErr = 0;
//current relative rotation matrix
cvRodrigues2( &om_LR, &R_LR );
//bind the right camera's composed pose outputs
om[1] = cvMat(3,1,CV_64F,_omR);
T[1] = cvMat(3,1,CV_64F,_tR);
//when intrinsics are optimized, apply the coupling constraints and unpack them
if( recomputeIntrinsics )
{
//iparam: current parameters; ipparam: previous step's parameters — the solver
//keeps both so an update can be evaluated/undone and convergence checked
double* iparam = solver.param->data.db + (nimages+1)*6;
double* ipparam = solver.prevParam->data.db + (nimages+1)*6;
//same focal length for both cameras: mirror camera 0's fx,fy into camera 1
if( flags & CALIB_SAME_FOCAL_LENGTH )
{
iparam[NINTRINSIC] = iparam[0];
iparam[NINTRINSIC+1] = iparam[1];
ipparam[NINTRINSIC] = ipparam[0];
ipparam[NINTRINSIC+1] = ipparam[1];
}
//fixed aspect ratio: fx = fy * ratio
if( flags & CALIB_FIX_ASPECT_RATIO )
{
iparam[0] = iparam[1]*aspectRatio[0];
iparam[NINTRINSIC] = iparam[NINTRINSIC+1]*aspectRatio[1];
ipparam[0] = ipparam[1]*aspectRatio[0];
ipparam[NINTRINSIC] = ipparam[NINTRINSIC+1]*aspectRatio[1];
}
//unpack the intrinsics back into A and dk for both cameras (these arrays are
//effectively the function's running outputs)
for( k = 0; k < 2; k++ )
{
A[k][0] = iparam[k*NINTRINSIC+0];
A[k][4] = iparam[k*NINTRINSIC+1];
A[k][2] = iparam[k*NINTRINSIC+2];
A[k][5] = iparam[k*NINTRINSIC+3];
dk[k][0] = iparam[k*NINTRINSIC+4];
dk[k][1] = iparam[k*NINTRINSIC+5];
dk[k][2] = iparam[k*NINTRINSIC+6];
dk[k][3] = iparam[k*NINTRINSIC+7];
dk[k][4] = iparam[k*NINTRINSIC+8];
dk[k][5] = iparam[k*NINTRINSIC+9];
dk[k][6] = iparam[k*NINTRINSIC+10];
dk[k][7] = iparam[k*NINTRINSIC+11];
dk[k][8] = iparam[k*NINTRINSIC+12];
dk[k][9] = iparam[k*NINTRINSIC+13];
dk[k][10] = iparam[k*NINTRINSIC+14];
dk[k][11] = iparam[k*NINTRINSIC+15];
dk[k][12] = iparam[k*NINTRINSIC+16];
dk[k][13] = iparam[k*NINTRINSIC+17];
}
}
//iterate over all views
for( i = ofs = 0; i < nimages; ofs += ni, i++ )
{
ni = npoints->data.i[i];
CvMat objpt_i;
//left camera pose for view i (board-to-camera transform)
om[0] = cvMat(3,1,CV_64F,solver.param->data.db+(i+1)*6);
T[0] = cvMat(3,1,CV_64F,solver.param->data.db+(i+1)*6+3);
//when the solver requests Jacobians, also obtain the partial derivatives of
//the composed (right-camera) pose for the chain rule below
if( JtJ || JtErr )
//cf. MATLAB's compose_motion.m; 1: left, 2: relative, 3: right
//(left rot, left trans, relative rot, relative trans -> right rot,
//right trans, partial-derivative matrices...)
cvComposeRT( &om[0], &T[0], &om_LR, &T_LR, &om[1], &T[1], &dr3dr1, 0,
&dr3dr2, 0, 0, &dt3dt1, &dt3dr2, &dt3dt2 );
else
cvComposeRT( &om[0], &T[0], &om_LR, &T_LR, &om[1], &T[1] );
//object points of this view
objpt_i = cvMat(1, ni, CV_64FC3, objectPoints->data.db + ofs*3);
//resize the per-view buffers: ni points, 2-D residuals, hence ni*2 rows
err.resize(ni*2); Je.resize(ni*2); J_LR.resize(ni*2); Ji.resize(ni*2);
//view err as a 1-row, ni-column, 2-channel matrix of projected points
CvMat tmpimagePoints = cvMat(err.reshape(2, 1));
//Ji slice: derivative w.r.t. focal length
CvMat dpdf = cvMat(Ji.colRange(0, 2));
//Ji slice: derivative w.r.t. principal point
CvMat dpdc = cvMat(Ji.colRange(2, 4));
//Ji slice: derivative w.r.t. distortion coefficients
CvMat dpdk = cvMat(Ji.colRange(4, NINTRINSIC));
//Je slice: derivative w.r.t. rotation
CvMat dpdrot = cvMat(Je.colRange(0, 3));
//Je slice: derivative w.r.t. translation
CvMat dpdt = cvMat(Je.colRange(3, 6));
//process the left camera, then the right
for( k = 0; k < 2; k++ )
{
//this view's measured image points for camera k (1 x ni, 2 channels)
imgpt_i[k] = cvMat(1, ni, CV_64FC2, imagePoints[k]->data.db + ofs*2);
//Jacobians requested by the solver
if( JtJ || JtErr )
//project the 3-D points into camera k (left first, then right); the returned
//partial derivatives are what drives the normal equations
cvProjectPoints2( &objpt_i, &om[k], &T[k], &K[k], &Dist[k],
&tmpimagePoints, &dpdrot, &dpdt, &dpdf, &dpdc, &dpdk,
(flags & CALIB_FIX_ASPECT_RATIO) ? aspectRatio[k] : 0);
else
cvProjectPoints2( &objpt_i, &om[k], &T[k], &K[k], &Dist[k], &tmpimagePoints );
//tmpimagePoints = projected - measured: the residual (shares memory with err)
cvSub( &tmpimagePoints, &imgpt_i[k], &tmpimagePoints );
//CALC_J: the solver is in its Jacobian-accumulation phase
if( solver.state == CvLevMarq::CALC_J )
{
//iofs: offset of camera k's intrinsic block; eofs: offset of view i's pose
//block within the parameter vector (the blocks partition JtJ/JtErr)
int iofs = (nimages+1)*6 + k*NINTRINSIC, eofs = (i+1)*6;
assert( JtJ && JtErr );
Mat _JtJ(cvarrToMat(JtJ)), _JtErr(cvarrToMat(JtErr));
//right camera: its derivatives were taken w.r.t. the composed pose and must
//be mapped back onto the left pose and the relative pose
if( k == 1 )
{
// d(err_{x|y}R) ~ de3
// convert de3/{dr3,dt3} => de3{dr1,dt1} & de3{dr2,dt2}
//chain rule: propagate everything computed for the right camera back to the
//derivatives w.r.t. the left pose (1) and the relative pose (2)
for( p = 0; p < ni*2; p++ )
{
CvMat de3dr3 = cvMat( 1, 3, CV_64F, Je.ptr(p));
CvMat de3dt3 = cvMat( 1, 3, CV_64F, de3dr3.data.db + 3 );
CvMat de3dr2 = cvMat( 1, 3, CV_64F, J_LR.ptr(p) );
CvMat de3dt2 = cvMat( 1, 3, CV_64F, de3dr2.data.db + 3 );
double _de3dr1[3], _de3dt1[3];
CvMat de3dr1 = cvMat( 1, 3, CV_64F, _de3dr1 );
CvMat de3dt1 = cvMat( 1, 3, CV_64F, _de3dt1 );
cvMatMul( &de3dr3, &dr3dr1, &de3dr1 );
cvMatMul( &de3dt3, &dt3dt1, &de3dt1 );
cvMatMul( &de3dr3, &dr3dr2, &de3dr2 );
cvMatMulAdd( &de3dt3, &dt3dr2, &de3dr2, &de3dr2 );
cvMatMul( &de3dt3, &dt3dt2, &de3dt2 );
cvCopy( &de3dr1, &de3dr3 );
cvCopy( &de3dt1, &de3dt3 );
}
//accumulate the relative-pose blocks of JtJ and JtErr
_JtJ(Rect(0, 0, 6, 6)) += J_LR.t()*J_LR;
_JtJ(Rect(eofs, 0, 6, 6)) = J_LR.t()*Je;
_JtErr.rowRange(0, 6) += J_LR.t()*err;
}
//the left camera does not depend on the relative pose, so its J_LR block is zero
_JtJ(Rect(eofs, eofs, 6, 6)) += Je.t()*Je;
_JtErr.rowRange(eofs, eofs + 6) += Je.t()*err;
//intrinsic blocks (only when intrinsics are optimized)
if( recomputeIntrinsics )
{
_JtJ(Rect(iofs, iofs, NINTRINSIC, NINTRINSIC)) += Ji.t()*Ji;
_JtJ(Rect(iofs, eofs, NINTRINSIC, 6)) += Je.t()*Ji;
if( k == 1 )
{
_JtJ(Rect(iofs, 0, NINTRINSIC, 6)) += J_LR.t()*Ji;
}
_JtErr.rowRange(iofs, iofs + NINTRINSIC) += Ji.t()*err;
}
}
//per-view RMS error for this camera
double viewErr = norm(err, NORM_L2SQR);
if(perViewErr)
perViewErr->data.db[i*2 + k] = std::sqrt(viewErr/ni);
//accumulate the total (squared) reprojection error
reprojErr += viewErr;
}
}
//report the error back to the solver
if(_errNorm)
*_errNorm = reprojErr;
}
//convert the result: output the rotation as a vector or a matrix depending on
//the shape of the caller-provided matR
cvRodrigues2( &om_LR, &R_LR );
if( matR->rows == 1 || matR->cols == 1 )
cvConvert( &om_LR, matR );
else
cvConvert( &R_LR, matR );
cvConvert( &T_LR, matT );
//write back the intrinsics if they were re-optimized
if( recomputeIntrinsics )
{
cvConvert( &K[0], _cameraMatrix1 );
cvConvert( &K[1], _cameraMatrix2 );
for( k = 0; k < 2; k++ )
{
CvMat* distCoeffs = k == 0 ? _distCoeffs1 : _distCoeffs2;
CvMat tdist = cvMat( distCoeffs->rows, distCoeffs->cols,
CV_MAKETYPE(CV_64F,CV_MAT_CN(distCoeffs->type)), Dist[k].data.db );
cvConvert( &tdist, distCoeffs );
}
}
//compute the essential and fundamental matrices on request:
//E = [t]x * R, F = K2^-T * E * K1^-1 (F is scaled so its last element is 1)
if( matE || matF )
{
double* t = T_LR.data.db;
double tx[] =
{
0, -t[2], t[1],
t[2], 0, -t[0],
-t[1], t[0], 0
};
CvMat Tx = cvMat(3, 3, CV_64F, tx);
double e[9], f[9];
CvMat E = cvMat(3, 3, CV_64F, e);
CvMat F = cvMat(3, 3, CV_64F, f);
cvMatMul( &Tx, &R_LR, &E );
if( matE )
cvConvert( &E, matE );
if( matF )
{
double ik[9];
CvMat iK = cvMat(3, 3, CV_64F, ik);
cvInvert(&K[1], &iK);
cvGEMM( &iK, &E, 1, 0, 0, &E, CV_GEMM_A_T );
cvInvert(&K[0], &iK);
cvMatMul(&E, &iK, &F);
cvConvertScale( &F, matF, fabs(f[8]) > 0 ? 1./f[8] : 1 );
}
}
//return the RMS reprojection error per point coordinate over all points
return std::sqrt(reprojErr/(pointsTotal*2));
}
算法的核心是 cvComposeRT 和 cvProjectPoints2 这两个函数:前者将左目外参与左右目相对位姿复合得到右目外参,并给出链式求导所需的偏导数;后者完成 3D 点到 2D 像素的投影,并输出投影点对各内、外参数的雅可比矩阵。