// NOTE: The logic of cv::omni::StereoCalibrate mirrors cv::stereoCalibrate; this file is a lightly modified version of the OpenCV (omnidir) implementation.
// Omnidirectional (Mei unified model) monocular calibration.
// calibrate(pattern points in the world frame, detected image points, image size,
//           intrinsic matrix K (in/out), xi (in/out), distortion D (in/out),
//           per-view rotation vectors omAll (out), per-view translations tAll (out),
//           fix-parameter flags, iteration criteria, surviving-view indices idx (out))
// Returns the RMS reprojection error.
double calibrate(cv::InputArrayOfArrays patternPoints, cv::InputArrayOfArrays imagePoints, cv::Size size, cv::InputOutputArray K, cv::InputOutputArray xi, cv::InputOutputArray D, cv::OutputArrayOfArrays omAll, cv::OutputArrayOfArrays tAll, int flags, cv::TermCriteria criteria, cv::OutputArray idx)
{
// ---- Parameter checks ----
CV_Assert(!patternPoints.empty() && !imagePoints.empty() && patternPoints.total() == imagePoints.total());
CV_Assert((patternPoints.type() == CV_64FC3 && imagePoints.type() == CV_64FC2) ||
(patternPoints.type() == CV_32FC3 && imagePoints.type() == CV_32FC2));
CV_Assert(patternPoints.getMat(0).channels() == 3 && imagePoints.getMat(0).channels() == 2);
CV_Assert((!K.empty() && K.size() == cv::Size(3,3)) || K.empty());
CV_Assert((!D.empty() && D.total() == 4) || D.empty());
CV_Assert((!xi.empty() && xi.total() == 1) || xi.empty());
CV_Assert((!omAll.empty() && omAll.depth() == patternPoints.depth()) || omAll.empty());
CV_Assert((!tAll.empty() && tAll.depth() == patternPoints.depth()) || tAll.empty());
int depth = patternPoints.depth();
// _patternPoints: 3D reference points; _imagePoints: 2D detections.
// Work internally in double precision regardless of the input depth.
std::vector<cv::Mat> _patternPoints, _imagePoints;
for (int i = 0; i < (int)patternPoints.total(); ++i)
{
_patternPoints.push_back(patternPoints.getMat(i));
_imagePoints.push_back(imagePoints.getMat(i));
if (depth == CV_32F)
{
_patternPoints[i].convertTo(_patternPoints[i], CV_64FC3);
_imagePoints[i].convertTo(_imagePoints[i], CV_64FC2);
}
}
double _xi;
// Internal double-precision copies of the parameters being optimized.
std::vector<cv::Vec3d> _omAll, _tAll;
cv::Matx33d _K;
cv::Matx14d _D;
cv::Mat _idx;
// Initialization: estimate initial intrinsics, filter out unusable views,
// and produce per-view R (as rotation vectors) and t for the survivors.
initializeCalibration(_patternPoints, _imagePoints, size, _omAll, _tAll, _K, _xi, _idx);
cout<<"mono initializeCalibration: _K"<<_K<<endl;
std::vector<cv::Mat> _patternPointsTmp = _patternPoints;
std::vector<cv::Mat> _imagePointsTmp = _imagePoints;
_patternPoints.clear();
_imagePoints.clear();
// Keep only the views whose index survived the initialization filter.
for (int i = 0; i < (int)_idx.total(); i++)
{
_patternPoints.push_back(_patternPointsTmp[_idx.at<int>(i)]);
_imagePoints.push_back(_imagePointsTmp[_idx.at<int>(i)]);
}
// Number of usable views.
int n = (int)_patternPoints.size();
// Parameter vector: 10 intrinsic values (fx,fy,s,cx,cy,xi,k1,k2,p1,p2) plus
// 6 extrinsics per view -> 10 + 6*n entries total (count only; see layout below).
cv::Mat finalParam(1, 10 + 6*n, CV_64F);
cv::Mat currentParam(1, 10 + 6*n, CV_64F);
// Pack the current estimates. IMPORTANT layout of currentParam:
// [om_i(3), t_i(3)] for each view i, then fx, fy, s, cx, cy, xi, k1, k2, p1, p2.
encodeParameters(_K, _omAll, _tAll, cv::Mat::zeros(1,4,CV_64F), _xi, currentParam);
// Step-damping factor for the Gauss-Newton update.
const double alpha_smooth = 0.01;
//const double thresh_cond = 1e6;
double change = 1;
int iter ;
for(iter = 0; ; ++iter)
{
// TermCriteria: type==1 (COUNT) stops on iteration count, type==2 (EPS)
// stops when the relative step `change` drops below epsilon, type==3 on either.
if ((criteria.type == 1 && iter >= criteria.maxCount) ||
(criteria.type == 2 && change <= criteria.epsilon) ||
(criteria.type == 3 && (change <= criteria.epsilon || iter >= criteria.maxCount)))
break;
// Damping schedule: alpha_smooth2 = 1 - (1-alpha)^(iter+1), so the effective
// step fraction grows toward 1 as iterations proceed.
double alpha_smooth2 = 1 - std::pow(1 - alpha_smooth, (double)iter + 1.0);
cv::Mat JTJ_inv, JTError,JTJ;
// Regularization epsilon decays as iterations proceed.
double epsilon = 0.01 * std::pow(0.9, (double)iter/10);
// Build the normal equations J^T J and J^T e.
computeJacobian(_patternPoints, _imagePoints, currentParam, JTJ, JTError, flags, epsilon);
cv::Mat JTJ_mean,JTJ2;
// computereduce(JTJ,JTJ2,JTJ_mean);
JTError = JTError; // no-op; leftover from the removed column-scaling experiment above
// NOTE(review): JTJ+epsilon adds epsilon to EVERY element, not epsilon*I.
// This mirrors the upstream OpenCV omnidir implementation — verify intent.
JTJ_inv = cv::Mat(JTJ+epsilon).inv();
// cv::Mat JTJ_inv2 = cv::Mat(JTJ).inv();
// Gauss - Newton step, scaled by the damping factor.
cv::Mat G = alpha_smooth2*JTJ_inv * JTError;
// Re-expand G to full size: fixed (locked) parameters get 0, free ones get their update.
fillFixed(G, flags, n);
// Apply the update.
// G = G.mul(JTJ_mean.t());
finalParam = currentParam + G.t();
// Relative step size, used as the convergence measure.
change = norm(G) / norm(currentParam);
// Carry the updated parameters into the next iteration.
currentParam = finalParam.clone();
if(1)
// cout<<G.rowRange(6*n,6*n+10)<<endl;
{
// cout<<"epsilon:"<<epsilon<<endl;
// cout<<"alpha_smooth2:"<<alpha_smooth2<<endl;
}
// cout<<currentParam.colRange(6*n,6*n+10)<<endl;
// Decode parameters each iteration (debugging aid).
{
decodeParameters(currentParam, _K, _omAll, _tAll, _D, _xi);
// double repr = computeMeanReproErr(_patternPoints, _imagePoints, _K, _D, _xi, _omAll, _tAll);
// std::cout<<"["<<iter<<"]:"<<"repr="<<repr<<std::endl;
}
}
// Unpack the final result.
decodeParameters(currentParam, _K, _omAll, _tAll, _D, _xi);
cout<<"iter:"<<iter<<endl;
cout<<"change:"<<change<<endl;
//double repr = internal::computeMeanReproErr(_patternPoints, _imagePoints, _K, _D, _xi, _omAll, _tAll);
// ---- Copy results back into the caller's output arrays ----
if (omAll.needed())
{
omAll.create((int)_omAll.size(), 1, CV_64FC3);
}
if (tAll.needed())
{
tAll.create((int)_tAll.size(), 1, CV_64FC3);
}
if (omAll.kind() == cv::_InputArray::STD_VECTOR_MAT)
{
for (int i = 0; i < n; ++i)
{
omAll.create(3, 1, CV_64F, i, true);
tAll.create(3, 1, CV_64F, i, true);
cv::Mat tmpom = cv::Mat(_omAll[i]);
cv::Mat tmpt = cv::Mat(_tAll[i]);
tmpom.convertTo(tmpom, CV_64F);
tmpt.convertTo(tmpt, CV_64F);
tmpom.copyTo(omAll.getMat(i));
tmpt.copyTo(tAll.getMat(i));
}
}
else
{
cv::Mat(_omAll).convertTo(omAll, CV_64FC3);
cv::Mat(_tAll).convertTo(tAll, CV_64FC3);
}
if(K.empty())
{
K.create(3, 3, CV_64F);
}
if (D.empty())
{
D.create(1, 4, CV_64F);
}
// K/D were created above if empty, so the empty() checks here always pick type().
cv::Mat(_K).convertTo(K.getMat(), K.empty()? CV_64F : K.type());
cv::Mat(_D).convertTo(D.getMat(), D.empty() ? CV_64F: D.type());
if (xi.empty())
{
xi.create(1, 1, CV_64F);
}
cv::Mat xi_m = cv::Mat(1, 1, CV_64F);
xi_m.at<double>(0) = _xi;
xi_m.convertTo(xi.getMat(), xi.empty() ? CV_64F : xi.type());
if (idx.needed())
{
idx.create(1, (int)_idx.total(), CV_32S);
_idx.copyTo(idx.getMat());
}
// Estimate uncertainties; rms is the value returned to the caller.
// Inputs: 3D pattern points, 2D image points, final parameters; outputs:
// per-parameter error bounds, std deviation, RMS reprojection error.
cv::Vec2d std_error;
double rms;
cv::Mat errors;
estimateUncertainties(_patternPoints, _imagePoints, finalParam, errors, std_error, rms, flags);
return rms;
}
// NOTE: An LM solver was tried here as a replacement, but the results diverged; no solution yet.
// Project the 3D points of one view into the image (3D -> 2D) using the Mei
// unified omnidirectional model, optionally returning the per-point Jacobian
// w.r.t. all model parameters.
void projectPoints(cv::InputArray objectPoints, cv::OutputArray imagePoints,cv::InputArray rvec, cv::InputArray tvec, cv::InputArray K, double xi, cv::InputArray D, cv::OutputArray jacobian)
{
// ---- Parameter checks ----
CV_Assert(objectPoints.type() == CV_64FC3 || objectPoints.type() == CV_32FC3);
CV_Assert((rvec.depth() == CV_64F || rvec.depth() == CV_32F) && rvec.total() == 3);
CV_Assert((tvec.depth() == CV_64F || tvec.depth() == CV_32F) && tvec.total() == 3);
CV_Assert((K.type() == CV_64F || K.type() == CV_32F) && K.size() == cv::Size(3,3));
CV_Assert((D.type() == CV_64F || D.type() == CV_32F) && D.total() == 4);
// Allocate output points (same layout as input, 2 channels).
imagePoints.create(objectPoints.size(), CV_MAKETYPE(objectPoints.depth(), 2));
// Number of points in this view.
int n = (int)objectPoints.total();
// Build om (rotation vector) and T (translation) from either input depth.
cv::Vec3d om = rvec.depth() == CV_32F ? (cv::Vec3d)*rvec.getMat().ptr<cv::Vec3f>() : *rvec.getMat().ptr<cv::Vec3d>();
cv::Vec3d T = tvec.depth() == CV_32F ? (cv::Vec3d)*tvec.getMat().ptr<cv::Vec3f>() : *tvec.getMat().ptr<cv::Vec3d>();
// Focal lengths f, principal point c, skew s, extracted from K.
cv::Vec2d f,c;
double s;
if (K.depth() == CV_32F)
{
cv::Matx33f Kc = K.getMat();
f = cv::Vec2f(Kc(0,0), Kc(1,1));
c = cv::Vec2f(Kc(0,2),Kc(1,2));
s = (double)Kc(0,1);
}
else
{
cv::Matx33d Kc = K.getMat();
f = cv::Vec2d(Kc(0,0), Kc(1,1));
c = cv::Vec2d(Kc(0,2),Kc(1,2));
s = Kc(0,1);
}
// Distortion coefficients (k1, k2, p1, p2).
cv::Vec4d kp = D.depth() == CV_32F ? (cv::Vec4d)*D.getMat().ptr<cv::Vec4f>() : *D.getMat().ptr<cv::Vec4d>();
//Vec<double, 4> kp= (Vec<double,4>)*D.getMat().ptr<Vec<double,4> >();
// Raw pointers to input/output points for both possible depths.
const cv::Vec3d* Xw_alld = objectPoints.getMat().ptr<cv::Vec3d>();
const cv::Vec3f* Xw_allf = objectPoints.getMat().ptr<cv::Vec3f>();
cv::Vec2d* xpd = imagePoints.getMat().ptr<cv::Vec2d>();
cv::Vec2f* xpf = imagePoints.getMat().ptr<cv::Vec2f>();
cv::Matx33d R;
cv::Matx<double, 3, 9> dRdom;
// Rotation vector -> rotation matrix (with derivative dR/dom).
cv::Rodrigues(om, R, dRdom);
// Jacobian rows are written only if the caller asked for them.
JacobianRow *Jn = 0;
if (jacobian.needed())
{
// Parameter order per row: f(2), c(2), s(1), kp(4), om(3), T(3), xi(1).
int nvars = 2+2+1+4+3+3+1; // f,c,s,kp,om,T,xi
// Two rows (x and y residual) per point -> 2n x nvars.
jacobian.create(2*int(n), nvars, CV_64F);
Jn = jacobian.getMat().ptr<JacobianRow>(0);
}
double k1 = kp[0], k2 = kp[1];
double p1 = kp[2], p2 = kp[3];
for (int i = 0; i < n; i++)
{
// convert to camera coordinate
// World-frame pattern point.
cv::Vec3d Xw = objectPoints.depth() == CV_32F ? (cv::Vec3d)Xw_allf[i] : Xw_alld[i];
// World frame -> camera frame.
cv::Vec3d Xc = (cv::Vec3d)(R*Xw + T);
// convert to unit sphere
// Project onto the unit sphere (Mei model step 1).
cv::Vec3d Xs = Xc/cv::norm(Xc);
// convert to normalized image plane
// Shift the projection center by xi along z, then project to the normalized plane.
cv::Vec2d xu = cv::Vec2d(Xs[0]/(Xs[2]+xi), Xs[1]/(Xs[2]+xi));
// Apply radial (k1,k2) and tangential (p1,p2) distortion.
cv::Vec2d xd;
double r2 = xu[0]*xu[0]+xu[1]*xu[1];
double r4 = r2*r2;
xd[0] = xu[0]*(1+k1*r2+k2*r4) + 2*p1*xu[0]*xu[1] + p2*(r2+2*xu[0]*xu[0]);
xd[1] = xu[1]*(1+k1*r2+k2*r4) + p1*(r2+2*xu[1]*xu[1]) + 2*p2*xu[0]*xu[1];
// convert to pixel coordinate
// Apply intrinsics: `final` is the pixel-space point.
cv::Vec2d final;
final[0] = f[0]*xd[0]+s*xd[1]+c[0];
final[1] = f[1]*xd[1]+c[1];
if (objectPoints.depth() == CV_32F)
{
xpf[i] = final;
}
else
{
xpd[i] = final;
}
/*xpd[i][0] = f[0]*xd[0]+s*xd[1]+c[0];
xpd[i][1] = f[1]*xd[1]+c[1];*/
// Jacobian via the chain rule (derived by hand; checked, no issue found).
if (jacobian.needed())
{
double dXcdR_a[] = {Xw[0],Xw[1],Xw[2],0,0,0,0,0,0,
0,0,0,Xw[0],Xw[1],Xw[2],0,0,0,
0,0,0,0,0,0,Xw[0],Xw[1],Xw[2]};
cv::Matx<double,3, 9> dXcdR(dXcdR_a);
// dRdom from cvRodrigues2 is always 3x9 (regardless of input layout), hence
// the transpose here; dRdom itself was verified and looks correct.
/*
//matlab
syms r1 r2 r3 1 n2 n3
seita = sqrt(r1*r1+r2*r2+r3*r3)
n1 = r1/seita
n2 = r2/seita
n3 = r3/seita
R = cos(seita)*[1,0,0;0,1,0;0,0,1] + (1 - cos(seita))*[n1*n1,n1*n2,n1*n3;n2*n1,n2*n2,n2*n3;n3*n1,n3*n2,n3*n3]+sin(seita)*[0,-n3,n2;n3,0,-n1;-n2,n1,0]
diff(R,ri)
*/
cv::Matx33d dXcdom = dXcdR * dRdom.t();
double r_1 = 1.0/norm(Xc);
double r_3 = pow(r_1,3);
cv::Matx33d dXsdXc(r_1-Xc[0]*Xc[0]*r_3, -(Xc[0]*Xc[1])*r_3, -(Xc[0]*Xc[2])*r_3,
-(Xc[0]*Xc[1])*r_3, r_1-Xc[1]*Xc[1]*r_3, -(Xc[1]*Xc[2])*r_3,
-(Xc[0]*Xc[2])*r_3, -(Xc[1]*Xc[2])*r_3, r_1-Xc[2]*Xc[2]*r_3);
cv::Matx23d dxudXs(1/(Xs[2]+xi), 0, -Xs[0]/(Xs[2]+xi)/(Xs[2]+xi),
0, 1/(Xs[2]+xi), -Xs[1]/(Xs[2]+xi)/(Xs[2]+xi));
// pre-compute some reusable things
double temp1 = 2*k1*xu[0] + 4*k2*xu[0]*r2;
double temp2 = 2*k1*xu[1] + 4*k2*xu[1]*r2;
cv::Matx22d dxddxu(k2*r4+6*p2*xu[0]+2*p1*xu[1]+xu[0]*temp1+k1*r2+1, 2*p1*xu[0]+2*p2*xu[1]+xu[0]*temp2,
2*p1*xu[0]+2*p2*xu[1]+xu[1]*temp1, k2*r4+2*p2*xu[0]+6*p1*xu[1]+xu[1]*temp2+k1*r2+1);
cv::Matx22d dxpddxd(f[0], s, 0, f[1]);
cv::Matx23d dxpddXc = dxpddxd * dxddxu * dxudXs * dXsdXc;
// derivative of xpd respect to om
cv::Matx23d dxpddom = dxpddXc * dXcdom;
cv::Matx33d dXcdT(1.0,0.0,0.0,
0.0,1.0,0.0,
0.0,0.0,1.0);
// derivative of xpd respect to T
cv::Matx23d dxpddT = dxpddXc * dXcdT;
cv::Matx21d dxudxi(-Xs[0]/(Xs[2]+xi)/(Xs[2]+xi),
-Xs[1]/(Xs[2]+xi)/(Xs[2]+xi));
// derivative of xpd respect to xi
cv::Matx21d dxpddxi = dxpddxd * dxddxu * dxudxi;
cv::Matx<double,2,4> dxddkp(xu[0]*r2, xu[0]*r4, 2*xu[0]*xu[1], r2+2*xu[0]*xu[0],
xu[1]*r2, xu[1]*r4, r2+2*xu[1]*xu[1], 2*xu[0]*xu[1]);
// derivative of xpd respect to kp
cv::Matx<double,2,4> dxpddkp = dxpddxd * dxddkp;
// derivative of xpd respect to f
cv::Matx22d dxpddf(xd[0], 0,
0, xd[1]);
// derivative of xpd respect to c
cv::Matx22d dxpddc(1, 0,
0, 1);
Jn[0].dom = dxpddom.row(0);
Jn[1].dom = dxpddom.row(1);
Jn[0].dT = dxpddT.row(0);
Jn[1].dT = dxpddT.row(1);
Jn[0].dkp = dxpddkp.row(0);
Jn[1].dkp = dxpddkp.row(1);
Jn[0].dxi = dxpddxi(0,0);
Jn[1].dxi = dxpddxi(1,0);
Jn[0].df = dxpddf.row(0);
Jn[1].df = dxpddf.row(1);
Jn[0].dc = dxpddc.row(0);
Jn[1].dc = dxpddc.row(1);
Jn[0].ds = xd[1];
Jn[1].ds = 0;
Jn += 2;
}
}
}
// Mean reprojection error: average Euclidean distance between observed image
// points and reprojected points, over all points of all views.
// Accepts either a vector of Mats (multiple views) or a single Mat (one view).
// Note: the per-view accumulation was previously duplicated in both branches;
// it is now factored into a single local lambda.
double computeMeanReproErr(cv::InputArrayOfArrays imagePoints, cv::InputArrayOfArrays proImagePoints)
{
// ---- Parameter checks ----
CV_Assert(!imagePoints.empty() && imagePoints.type()==CV_64FC2);
CV_Assert(!proImagePoints.empty() && proImagePoints.type() == CV_64FC2);
CV_Assert(imagePoints.total() == proImagePoints.total());
// Sum of per-point L2 errors, and the number of points seen so far.
double reprojError = 0;
int totalPoints = 0;
// Accumulate the per-point norms of (observed - projected) for one view.
auto accumulate = [&reprojError, &totalPoints](const cv::Mat& obs, const cv::Mat& proj)
{
cv::Mat x, proj_x;
obs.copyTo(x);
proj.copyTo(proj_x);
// Flatten both to column vectors of 2-channel points, then subtract.
cv::Mat errorI = x.reshape(2, x.rows*x.cols) - proj_x.reshape(2, proj_x.rows*proj_x.cols);
totalPoints += (int)errorI.total();
const cv::Vec2d* ptr_err = errorI.ptr<cv::Vec2d>();
for (int j = 0; j < (int)errorI.total(); j++)
{
reprojError += sqrt(ptr_err[j][0]*ptr_err[j][0] + ptr_err[j][1]*ptr_err[j][1]);
}
};
// Multiple views (vector of Mats) vs a single view.
if (imagePoints.kind() == cv::_InputArray::STD_VECTOR_MAT)
{
int n = (int)imagePoints.total();
for (int i = 0; i < n; i++)
{
accumulate(imagePoints.getMat(i), proImagePoints.getMat(i));
}
}
else
{
accumulate(imagePoints.getMat(), proImagePoints.getMat());
}
// Total error divided by the number of points.
return reprojError / totalPoints;
}
// Initialize the calibration: initializeCalibration(world-frame pattern points,
// image-frame points, image size, per-view rotations omAll, per-view
// translations tAll, intrinsics K, xi, surviving-view indices idx).
// Implements Section III of Li et al.'s IROS 2013 multi-camera calibration
// paper: assuming fx == fy, zero distortion and xi == 1, it estimates an
// initial focal length and per-view extrinsics, then filters out views whose
// reprojection error is too large.
void initializeCalibration(cv::InputArrayOfArrays patternPoints, cv::InputArrayOfArrays imagePoints, cv::Size size,cv::OutputArrayOfArrays omAll, cv::OutputArrayOfArrays tAll, cv::OutputArray K, double& xi, cv::OutputArray idx)
{
// For details please refer to Section III from Li's IROS 2013 paper
// Principal point assumed at the image center (integer division truncates for odd sizes).
double u0 = size.width / 2;
double v0 = size.height / 2;
// Number of input views.
int n_img = (int)imagePoints.total();
// Per-view extrinsics.
std::vector<cv::Vec3d> v_omAll(n_img), v_tAll(n_img);
// Per-view focal-length estimates.
std::vector<double> gammaAll(n_img);
K.create(3, 3, CV_64F);
cv::Mat _K;
// Process each view independently.
for (int image_idx = 0; image_idx < n_img; ++image_idx)
{
cv::Mat objPoints, imgPoints;
// World-frame reference points and image-frame detections for this view.
patternPoints.getMat(image_idx).copyTo(objPoints);
imagePoints.getMat(image_idx).copyTo(imgPoints);
// Number of points; all pattern corners are expected to be detected.
int n_point = imgPoints.rows * imgPoints.cols;
if (objPoints.rows != n_point)
objPoints = objPoints.reshape(3, n_point);
if (imgPoints.rows != n_point)
imgPoints = imgPoints.reshape(2, n_point);
// objectPoints should be 3-channel data, imagePoints should be 2-channel data
CV_Assert(objPoints.type() == CV_64FC3 && imgPoints.type() == CV_64FC2 );
// Split channels: objPoints (x,y,0) -> three n_point x 1 Mats
// (all x, all y, all zeros); likewise (u,v) for the image points.
std::vector<cv::Mat> xy, uv;
cv::split(objPoints, xy);
cv::split(imgPoints, uv);
// Center the image coordinates on the assumed principal point.
cv::Mat x = xy[0].reshape(1, n_point), y = xy[1].reshape(1, n_point),
u = uv[0].reshape(1, n_point) - u0, v = uv[1].reshape(1, n_point) - v0;
// mul is element-wise: sqrRho = u*u + v*v (squared radius from center).
cv::Mat sqrRho = u.mul(u) + v.mul(v);
// compute extrinsic parameters
cv::Mat M(n_point, 6, CV_64F);
/*
From the constraint: u(r21x - r22y + t2) - v(r11x + r12y + t1) = 0.
Minimize F = u(r21x - r22y + t2) - v(r11x + r12y + t1);
taking derivatives in the order [r11 r12 r21 r22 t1 t2]^T gives
A = [-vx -vy ux uy -v u]^T, and we solve A X = 0.
*/
cv::Mat(-v.mul(x)).copyTo(M.col(0));
cv::Mat(-v.mul(y)).copyTo(M.col(1));
cv::Mat(u.mul(x)).copyTo(M.col(2));
cv::Mat(u.mul(y)).copyTo(M.col(3));
cv::Mat(-v).copyTo(M.col(4));
cv::Mat(u).copyTo(M.col(5));
cv::Mat W,U,V;
// SVD: M = U W V^T; the null-space direction is the last column of V.
cv::SVD::compute(M, W, U, V,cv::SVD::FULL_UV);
V = V.t();
double miniReprojectError = 1e5;
// the signs of r1, r2, r3 are unknown, so they can be flipped.
// Try both signs of the null-space vector.
for (int coef = 1; coef >= -1; coef-=2)
{
// Last column of V, with the current sign.
double r11 = V.at<double>(0, 5) * coef;
double r12 = V.at<double>(1, 5) * coef;
double r21 = V.at<double>(2, 5) * coef;
double r22 = V.at<double>(3, 5) * coef;
double t1 = V.at<double>(4, 5) * coef;
double t2 = V.at<double>(5, 5) * coef;
cv::Mat roots;
double r31s;
// Combines r11^2+r21^2+r31^2 == r12^2+r22^2+r32^2 with
// r11*r12 + r21*r22 + r31*r32 == 0 into a quadratic in r31^2.
// (The == 1 normalization is handled later by the scale factor.)
cv::solvePoly(cv::Matx13d(-(r11*r12+r21*r22)*(r11*r12+r21*r22), r11*r11+r21*r21-r12*r12-r22*r22, 1), roots);
if (roots.at<cv::Vec2d>(0)[0] > 0)
r31s = sqrt(roots.at<cv::Vec2d>(0)[0]);
else
r31s = sqrt(roots.at<cv::Vec2d>(1)[0]);
// The square root is sign-ambiguous: try both signs.
for (int coef2 = 1; coef2 >= -1; coef2-=2)
{
double r31 = r31s * coef2;
double r32 = -(r11*r12 + r21*r22) / r31;
// First two columns of the rotation matrix.
cv::Vec3d r1(r11, r21, r31);
cv::Vec3d r2(r12, r22, r32);
// t3 is unknown at this point; temporarily 0.
cv::Vec3d t(t1, t2, 0);
double scale = 1 / cv::norm(r1);
r1 = r1 * scale;
r2 = r2 * scale;
t = t * scale;
// compute intrisic parameters
// Form equations in Scaramuzza's paper
// A Toolbox for Easily Calibrating Omnidirectional Cameras
// Solve for the focal length and t3 by writing the system as A X = B,
// with X = [f, 1/f, t3] (no differentiation involved).
cv::Mat A(n_point*2, 3, CV_64F);
cv::Mat((r1[1]*x + r2[1]*y + t[1])/2).copyTo(A.rowRange(0, n_point).col(0));
cv::Mat((r1[0]*x + r2[0]*y + t[0])/2).copyTo(A.rowRange(n_point, 2*n_point).col(0));
cv::Mat(-A.col(0).rowRange(0, n_point).mul(sqrRho)).copyTo(A.col(1).rowRange(0, n_point));
cv::Mat(-A.col(0).rowRange(n_point, 2*n_point).mul(sqrRho)).copyTo(A.col(1).rowRange(n_point, 2*n_point));
cv::Mat(-v).copyTo(A.rowRange(0, n_point).col(2));
cv::Mat(-u).copyTo(A.rowRange(n_point, 2*n_point).col(2));
// Operation to avoid bad numerical-condition of A
// Column normalization: the solution comes back scaled by maxA[j];
// the larger the scale spread in A, the more error-sensitive the solve.
cv::Vec3d maxA, minA;
for (int j = 0; j < A.cols; j++)
{
cv::minMaxLoc(cv::abs(A.col(j)), &minA[j], &maxA[j]);
A.col(j) = A.col(j) / maxA[j];
}
cv::Mat B(n_point*2 , 1, CV_64F);
cv::Mat(v.mul(r1[2]*x + r2[2]*y)).copyTo(B.rowRange(0, n_point));
cv::Mat(u.mul(r1[2]*x + r2[2]*y)).copyTo(B.rowRange(n_point, 2*n_point));
cv::Mat res = A.inv(cv::DECOMP_SVD) * B;
// Undo the column scaling to recover the true solution.
res = res.mul(1/cv::Mat(maxA));
// f = sqrt( f / (1/f) )
double gamma = sqrt(res.at<double>(0) / res.at<double>(1));
t[2] = res.at<double>(2);
// r3 is orthogonal to r1 and r2, hence the cross product.
cv::Vec3d r3 = r1.cross(r2);
cv::Matx33d R(r1[0], r2[0], r3[0],
r1[1], r2[1], r3[1],
r1[2], r2[2], r3[2]);
cv::Vec3d om;
// Rotation matrix -> rotation vector.
cv::Rodrigues(R, om);
// project pattern points to images
cv::Mat projedImgPoints;
// Candidate intrinsics with the estimated focal length.
cv::Matx33d Kc(gamma, 0, u0, 0, gamma, v0, 0, 0, 1);
// reproject error
// 3D -> 2D projection with xi = 1, no distortion.
projectPoints(objPoints, projedImgPoints, om, t, Kc, 1, cv::Matx14d(0, 0, 0, 0), cv::noArray());
double reprojectError = computeMeanReproErr(imgPoints, projedImgPoints);
// if this reproject error is smaller
// Keep the sign combination with the smallest reprojection error.
if (reprojectError < miniReprojectError)
{
miniReprojectError = reprojectError;
v_omAll[image_idx] = om;
v_tAll[image_idx] = t;
gammaAll[image_idx] = gamma;
}
}
}
}
// filter initial results whose reproject errors are too large
std::vector<double> reProjErrorFilter,v_gammaFilter;
std::vector<cv::Vec3d> omFilter, tFilter;
double gammaFinal = 0;
// Median of the per-view focal-length estimates.
size_t n = gammaAll.size() / 2;
std::nth_element(gammaAll.begin(), gammaAll.begin()+n, gammaAll.end());
gammaFinal = gammaAll[n];
// Build K from the median focal length.
_K = cv::Mat(cv::Matx33d(gammaFinal, 0, u0, 0, gammaFinal, v0, 0, 0, 1));
_K.convertTo(K, CV_64F);
std::vector<int> _idx;
// recompute reproject error using the final gamma
// With the median gamma, recompute each view's reprojection error and drop
// views whose error is too large; return the surviving indices plus R, t.
for (int i = 0; i< n_img; i++)
{
cv::Mat _projected;
// Project the 3D points with the previously estimated R, t and the new K.
projectPoints(patternPoints.getMat(i), _projected, v_omAll[i], v_tAll[i], _K, 1, cv::Matx14d(0, 0, 0, 0), cv::noArray());
// Per-view reprojection error.
double _error = computeMeanReproErr(imagePoints.getMat(i), _projected);
// Keep the view if its error is below 100 px; record its index, R and t.
// TODO(review): the threshold 100 is hard-coded — verify how many views
// survive on real data, and whether R, t should be re-estimated afterwards.
if(_error < 100)
{
_idx.push_back(i);
omFilter.push_back(v_omAll[i]);
tFilter.push_back(v_tAll[i]);
}
}
if (idx.needed())
{
idx.create(1, (int)_idx.size(), CV_32S);
cv::Mat idx_m = idx.getMat();
for (int j = 0; j < (int)idx_m.total(); j++)
{
idx_m.at<int>(j) = _idx[j];
}
}
// Output either into a vector of Mats or a single packed Mat.
if(omAll.kind() == cv::_InputArray::STD_VECTOR_MAT)
{
for (int i = 0; i < (int)omFilter.size(); ++i)
{
// NOTE(review): assigning to the temporary Mat returned by getMat(i)
// rebinds its header rather than copying data — it may not write through
// to the caller's Mat. copyTo (as used elsewhere in this file) would be
// the safe form; verify this branch against a STD_VECTOR_MAT caller.
omAll.getMat(i) = cv::Mat(omFilter[i]);
tAll.getMat(i) = cv::Mat(tFilter[i]);
}
}
else
{
cv::Mat(omFilter).convertTo(omAll, CV_64FC3);
cv::Mat(tFilter).convertTo(tAll, CV_64FC3);
}
// The initialization fixes xi at 1 (per the paper's assumption).
xi = 1;
}
// Pack the initial estimates into a single flat parameter row.
// Layout (n views): [om_i(3), t_i(3)] for each view, then
// fx, fy, s, cx, cy, xi, k1, k2, p1, p2 (10 intrinsic values).
void encodeParameters(cv::InputArray K, cv::InputArrayOfArrays omAll, cv::InputArrayOfArrays tAll, cv::InputArray distoaration, double xi, cv::OutputArray parameters)
{
CV_Assert(K.type() == CV_64F && K.size() == cv::Size(3,3));
CV_Assert(distoaration.total() == 4 && distoaration.type() == CV_64F);
const int nFrames = (int)omAll.total();
cv::Mat omMat = omAll.getMat(), tMat = tAll.getMat();
cv::Matx33d Kin = K.getMat();
cv::Vec4d dist = (cv::Vec4d)distoaration.getMat();
parameters.create(1, 10 + 6*nFrames, CV_64F);
cv::Mat outRow = parameters.getMat();
// Extrinsics first: 6 consecutive slots per view (rotation vector, translation).
for (int i = 0; i < nFrames; i++)
{
cv::Mat(omMat.at<cv::Vec3d>(i)).reshape(1, 1).copyTo(outRow.colRange(6*i, 6*i + 3));
cv::Mat(tMat.at<cv::Vec3d>(i)).reshape(1, 1).copyTo(outRow.colRange(6*i + 3, 6*i + 6));
}
// Intrinsics occupy the trailing 10 slots; write them through a raw pointer
// (outRow is a freshly created 1xN row, hence contiguous).
double* intr = outRow.ptr<double>(0) + 6*nFrames;
intr[0] = Kin(0,0); // fx
intr[1] = Kin(1,1); // fy
intr[2] = Kin(0,1); // skew
intr[3] = Kin(0,2); // cx
intr[4] = Kin(1,2); // cy
intr[5] = xi;
intr[6] = dist[0]; // k1
intr[7] = dist[1]; // k2
intr[8] = dist[2]; // p1
intr[9] = dist[3]; // p2
}
// Build the parameter-selection mask from the flags.
// Every entry set to 1 means "optimize this parameter". The original flag-based
// parameter fixing was intentionally removed by the author; `flags` is kept
// only for interface compatibility with the callers.
void flags2idx(int flags, std::vector<int>& idx, int n)
{
(void)flags; // deliberately unused (flag handling removed)
idx.assign(6*n + 10, 1);
}
// Extract the sub-matrix of `src` keeping only the columns and rows whose
// corresponding entry in `cols` / `rows` is non-zero.
void subMatrix(const cv::Mat& src, cv::Mat& dst, const std::vector<int>& cols, const std::vector<int>& rows)
{
CV_Assert(src.type() == CV_64FC1);
const int keptCols = cv::countNonZero(cols);
const int keptRows = cv::countNonZero(rows);
// First pass: drop the unselected columns.
cv::Mat colFiltered(src.rows, keptCols, CV_64FC1);
int outc = 0;
for (size_t c = 0; c < cols.size(); ++c)
{
if (!cols[c])
continue;
src.col((int)c).copyTo(colFiltered.col(outc++));
}
// Second pass: drop the unselected rows.
cv::Mat rowFiltered(keptRows, keptCols, CV_64FC1);
int outr = 0;
for (size_t r = 0; r < rows.size(); ++r)
{
if (!rows[r])
continue;
colFiltered.row((int)r).copyTo(rowFiltered.row(outr++));
}
dst = rowFiltered.clone();
}
// Build the normal equations for the Gauss-Newton step.
// objectPoints: per-view 3D points (CV_64FC3); imagePoints: per-view 2D
// detections (CV_64FC2); parameters: packed row [om_i,t_i]*n, fx,fy,s,cx,cy,xi,k1,k2,p1,p2.
// Outputs JTJ = J^T*J and JTE = J^T*error, reduced to the rows/columns selected
// by `flags` (via flags2idx/subMatrix). `epsilon` is unused here: conditioning
// is applied by the caller; the parameter is kept for interface compatibility.
// Fix: the original allocated two large zero matrices `J` and `exAll`
// (each 2*totalPoints x (10+6n)) that were never used, plus a point-counting
// loop that only sized them — all removed.
void computeJacobian(cv::InputArrayOfArrays objectPoints, cv::InputArrayOfArrays imagePoints,cv::InputArray parameters, cv::Mat& JTJ, cv::Mat& JTE, int flags, double epsilon)
{
// ---- Parameter checks ----
CV_Assert(!objectPoints.empty() && objectPoints.type() == CV_64FC3);
CV_Assert(!imagePoints.empty() && imagePoints.type() == CV_64FC2);
(void)epsilon; // see doc comment: conditioning handled by the caller
int n = (int)objectPoints.total();
// Accumulators for the normal equations: 10 intrinsic + 6*n extrinsic parameters.
JTJ = cv::Mat::zeros(10 + 6*n, 10 + 6*n, CV_64F);
JTE = cv::Mat::zeros(10 + 6*n, 1, CV_64F);
// Pointer into the packed parameter row.
double *para = parameters.getMat().ptr<double>();
// Recover K, D and xi from the trailing 10 slots.
cv::Matx33d K(para[6*n], para[6*n+2], para[6*n+3],
0, para[6*n+1], para[6*n+4],
0, 0, 1);
cv::Matx14d D(para[6*n+6], para[6*n+7], para[6*n+8], para[6*n+9]);
double xi = para[6*n+5];
/* Block structure of the full Jacobian (never materialized):
J =
Jex1 0 0 ... 0 Jin1
0 Jex2 0 ... 0 Jin2
0 0 Jex3 ... 0 Jin3
... ... ... ... ... ...
0 0 0 ... Jexn Jinn
where Jex_i are the extrinsic blocks and Jin_i the intrinsic blocks.
J^T J (symmetric, upper half shown):
Jex1^T*Jex1 0 ... 0 Jex1^T*Jin1
Jex2^T*Jex2 ... 0 Jex2^T*Jin2
... ... ...
Jexn^T*Jexn Jexn^T*Jinn
sum(Jink^T*Jink)
J^T e is accumulated block-wise the same way.
So JTJ/JTE are assembled per view instead of forming J explicitly. */
for (int i = 0; i < n; i++)
{
// Flatten this view's points into column vectors.
cv::Mat objPoints, imgPoints, om, T;
objectPoints.getMat(i).copyTo(objPoints);
imagePoints.getMat(i).copyTo(imgPoints);
objPoints = objPoints.reshape(3, objPoints.rows*objPoints.cols);
imgPoints = imgPoints.reshape(2, imgPoints.rows*imgPoints.cols);
// This view's extrinsics from the parameter row.
om = parameters.getMat().colRange(i*6, i*6+3);
T = parameters.getMat().colRange(i*6+3, (i+1)*6);
cv::Mat imgProj, jacobian;
// Per-point Jacobian and reprojected positions.
projectPoints(objPoints, imgProj, om, T, K, xi, D, jacobian);
// Reprojection residuals.
cv::Mat projError = imgPoints - imgProj;
// Split the per-view Jacobian into intrinsic (cols 6..15) and
// extrinsic (cols 0..5) blocks.
cv::Mat JIn(jacobian.rows, 10, CV_64F);
cv::Mat JEx(jacobian.rows, 6, CV_64F);
jacobian.colRange(6, 16).copyTo(JIn);
jacobian.colRange(0, 6).copyTo(JEx);
// Rect(x, y, width, height) == Rect(col, row, ncols, nrows).
JTJ(cv::Rect(6*n, 6*n, 10, 10)) = JTJ(cv::Rect(6*n, 6*n, 10, 10)) + JIn.t()*JIn;
JTJ(cv::Rect(i*6, i*6, 6, 6)) = JEx.t() * JEx;
cv::Mat JExTIn = JEx.t() * JIn;
JExTIn.copyTo(JTJ(cv::Rect(6*n, i*6, 10, 6)));
cv::Mat(JIn.t()*JEx).copyTo(JTJ(cv::Rect(i*6, 6*n, 6, 10)));
JTE(cv::Rect(0, 6*n, 1, 10)) = JTE(cv::Rect(0, 6*n,1, 10)) + JIn.t() * projError.reshape(1, 2*(int)projError.total());
JTE(cv::Rect(0, i*6, 1, 6)) = JEx.t() * projError.reshape(1, 2*(int)projError.total());
}
// Drop the rows/columns of parameters that `flags` marks as fixed.
std::vector<int> _idx(6*n+10, 1);
flags2idx(flags, _idx, n);
subMatrix(JTJ, JTJ, _idx, _idx);
subMatrix(JTE, JTE, std::vector<int>(1, 1), _idx);
}
// Re-expand the reduced update vector G (which only contains entries for the
// free parameters) back to full size 6*n+10: fixed parameters get 0, free ones
// receive their computed update in order.
// Fix: the original did G.release(); G.create(...); followed immediately by
// G = Mat::zeros(...), which discards the created buffer — collapsed into the
// single zeros() assignment.
void fillFixed(cv::Mat&G, int flags, int n)
{
// Keep the reduced values before G is reallocated.
cv::Mat tmp = G.clone();
std::vector<int> idx(6*n + 10, 1);
flags2idx(flags, idx, n);
// Full-size update vector, zero-initialized (fixed parameters stay 0).
G = cv::Mat::zeros(6*n + 10, 1, CV_64F);
for (int i = 0, j = 0; i < (int)idx.size(); i++)
{
if (idx[i])
{
G.at<double>(i) = tmp.at<double>(j++);
}
}
}
// Unpack the flat parameter row back into K, per-view om/t, distortion and xi.
// Layout (n views): [om_i(3), t_i(3)] * n, then fx, fy, s, cx, cy, xi, k1, k2, p1, p2.
// Fix: `_D` was previously initialized from distoration.getMat(), which could
// read a freshly created (uninitialized) Mat; the value was overwritten
// immediately, so it is now constructed directly from the parameter row.
void decodeParameters(cv::InputArray parameters, cv::OutputArray K, cv::OutputArrayOfArrays omAll, cv::OutputArrayOfArrays tAll, cv::OutputArray distoration, double& xi)
{
if(K.empty())
K.create(3,3,CV_64F);
cv::Matx33d _K;
// Number of views encoded in the row.
int n = (int)(parameters.total()-10)/6;
if(omAll.empty())
omAll.create(1, n, CV_64FC3);
if(tAll.empty())
tAll.create(1, n, CV_64FC3);
if(distoration.empty())
distoration.create(1, 4, CV_64F);
cv::Mat param = parameters.getMat();
double *para = param.ptr<double>();
// Intrinsics occupy the trailing 10 slots.
_K = cv::Matx33d(para[6*n], para[6*n+2], para[6*n+3],
0, para[6*n+1], para[6*n+4],
0, 0, 1);
cv::Matx14d _D(para[6*n+6], para[6*n+7], para[6*n+8], para[6*n+9]);
xi = para[6*n+5];
// Per-view extrinsics: 6 consecutive slots each.
std::vector<cv::Vec3d> _omAll(n), _tAll(n);
for (int i = 0; i < n; i++)
{
_omAll[i] = cv::Vec3d(param.colRange(i*6, i*6+3));
_tAll[i] = cv::Vec3d(param.colRange(i*6+3, i*6+6));
}
cv::Mat(_D).convertTo(distoration, CV_64F);
cv::Mat(_K).convertTo(K, CV_64F);
// Output either into a vector of Mats or a single packed Mat.
if (omAll.kind() == cv::_InputArray::STD_VECTOR_MAT)
{
for (int i = 0; i < n; ++i)
{
cv::Mat(_omAll[i]).copyTo(omAll.getMat(i));
cv::Mat(_tAll[i]).copyTo(tAll.getMat(i));
}
}
else
{
cv::Mat(_omAll).convertTo(omAll, CV_64FC3);
cv::Mat(_tAll).convertTo(tAll, CV_64FC3);
}
}
// Estimate the uncertainties of the calibration result.
// Inputs: 3D pattern points, 2D image points, final packed parameters, flags.
// Outputs: per-parameter error bounds (errors), per-axis std deviation of the
// residuals (std_error), and the RMS reprojection error (rms).
void estimateUncertainties(cv::InputArrayOfArrays objectPoints, cv::InputArrayOfArrays imagePoints, cv::InputArray parameters, cv::Mat& errors, cv::Vec2d& std_error, double& rms, int flags)
{
// ---- Parameter checks ----
CV_Assert(!objectPoints.empty() && objectPoints.type() == CV_64FC3);
CV_Assert(!imagePoints.empty() && imagePoints.type() == CV_64FC2);
CV_Assert(!parameters.empty() && parameters.type() == CV_64F);
// Number of views.
int n = (int) objectPoints.total();
// Total point count across all views (every pattern corner is expected
// to have been detected in every view).
int nPointsAll = 0;
for (int i = 0; i < n; ++i)
{
nPointsAll += (int)objectPoints.getMat(i).total();
}
// One 2D residual per point.
cv::Mat reprojError = cv::Mat(nPointsAll, 1, CV_64FC2);
// Unpack K, D and xi from the trailing 10 parameter slots.
double* para = parameters.getMat().ptr<double>();
cv::Matx33d K(para[6*n], para[6*n+2], para[6*n+3],
0, para[6*n+1], para[6*n+4],
0, 0, 1);
cv::Matx14d D(para[6*n+6], para[6*n+7], para[6*n+8], para[6*n+9]);
double xi = para[6*n+5];
int nPointsAccu = 0;
// Accumulate residuals over all views.
for(int i=0; i < n; ++i)
{
// Flatten this view's points into column vectors.
cv::Mat imgPoints, objPoints;
imagePoints.getMat(i).copyTo(imgPoints);
objectPoints.getMat(i).copyTo(objPoints);
imgPoints = imgPoints.reshape(2, imgPoints.rows*imgPoints.cols);
objPoints = objPoints.reshape(3, objPoints.rows*objPoints.cols);
// This view's extrinsics from the parameter row.
cv::Mat om = parameters.getMat().colRange(i*6, i*6+3);
cv::Mat T = parameters.getMat().colRange(i*6+3, (i+1)*6);
cv::Mat x;
// Reproject the 3D pattern points.
projectPoints(objPoints, x, om, T, K, xi, D, cv::noArray());
// Residual: observed minus reprojected.
cv::Mat errorx = (imgPoints - x);
//reprojError.rowRange(errorx.rows*i, errorx.rows*(i+1)) = errorx.clone();
// Store every point's residual of this view.
errorx.copyTo(reprojError.rowRange(nPointsAccu, nPointsAccu + (int)errorx.total()));
nPointsAccu += (int)errorx.total();
}
// Per-axis standard deviation of the residuals.
meanStdDev(reprojError, cv::noArray(), std_error);
// Bessel-style correction: sample statistic, one degree of freedom lost.
std_error *= sqrt((double)reprojError.total()/((double)reprojError.total() - 1.0));
// Same estimate with x and y residuals pooled with equal weight.
cv::Mat sigma_x;
meanStdDev(reprojError.reshape(1,1), cv::noArray(), sigma_x);
sigma_x *= sqrt(2.0*(double)reprojError.total()/(2.0*(double)reprojError.total() - 1.0));
double s = sigma_x.at<double>(0);
// 3-sigma parameter bounds from the normal-equation matrix.
// NOTE(review): computeJacobian outputs J^T J itself, NOT its inverse, so
// despite its name _JTJ_inv holds J^T J here. The usual derivation (and the
// upstream OpenCV code) uses the inverse for these bounds — verify.
cv::Mat _JTJ_inv, _JTE;
computeJacobian(objectPoints, imagePoints, parameters, _JTJ_inv, _JTE, flags, 0.0);
sqrt(_JTJ_inv, _JTJ_inv);
errors = 3 * s * _JTJ_inv.diag();
// RMS reprojection error over all residuals.
rms = 0;
const cv::Vec2d* ptr_ex = reprojError.ptr<cv::Vec2d>();
for (int i = 0; i < (int)reprojError.total(); i++)
{
rms += ptr_ex[i][0] * ptr_ex[i][0] + ptr_ex[i][1] * ptr_ex[i][1];
}
rms /= (double)reprojError.total();
rms = sqrt(rms);
}
// Column-normalize JTJ: JTJ_mean receives the per-column averages (row vector),
// and JTJ2(i,j) = JTJ(i,j) / JTJ_mean(j). Intended as a preconditioning step
// (currently unused by the callers — see the commented-out call in calibrate).
// Fixes: loop counters were size_t compared against int rows/cols and narrowed
// into at<double>(int,int); loops also traversed column-major over a row-major
// Mat. Now uses int indices with the row loop outermost.
void computereduce(cv::Mat&JTJ,cv::Mat&JTJ2,cv::Mat&JTJ_mean)
{
// Per-column mean (dim=0 reduces rows), result is a 1 x cols row vector.
cv::reduce(JTJ, JTJ_mean, 0, cv::REDUCE_AVG);
JTJ2 = cv::Mat::zeros(JTJ.size(), CV_64F);
// Row-major traversal for cache-friendly access.
for (int i = 0; i < JTJ.rows; i++)
{
for (int j = 0; j < JTJ.cols; j++)
{
JTJ2.at<double>(i, j) = JTJ.at<double>(i, j) / JTJ_mean.at<double>(0, j);
}
}
}