1. Code from the cv::detail::Estimator / BundleAdjusterBase section (BundleAdjusterBase::estimate):
// Jointly refines the parameters of all cameras by minimizing the accumulated
// match error over confident image pairs with a Levenberg-Marquardt solver
// (legacy CvLevMarq).
//
// @param features          per-image keypoints/descriptors
// @param pairwise_matches  dense num_images x num_images table of pair matches
// @param cameras           in: initial camera estimates; out: refined cameras,
//                          rotations normalized to the central image
// @return false if any refined parameter came out NaN, true otherwise
bool BundleAdjusterBase::estimate(const std::vector<ImageFeatures> &features,
                                  const std::vector<MatchesInfo> &pairwise_matches,
                                  std::vector<CameraParams> &cameras)
{
    LOG_CHAT("Bundle adjustment");
#if ENABLE_LOG
    int64 t = getTickCount();
#endif
    num_images_ = static_cast<int>(features.size());
    // Keep raw pointers to the caller's data so the virtual calcError()/
    // calcJacobian() overrides can reach it without copying.
    features_ = &features[0];
    pairwise_matches_ = &pairwise_matches[0];
    // Initialize the camera parameter vector (focal, ppx, ppy, aspect) and
    // convert each rotation matrix to a Rodrigues vector (subclass-specific).
    setUpInitialCameraParams(cameras);
    // Leave only consistent image pairs
    // Keep only edges (i, j) whose match confidence exceeds conf_thresh_.
    edges_.clear();
    for (int i = 0; i < num_images_ - 1; ++i)
    {
        for (int j = i + 1; j < num_images_; ++j)
        {
            // Row-major indexing into the dense pairwise-match table.
            const MatchesInfo& matches_info = pairwise_matches_[i * num_images_ + j];
            if (matches_info.confidence > conf_thresh_)
                edges_.push_back(std::make_pair(i, j));
        }
    }
    // Compute number of correspondences
    // Total inlier count over the kept edges; this sizes the error vector.
    total_num_matches_ = 0;
    for (size_t i = 0; i < edges_.size(); ++i)
        total_num_matches_ += static_cast<int>(pairwise_matches[edges_[i].first * num_images_ +
                                                                edges_[i].second].num_inliers);
    // CvLevMarq implements the Levenberg-Marquardt nonlinear least-squares
    // algorithm: it iteratively finds a local minimum of a function expressed
    // as a sum of squares of nonlinear functions. Here the unknowns are the
    // stacked per-camera parameters and the residuals are the per-match errors.
    CvLevMarq solver(num_images_ * num_params_per_cam_,
                     total_num_matches_ * num_errs_per_measurement_,
                     cvTermCriteria(term_criteria_));
    Mat err, jac;
    // Seed the solver with the current parameter vector.
    CvMat matParams = cvMat(cam_params_);
    cvCopy(&matParams, solver.param);
    int iter = 0;
    for(;;)
    {
        // CvLevMarq protocol: update() hands back which of the parameter,
        // Jacobian and error buffers it wants filled for this step.
        const CvMat* _param = 0;
        CvMat* _jac = 0;
        CvMat* _err = 0;
        bool proceed = solver.update(_param, _jac, _err);
        // Sync the solver's current parameter estimate back into cam_params_
        // so calcJacobian()/calcError() evaluate at the right point.
        cvCopy(_param, &matParams);
        if (!proceed || !_err)
            break;
        if (_jac)
        {
            calcJacobian(jac);
            CvMat tmp = cvMat(jac);
            cvCopy(&tmp, _jac);
        }
        if (_err)
        {
            calcError(err);
            LOG_CHAT(".");
            iter++;
            CvMat tmp = cvMat(err);
            cvCopy(&tmp, _err);
        }
    }
    LOGLN_CHAT("");
    LOGLN_CHAT("Bundle adjustment, final RMS error: " << std::sqrt(err.dot(err) / total_num_matches_));
    LOGLN_CHAT("Bundle adjustment, iterations done: " << iter);
    // Check if all camera parameters are valid
    // The optimization can diverge; reject the result if any parameter is NaN.
    bool ok = true;
    for (int i = 0; i < cam_params_.rows; ++i)
    {
        if (cvIsNaN(cam_params_.at<double>(i,0)))
        {
            ok = false;
            break;
        }
    }
    if (!ok)
        return false;
    // Write the refined parameter vector back into the CameraParams structs.
    obtainRefinedCameraParams(cameras);
    // Normalize motion to center image
    // Re-express all rotations relative to the center of the maximum spanning
    // tree of the match graph, so the central image gets the identity rotation.
    Graph span_tree;
    std::vector<int> span_tree_centers;
    findMaxSpanningTree(num_images_, pairwise_matches, span_tree, span_tree_centers);
    Mat R_inv = cameras[span_tree_centers[0]].R.inv();
    for (int i = 0; i < num_images_; ++i)
        cameras[i].R = R_inv * cameras[i].R;
    // NOTE(review): `t` is only declared under ENABLE_LOG; presumably
    // LOGLN_CHAT expands to nothing when logging is disabled — confirm.
    LOGLN_CHAT("Bundle adjustment, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");
    return true;
}
2. Code from the BundleAdjusterRay section (error and Jacobian computation):
void BundleAdjusterRay::calcError(Mat &err)
{
    // Residual vector: for every inlier match, the difference between the two
    // unit-length back-projection rays, scaled by the geometric mean of the
    // pair's focal lengths — three error components per match.
    err.create(total_num_matches_ * 3, 1, CV_64F);

    int pos = 0;  // running match index into err
    for (size_t edge = 0; edge < edges_.size(); ++edge)
    {
        const int img1 = edges_[edge].first;
        const int img2 = edges_[edge].second;

        // Each camera occupies 4 slots: [focal, rodrigues_x, rodrigues_y, rodrigues_z].
        const double focal1 = cam_params_.at<double>(img1 * 4, 0);
        const double focal2 = cam_params_.at<double>(img2 * 4, 0);

        // Rebuild both rotation matrices from the stored Rodrigues vectors.
        Mat rodr(3, 1, CV_64F);

        double rot1_data[9];
        Mat rot1(3, 3, CV_64F, rot1_data);
        rodr.at<double>(0, 0) = cam_params_.at<double>(img1 * 4 + 1, 0);
        rodr.at<double>(1, 0) = cam_params_.at<double>(img1 * 4 + 2, 0);
        rodr.at<double>(2, 0) = cam_params_.at<double>(img1 * 4 + 3, 0);
        Rodrigues(rodr, rot1);

        double rot2_data[9];
        Mat rot2(3, 3, CV_64F, rot2_data);
        rodr.at<double>(0, 0) = cam_params_.at<double>(img2 * 4 + 1, 0);
        rodr.at<double>(1, 0) = cam_params_.at<double>(img2 * 4 + 2, 0);
        rodr.at<double>(2, 0) = cam_params_.at<double>(img2 * 4 + 3, 0);
        Rodrigues(rodr, rot2);

        const ImageFeatures &feat1 = features_[img1];
        const ImageFeatures &feat2 = features_[img2];
        const MatchesInfo &matches_info = pairwise_matches_[img1 * num_images_ + img2];

        // Intrinsics with the principal point fixed at the image center.
        Mat_<double> K1 = Mat::eye(3, 3, CV_64F);
        K1(0, 0) = focal1; K1(0, 2) = feat1.img_size.width * 0.5;
        K1(1, 1) = focal1; K1(1, 2) = feat1.img_size.height * 0.5;

        Mat_<double> K2 = Mat::eye(3, 3, CV_64F);
        K2(0, 0) = focal2; K2(0, 2) = feat2.img_size.width * 0.5;
        K2(1, 1) = focal2; K2(1, 2) = feat2.img_size.height * 0.5;

        // Pixel -> ray in the common frame: ray = R * K^-1 * [x, y, 1]^T.
        Mat_<double> H1 = rot1 * K1.inv();
        Mat_<double> H2 = rot2 * K2.inv();

        // Error scale — invariant over this pair's matches, so hoisted out.
        const double scale = std::sqrt(focal1 * focal2);

        for (size_t k = 0; k < matches_info.matches.size(); ++k)
        {
            if (!matches_info.inliers_mask[k])
                continue;
            const DMatch &m = matches_info.matches[k];

            // Ray of the keypoint in image 1, normalized to unit length.
            const Point2f pt1 = feat1.keypoints[m.queryIdx].pt;
            double ax = H1(0, 0) * pt1.x + H1(0, 1) * pt1.y + H1(0, 2);
            double ay = H1(1, 0) * pt1.x + H1(1, 1) * pt1.y + H1(1, 2);
            double az = H1(2, 0) * pt1.x + H1(2, 1) * pt1.y + H1(2, 2);
            double norm = std::sqrt(ax * ax + ay * ay + az * az);
            ax /= norm; ay /= norm; az /= norm;

            // Ray of the matched keypoint in image 2, normalized likewise.
            const Point2f pt2 = feat2.keypoints[m.trainIdx].pt;
            double bx = H2(0, 0) * pt2.x + H2(0, 1) * pt2.y + H2(0, 2);
            double by = H2(1, 0) * pt2.x + H2(1, 1) * pt2.y + H2(1, 2);
            double bz = H2(2, 0) * pt2.x + H2(2, 1) * pt2.y + H2(2, 2);
            norm = std::sqrt(bx * bx + by * by + bz * bz);
            bx /= norm; by /= norm; bz /= norm;

            err.at<double>(3 * pos, 0)     = scale * (ax - bx);
            err.at<double>(3 * pos + 1, 0) = scale * (ay - by);
            err.at<double>(3 * pos + 2, 0) = scale * (az - bz);
            ++pos;
        }
    }
}
void BundleAdjusterRay::calcJacobian(Mat &jac)
{
    // Numeric Jacobian by central differences: perturb each of the 4 camera
    // parameters by +/-step, evaluate the error vector at both points, and
    // difference them into the corresponding Jacobian column.
    jac.create(total_num_matches_ * 3, num_images_ * 4, CV_64F);

    const double step = 1e-3;
    for (int cam = 0; cam < num_images_; ++cam)
    {
        for (int par = 0; par < 4; ++par)
        {
            const int idx = cam * 4 + par;
            const double saved = cam_params_.at<double>(idx, 0);

            cam_params_.at<double>(idx, 0) = saved - step;
            calcError(err1_);

            cam_params_.at<double>(idx, 0) = saved + step;
            calcError(err2_);

            calcDeriv(err1_, err2_, 2 * step, jac.col(idx));

            // Restore the parameter before moving to the next one.
            cam_params_.at<double>(idx, 0) = saved;
        }
    }
}