《视觉SLAM十四讲》 (14 Lectures on Visual SLAM) — Lecture 7 Exercises

 

1. Besides the ORB features used in this book, what other feature points can you find? Explain the principles of SIFT or SURF, and compare their strengths and weaknesses against ORB.

Besides ORB, common choices include SIFT, SURF, BRISK and AKAZE, all of which are already implemented in OpenCV.

SIFT, the Scale-Invariant Feature Transform, works roughly as follows: it first searches image locations over all scales, using a difference-of-Gaussian function to identify candidate interest points that are invariant to scale and rotation; it then fits a fine model at each candidate location to determine position and scale; next, based on local image gradients, it assigns one or more orientations to each keypoint; finally, it describes each keypoint by measuring the local image gradients in its neighborhood at the selected scale.
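
For reference, the difference-of-Gaussian response that SIFT searches over scale space is

D(x, y, \sigma) = \big( G(x, y, k\sigma) - G(x, y, \sigma) \big) * I(x, y)

where G is a Gaussian of width \sigma, I is the image, * denotes convolution, and k is the constant scale step between adjacent levels; keypoints are the local extrema of D over both space and scale.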

SURF, Speeded-Up Robust Features, takes the opposite approach to building an image pyramid: instead of resizing the image, it changes the size and blur of the filter to obtain the different scale levels, which it can evaluate in constant time on an integral image; the way it searches for feature points is otherwise similar to SIFT.
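
For reference, SURF locates interest points at maxima of an approximated Hessian determinant,

\det(H_{approx}) = D_{xx} D_{yy} - (0.9\, D_{xy})^2,

where D_{xx}, D_{yy}, D_{xy} are box-filter approximations of the second-order Gaussian derivatives, each evaluated in constant time on an integral image (0.9 is the relative weight used in the SURF paper).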

SIFT and SURF both deliver consistently high accuracy, with SIFT somewhat more accurate than SURF, but both are very slow. ORB, by comparison, is much faster, but it fails more easily and its error rate is considerably higher than the other two.
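
Since all of these detectors implement the common cv::Feature2D interface, switching between them is a one-line change. A minimal sketch (assuming OpenCV 3 with the contrib modules; the image path test.png is a placeholder):

#include <opencv2/opencv.hpp>
#include <opencv2/xfeatures2d.hpp>

int main() {
    cv::Mat img = cv::imread("test.png", cv::IMREAD_GRAYSCALE); // placeholder path
    // any of the detectors below can be swapped in through the same interface
    cv::Ptr<cv::Feature2D> detector = cv::ORB::create(1000);
    // cv::Ptr<cv::Feature2D> detector = cv::xfeatures2d::SIFT::create(1000);
    // cv::Ptr<cv::Feature2D> detector = cv::xfeatures2d::SURF::create(400);
    std::vector<cv::KeyPoint> keypoints;
    cv::Mat descriptors;
    detector->detectAndCompute(img, cv::noArray(), keypoints, descriptors);
    return 0;
}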

 

2. Write a program that calls the other kinds of feature detectors in OpenCV. Measure the time your machine takes to extract 1000 feature points.

My machine is an Intel NUC: an Intel® Core™ i5-7260U CPU @ 2.20GHz × 4, 64-bit, with 8 GB of RAM and a 250 GB SSD.

The system is Ubuntu 16.04, the IDE is KDevelop, and the code uses OpenCV 3.2 plus the contrib modules.

This is the picture used for the test (a casual snapshot).

The CMakeLists.txt is as follows:

cmake_minimum_required(VERSION 2.6)
project(homeworks7_2)

set(CMAKE_BUILD_TYPE "Release")
set(CMAKE_CXX_FLAGS "-std=c++11 -O3")

set(OpenCV_DIR "/usr/local/opencv3/share/OpenCV")
find_package(OpenCV REQUIRED)

include_directories(
  ${OpenCV_INCLUDE_DIRS}
)

add_executable(homeworks7_2 homeworks7_2.cpp)

target_link_libraries(homeworks7_2 ${OpenCV_LIBS})

install(TARGETS homeworks7_2 RUNTIME DESTINATION bin)

The cpp file is as follows:

#include <iostream>
#include <ctime>
#include <opencv2/core/core.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/xfeatures2d.hpp>

using namespace std;

int main(int argc, char **argv) {
    
    cv::Mat img = cv::imread("/home/fuhang/projects/homeworks7_2/lab.png", CV_LOAD_IMAGE_COLOR);
    
    cv::namedWindow("img", 0);
    cv::resizeWindow("img", 1280, 1024);
    cv::imshow("img", img);
    cv::waitKey(0);
    
    std::vector<cv::KeyPoint> keypoint;
    
    //ORB
    cv::Ptr<cv::ORB> orb = cv::ORB::create(1000);
    clock_t t = clock();    
    orb->detect(img,keypoint);
    cout << "*******************************" << endl;
    cout << "Found " << keypoint.size() << " keypoints;" << endl;   
    cout << "ORB took " << 1000 * (clock() - t) / (double)CLOCKS_PER_SEC << "ms" << endl;
    
    cv::Mat img_orb;
    cv::drawKeypoints(img, keypoint, img_orb, cv::Scalar::all(-1), cv::DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
    cv::imshow("img", img_orb);
    cv::waitKey(0);
    
    //SIFT
    cv::Ptr<cv::Feature2D> sift = cv::xfeatures2d::SIFT::create(1000);
    t = clock();
    
    sift->detect(img,keypoint);
    cout << "*******************************" << endl;
    cout << "Found " << keypoint.size() << " keypoints;" << endl;   
    cout << "SIFT took " << 1000 * (clock() - t) / (double)CLOCKS_PER_SEC << "ms" << endl;
    
    cv::Mat img_sift;
    cv::drawKeypoints(img, keypoint, img_sift, cv::Scalar::all(-1), cv::DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
    cv::imshow("img", img_sift);
    cv::waitKey(0);
    
    //SURF (create() takes a Hessian threshold rather than a keypoint count; 2800 here)
    cv::Ptr<cv::Feature2D> surf = cv::xfeatures2d::SURF::create(2800);
    t = clock();
    
    surf->detect(img,keypoint);
    cout << "*******************************" << endl;
    cout << "Found " << keypoint.size() << " keypoints;" << endl;   
    cout << "SURF took " << 1000 * (clock() - t) / (double)CLOCKS_PER_SEC << "ms" << endl;
    
    cv::Mat img_surf;
    cv::drawKeypoints(img, keypoint, img_surf, cv::Scalar::all(-1), cv::DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
    cv::imshow("img", img_surf);
    cv::waitKey(0);
    
    //BRISK (detection threshold 68; BRISK has no direct keypoint-count parameter either)
    cv::Ptr<cv::BRISK> brisk = cv::BRISK::create(68);
    t = clock();    
    brisk->detect(img,keypoint);
    cout << "*******************************" << endl;
    cout << "Found " << keypoint.size() << " keypoints;" << endl;   
    cout << "BRISK took " << 1000 * (clock() - t) / (double)CLOCKS_PER_SEC << "ms" << endl;
    
    cv::Mat img_brisk;
    cv::drawKeypoints(img, keypoint, img_brisk, cv::Scalar::all(-1), cv::DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
    cv::imshow("img", img_brisk);
    cv::waitKey(0);
    
    //AKAZE (parameters tuned by hand; the fourth argument is the detection threshold)
    cv::Ptr<cv::AKAZE> akaze = cv::AKAZE::create(2, 0, 3, 0.005f, 4, 4, 3);
    t = clock();    
    akaze->detect(img,keypoint);
    cout << "*******************************" << endl;
    cout << "Found " << keypoint.size() << " keypoints;" << endl;   
    cout << "AKAZE took " << 1000 * (clock() - t) / (double)CLOCKS_PER_SEC << "ms" << endl;
    
    cv::Mat img_akaze;
    cv::drawKeypoints(img, keypoint, img_akaze, cv::Scalar::all(-1), cv::DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
    cv::imshow("img", img_akaze);
    cv::waitKey(0);
   
    return 0;
}

When you run this code, you can press Enter to step through each algorithm's detections on the image; for reasons of space I won't show those here. The point of the program is to time each algorithm extracting 1000 feature points; the results are as follows:

Running the code shows that feature detection is a balance between accuracy and speed. The SIFT family is clearly more accurate and more stable than fast binary-descriptor algorithms such as ORB and BRISK, but it is much slower. SURF is an accelerated version of SIFT, yet still fairly slow. AKAZE reportedly performs far better than the others on SAR images with heavy speckle noise. With deep learning now dominant, however, classical local feature extraction is gradually leaving the stage.
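
One caveat when reading such timings: clock() measures CPU time summed over all threads, so for OpenCV routines that parallelize internally it can overstate the elapsed time. A wall-clock alternative with std::chrono (a drop-in sketch for the timing lines above, reusing the orb and keypoint variables and requiring #include <chrono>):

auto t1 = std::chrono::steady_clock::now();
orb->detect(img, keypoint);
auto t2 = std::chrono::steady_clock::now();
double ms = std::chrono::duration<double, std::milli>(t2 - t1).count();
cout << "ORB took " << ms << "ms (wall clock)" << endl;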

 

3. The feature points that OpenCV's ORB extracts are not distributed very evenly over the image. Can you find a way to make their distribution more uniform?

A quick web search shows quite a few people have worked on this: ORB_SLAM2 (2016) already implements uniform keypoint distribution with a quadtree, and there is also a paper from May 2018 that uses a quadtree in a slightly different way; if you are interested, see Liu's master's thesis 《ORB特征四叉树均匀分布算法》. Most approaches repeatedly split each image layer with a quadtree and keep only the better keypoints in each node.

Here I unpack the various ORB extractor functions into a single main function, so that beginners like me can see the execution order at a glance:

The CMakeLists.txt is as follows:

cmake_minimum_required(VERSION 2.6)
project(homeworks7_3)

set(CMAKE_BUILD_TYPE "Release")
set(CMAKE_CXX_FLAGS "-std=c++11 -O3")

set(OpenCV_DIR "/usr/local/opencv3/share/OpenCV")
find_package(OpenCV REQUIRED)

include_directories(
  ${OpenCV_INCLUDE_DIRS}
)

add_executable(homeworks7_3 homeworks.cpp)
target_link_libraries(homeworks7_3 ${OpenCV_LIBS})

install(TARGETS homeworks7_3 RUNTIME DESTINATION bin)

homeworks.cpp is as follows:

#include <iostream>
#include <list>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/imgproc/imgproc.hpp>

using namespace std;
using namespace cv;

int features_num = 1000; //maximum number of features
float scale_factor = 1.2f; //scale factor between pyramid levels
int levels_num = 8; //number of pyramid levels
int default_fast_threshold = 20; //FAST corner detection threshold
int min_fast_threshold = 7; //minimum FAST threshold
int EDGE_THRESHOLD = 19; //border margin that filters out edge effects
int PATCH_SIZE = 31;
int HALF_PATH_SIZE = 15;

//A quadtree node class, with its members and functions
class ExtractorNode{
public:
  ExtractorNode() : bNoMore(false) {}
  void DivideNode(ExtractorNode &n1, ExtractorNode &n2, ExtractorNode &n3, ExtractorNode &n4); //node splitting function
  std::vector<cv::KeyPoint> vKeys; //keypoints held by this node
  cv::Point2i UL,UR,BL,BR; //corner pixels of the node region: upper-left, upper-right, bottom-left, bottom-right
  std::list<ExtractorNode>::iterator lit; //iterator to this node in the node list
  bool bNoMore; //true when the node holds only one keypoint
};


//Definition of DivideNode
void ExtractorNode::DivideNode(ExtractorNode& n1, ExtractorNode& n2, ExtractorNode& n3, ExtractorNode& n4)
{
  /*
   *  UL ---------------------------------- UR
   *  |                  |                  |
   *  |        n1        |        n2        |
   *  |                  |                  |
   *  |------------------+------------------|
   *  |                  |                  |
   *  |        n3        |        n4        |
   *  |                  |                  |
   *  BL ---------------------------------- BR
   */
  const int halfx = ceil(static_cast<float>(UR.x-UL.x)/2);
  const int halfy = ceil(static_cast<float>(BR.y-UL.y)/2);
  
  // Split the rectangle into four quadrants
  n1.UL = UL;
  n1.UR = cv::Point2i(UL.x+halfx, UL.y);
  n1.BL = cv::Point2i(UL.x, UL.y+halfy);
  n1.BR = cv::Point2i(UL.x+halfx, UL.y+halfy);
  n1.vKeys.reserve(vKeys.size());
  
  n2.UL = n1.UR;
  n2.UR = UR;
  n2.BL = n1.BR;
  n2.BR = cv::Point2i(UR.x, UL.y+halfy);
  n2.vKeys.reserve(vKeys.size());
  
  n3.UL = n1.BL;
  n3.UR = n1.BR;
  n3.BL = BL;
  n3.BR = cv::Point2i(n1.BR.x, BL.y);
  n3.vKeys.reserve(vKeys.size());
  
  n4.UL = n3.UR;
  n4.UR = n2.BR;
  n4.BL = n3.BR;
  n4.BR = BR;
  n4.vKeys.reserve(vKeys.size());
  
  for (size_t i=0; i<vKeys.size(); i++)
  {
    const cv::KeyPoint &kp = vKeys[i];
    if (kp.pt.x < n1.UR.x)
    {
      if (kp.pt.y < n1.BR.y)
	n1.vKeys.push_back(kp);
      else
	n3.vKeys.push_back(kp);
    }
    else if (kp.pt.y < n1.BR.y)
      n2.vKeys.push_back(kp);
    else
      n4.vKeys.push_back(kp);
  }
  
  if (n1.vKeys.size() == 1)
    n1.bNoMore = true;
  if (n2.vKeys.size() == 1)
    n2.bNoMore = true;
  if (n3.vKeys.size() == 1)
    n3.bNoMore = true;
  if (n4.vKeys.size() == 1)
    n4.bNoMore = true;
  
}


//Sampling pattern for the descriptor (learned, roughly Gaussian); other patterns could also be used

static int bit_pattern_31[256*4] =
{
    8,-3, 9,5/*mean (0), correlation (0)*/,
    4,2, 7,-12/*mean (1.12461e-05), correlation (0.0437584)*/,
    -11,9, -8,2/*mean (3.37382e-05), correlation (0.0617409)*/,
    7,-12, 12,-13/*mean (5.62303e-05), correlation (0.0636977)*/,
    2,-13, 2,12/*mean (0.000134953), correlation (0.085099)*/,
    1,-7, 1,6/*mean (0.000528565), correlation (0.0857175)*/,
    -2,-10, -2,-4/*mean (0.0188821), correlation (0.0985774)*/,
    -13,-13, -11,-8/*mean (0.0363135), correlation (0.0899616)*/,
    -13,-3, -12,-9/*mean (0.121806), correlation (0.099849)*/,
    10,4, 11,9/*mean (0.122065), correlation (0.093285)*/,
    -13,-8, -8,-9/*mean (0.162787), correlation (0.0942748)*/,
    -11,7, -9,12/*mean (0.21561), correlation (0.0974438)*/,
    7,7, 12,6/*mean (0.160583), correlation (0.130064)*/,
    -4,-5, -3,0/*mean (0.228171), correlation (0.132998)*/,
    -13,2, -12,-3/*mean (0.00997526), correlation (0.145926)*/,
    -9,0, -7,5/*mean (0.198234), correlation (0.143636)*/,
    12,-6, 12,-1/*mean (0.0676226), correlation (0.16689)*/,
    -3,6, -2,12/*mean (0.166847), correlation (0.171682)*/,
    -6,-13, -4,-8/*mean (0.101215), correlation (0.179716)*/,
    11,-13, 12,-8/*mean (0.200641), correlation (0.192279)*/,
    4,7, 5,1/*mean (0.205106), correlation (0.186848)*/,
    5,-3, 10,-3/*mean (0.234908), correlation (0.192319)*/,
    3,-7, 6,12/*mean (0.0709964), correlation (0.210872)*/,
    -8,-7, -6,-2/*mean (0.0939834), correlation (0.212589)*/,
    -2,11, -1,-10/*mean (0.127778), correlation (0.20866)*/,
    -13,12, -8,10/*mean (0.14783), correlation (0.206356)*/,
    -7,3, -5,-3/*mean (0.182141), correlation (0.198942)*/,
    -4,2, -3,7/*mean (0.188237), correlation (0.21384)*/,
    -10,-12, -6,11/*mean (0.14865), correlation (0.23571)*/,
    5,-12, 6,-7/*mean (0.222312), correlation (0.23324)*/,
    5,-6, 7,-1/*mean (0.229082), correlation (0.23389)*/,
    1,0, 4,-5/*mean (0.241577), correlation (0.215286)*/,
    9,11, 11,-13/*mean (0.00338507), correlation (0.251373)*/,
    4,7, 4,12/*mean (0.131005), correlation (0.257622)*/,
    2,-1, 4,4/*mean (0.152755), correlation (0.255205)*/,
    -4,-12, -2,7/*mean (0.182771), correlation (0.244867)*/,
    -8,-5, -7,-10/*mean (0.186898), correlation (0.23901)*/,
    4,11, 9,12/*mean (0.226226), correlation (0.258255)*/,
    0,-8, 1,-13/*mean (0.0897886), correlation (0.274827)*/,
    -13,-2, -8,2/*mean (0.148774), correlation (0.28065)*/,
    -3,-2, -2,3/*mean (0.153048), correlation (0.283063)*/,
    -6,9, -4,-9/*mean (0.169523), correlation (0.278248)*/,
    8,12, 10,7/*mean (0.225337), correlation (0.282851)*/,
    0,9, 1,3/*mean (0.226687), correlation (0.278734)*/,
    7,-5, 11,-10/*mean (0.00693882), correlation (0.305161)*/,
    -13,-6, -11,0/*mean (0.0227283), correlation (0.300181)*/,
    10,7, 12,1/*mean (0.125517), correlation (0.31089)*/,
    -6,-3, -6,12/*mean (0.131748), correlation (0.312779)*/,
    10,-9, 12,-4/*mean (0.144827), correlation (0.292797)*/,
    -13,8, -8,-12/*mean (0.149202), correlation (0.308918)*/,
    -13,0, -8,-4/*mean (0.160909), correlation (0.310013)*/,
    3,3, 7,8/*mean (0.177755), correlation (0.309394)*/,
    5,7, 10,-7/*mean (0.212337), correlation (0.310315)*/,
    -1,7, 1,-12/*mean (0.214429), correlation (0.311933)*/,
    3,-10, 5,6/*mean (0.235807), correlation (0.313104)*/,
    2,-4, 3,-10/*mean (0.00494827), correlation (0.344948)*/,
    -13,0, -13,5/*mean (0.0549145), correlation (0.344675)*/,
    -13,-7, -12,12/*mean (0.103385), correlation (0.342715)*/,
    -13,3, -11,8/*mean (0.134222), correlation (0.322922)*/,
    -7,12, -4,7/*mean (0.153284), correlation (0.337061)*/,
    6,-10, 12,8/*mean (0.154881), correlation (0.329257)*/,
    -9,-1, -7,-6/*mean (0.200967), correlation (0.33312)*/,
    -2,-5, 0,12/*mean (0.201518), correlation (0.340635)*/,
    -12,5, -7,5/*mean (0.207805), correlation (0.335631)*/,
    3,-10, 8,-13/*mean (0.224438), correlation (0.34504)*/,
    -7,-7, -4,5/*mean (0.239361), correlation (0.338053)*/,
    -3,-2, -1,-7/*mean (0.240744), correlation (0.344322)*/,
    2,9, 5,-11/*mean (0.242949), correlation (0.34145)*/,
    -11,-13, -5,-13/*mean (0.244028), correlation (0.336861)*/,
    -1,6, 0,-1/*mean (0.247571), correlation (0.343684)*/,
    5,-3, 5,2/*mean (0.000697256), correlation (0.357265)*/,
    -4,-13, -4,12/*mean (0.00213675), correlation (0.373827)*/,
    -9,-6, -9,6/*mean (0.0126856), correlation (0.373938)*/,
    -12,-10, -8,-4/*mean (0.0152497), correlation (0.364237)*/,
    10,2, 12,-3/*mean (0.0299933), correlation (0.345292)*/,
    7,12, 12,12/*mean (0.0307242), correlation (0.366299)*/,
    -7,-13, -6,5/*mean (0.0534975), correlation (0.368357)*/,
    -4,9, -3,4/*mean (0.099865), correlation (0.372276)*/,
    7,-1, 12,2/*mean (0.117083), correlation (0.364529)*/,
    -7,6, -5,1/*mean (0.126125), correlation (0.369606)*/,
    -13,11, -12,5/*mean (0.130364), correlation (0.358502)*/,
    -3,7, -2,-6/*mean (0.131691), correlation (0.375531)*/,
    7,-8, 12,-7/*mean (0.160166), correlation (0.379508)*/,
    -13,-7, -11,-12/*mean (0.167848), correlation (0.353343)*/,
    1,-3, 12,12/*mean (0.183378), correlation (0.371916)*/,
    2,-6, 3,0/*mean (0.228711), correlation (0.371761)*/,
    -4,3, -2,-13/*mean (0.247211), correlation (0.364063)*/,
    -1,-13, 1,9/*mean (0.249325), correlation (0.378139)*/,
    7,1, 8,-6/*mean (0.000652272), correlation (0.411682)*/,
    1,-1, 3,12/*mean (0.00248538), correlation (0.392988)*/,
    9,1, 12,6/*mean (0.0206815), correlation (0.386106)*/,
    -1,-9, -1,3/*mean (0.0364485), correlation (0.410752)*/,
    -13,-13, -10,5/*mean (0.0376068), correlation (0.398374)*/,
    7,7, 10,12/*mean (0.0424202), correlation (0.405663)*/,
    12,-5, 12,9/*mean (0.0942645), correlation (0.410422)*/,
    6,3, 7,11/*mean (0.1074), correlation (0.413224)*/,
    5,-13, 6,10/*mean (0.109256), correlation (0.408646)*/,
    2,-12, 2,3/*mean (0.131691), correlation (0.416076)*/,
    3,8, 4,-6/*mean (0.165081), correlation (0.417569)*/,
    2,6, 12,-13/*mean (0.171874), correlation (0.408471)*/,
    9,-12, 10,3/*mean (0.175146), correlation (0.41296)*/,
    -8,4, -7,9/*mean (0.183682), correlation (0.402956)*/,
    -11,12, -4,-6/*mean (0.184672), correlation (0.416125)*/,
    1,12, 2,-8/*mean (0.191487), correlation (0.386696)*/,
    6,-9, 7,-4/*mean (0.192668), correlation (0.394771)*/,
    2,3, 3,-2/*mean (0.200157), correlation (0.408303)*/,
    6,3, 11,0/*mean (0.204588), correlation (0.411762)*/,
    3,-3, 8,-8/*mean (0.205904), correlation (0.416294)*/,
    7,8, 9,3/*mean (0.213237), correlation (0.409306)*/,
    -11,-5, -6,-4/*mean (0.243444), correlation (0.395069)*/,
    -10,11, -5,10/*mean (0.247672), correlation (0.413392)*/,
    -5,-8, -3,12/*mean (0.24774), correlation (0.411416)*/,
    -10,5, -9,0/*mean (0.00213675), correlation (0.454003)*/,
    8,-1, 12,-6/*mean (0.0293635), correlation (0.455368)*/,
    4,-6, 6,-11/*mean (0.0404971), correlation (0.457393)*/,
    -10,12, -8,7/*mean (0.0481107), correlation (0.448364)*/,
    4,-2, 6,7/*mean (0.050641), correlation (0.455019)*/,
    -2,0, -2,12/*mean (0.0525978), correlation (0.44338)*/,
    -5,-8, -5,2/*mean (0.0629667), correlation (0.457096)*/,
    7,-6, 10,12/*mean (0.0653846), correlation (0.445623)*/,
    -9,-13, -8,-8/*mean (0.0858749), correlation (0.449789)*/,
    -5,-13, -5,-2/*mean (0.122402), correlation (0.450201)*/,
    8,-8, 9,-13/*mean (0.125416), correlation (0.453224)*/,
    -9,-11, -9,0/*mean (0.130128), correlation (0.458724)*/,
    1,-8, 1,-2/*mean (0.132467), correlation (0.440133)*/,
    7,-4, 9,1/*mean (0.132692), correlation (0.454)*/,
    -2,1, -1,-4/*mean (0.135695), correlation (0.455739)*/,
    11,-6, 12,-11/*mean (0.142904), correlation (0.446114)*/,
    -12,-9, -6,4/*mean (0.146165), correlation (0.451473)*/,
    3,7, 7,12/*mean (0.147627), correlation (0.456643)*/,
    5,5, 10,8/*mean (0.152901), correlation (0.455036)*/,
    0,-4, 2,8/*mean (0.167083), correlation (0.459315)*/,
    -9,12, -5,-13/*mean (0.173234), correlation (0.454706)*/,
    0,7, 2,12/*mean (0.18312), correlation (0.433855)*/,
    -1,2, 1,7/*mean (0.185504), correlation (0.443838)*/,
    5,11, 7,-9/*mean (0.185706), correlation (0.451123)*/,
    3,5, 6,-8/*mean (0.188968), correlation (0.455808)*/,
    -13,-4, -8,9/*mean (0.191667), correlation (0.459128)*/,
    -5,9, -3,-3/*mean (0.193196), correlation (0.458364)*/,
    -4,-7, -3,-12/*mean (0.196536), correlation (0.455782)*/,
    6,5, 8,0/*mean (0.1972), correlation (0.450481)*/,
    -7,6, -6,12/*mean (0.199438), correlation (0.458156)*/,
    -13,6, -5,-2/*mean (0.211224), correlation (0.449548)*/,
    1,-10, 3,10/*mean (0.211718), correlation (0.440606)*/,
    4,1, 8,-4/*mean (0.213034), correlation (0.443177)*/,
    -2,-2, 2,-13/*mean (0.234334), correlation (0.455304)*/,
    2,-12, 12,12/*mean (0.235684), correlation (0.443436)*/,
    -2,-13, 0,-6/*mean (0.237674), correlation (0.452525)*/,
    4,1, 9,3/*mean (0.23962), correlation (0.444824)*/,
    -6,-10, -3,-5/*mean (0.248459), correlation (0.439621)*/,
    -3,-13, -1,1/*mean (0.249505), correlation (0.456666)*/,
    7,5, 12,-11/*mean (0.00119208), correlation (0.495466)*/,
    4,-2, 5,-7/*mean (0.00372245), correlation (0.484214)*/,
    -13,9, -9,-5/*mean (0.00741116), correlation (0.499854)*/,
    7,1, 8,6/*mean (0.0208952), correlation (0.499773)*/,
    7,-8, 7,6/*mean (0.0220085), correlation (0.501609)*/,
    -7,-4, -7,1/*mean (0.0233806), correlation (0.496568)*/,
    -8,11, -7,-8/*mean (0.0236505), correlation (0.489719)*/,
    -13,6, -12,-8/*mean (0.0268781), correlation (0.503487)*/,
    2,4, 3,9/*mean (0.0323324), correlation (0.501938)*/,
    10,-5, 12,3/*mean (0.0399235), correlation (0.494029)*/,
    -6,-5, -6,7/*mean (0.0420153), correlation (0.486579)*/,
    8,-3, 9,-8/*mean (0.0548021), correlation (0.484237)*/,
    2,-12, 2,8/*mean (0.0616622), correlation (0.496642)*/,
    -11,-2, -10,3/*mean (0.0627755), correlation (0.498563)*/,
    -12,-13, -7,-9/*mean (0.0829622), correlation (0.495491)*/,
    -11,0, -10,-5/*mean (0.0843342), correlation (0.487146)*/,
    5,-3, 11,8/*mean (0.0929937), correlation (0.502315)*/,
    -2,-13, -1,12/*mean (0.113327), correlation (0.48941)*/,
    -1,-8, 0,9/*mean (0.132119), correlation (0.467268)*/,
    -13,-11, -12,-5/*mean (0.136269), correlation (0.498771)*/,
    -10,-2, -10,11/*mean (0.142173), correlation (0.498714)*/,
    -3,9, -2,-13/*mean (0.144141), correlation (0.491973)*/,
    2,-3, 3,2/*mean (0.14892), correlation (0.500782)*/,
    -9,-13, -4,0/*mean (0.150371), correlation (0.498211)*/,
    -4,6, -3,-10/*mean (0.152159), correlation (0.495547)*/,
    -4,12, -2,-7/*mean (0.156152), correlation (0.496925)*/,
    -6,-11, -4,9/*mean (0.15749), correlation (0.499222)*/,
    6,-3, 6,11/*mean (0.159211), correlation (0.503821)*/,
    -13,11, -5,5/*mean (0.162427), correlation (0.501907)*/,
    11,11, 12,6/*mean (0.16652), correlation (0.497632)*/,
    7,-5, 12,-2/*mean (0.169141), correlation (0.484474)*/,
    -1,12, 0,7/*mean (0.169456), correlation (0.495339)*/,
    -4,-8, -3,-2/*mean (0.171457), correlation (0.487251)*/,
    -7,1, -6,7/*mean (0.175), correlation (0.500024)*/,
    -13,-12, -8,-13/*mean (0.175866), correlation (0.497523)*/,
    -7,-2, -6,-8/*mean (0.178273), correlation (0.501854)*/,
    -8,5, -6,-9/*mean (0.181107), correlation (0.494888)*/,
    -5,-1, -4,5/*mean (0.190227), correlation (0.482557)*/,
    -13,7, -8,10/*mean (0.196739), correlation (0.496503)*/,
    1,5, 5,-13/*mean (0.19973), correlation (0.499759)*/,
    1,0, 10,-13/*mean (0.204465), correlation (0.49873)*/,
    9,12, 10,-1/*mean (0.209334), correlation (0.49063)*/,
    5,-8, 10,-9/*mean (0.211134), correlation (0.503011)*/,
    -1,11, 1,-13/*mean (0.212), correlation (0.499414)*/,
    -9,-3, -6,2/*mean (0.212168), correlation (0.480739)*/,
    -1,-10, 1,12/*mean (0.212731), correlation (0.502523)*/,
    -13,1, -8,-10/*mean (0.21327), correlation (0.489786)*/,
    8,-11, 10,-6/*mean (0.214159), correlation (0.488246)*/,
    2,-13, 3,-6/*mean (0.216993), correlation (0.50287)*/,
    7,-13, 12,-9/*mean (0.223639), correlation (0.470502)*/,
    -10,-10, -5,-7/*mean (0.224089), correlation (0.500852)*/,
    -10,-8, -8,-13/*mean (0.228666), correlation (0.502629)*/,
    4,-6, 8,5/*mean (0.22906), correlation (0.498305)*/,
    3,12, 8,-13/*mean (0.233378), correlation (0.503825)*/,
    -4,2, -3,-3/*mean (0.234323), correlation (0.476692)*/,
    5,-13, 10,-12/*mean (0.236392), correlation (0.475462)*/,
    4,-13, 5,-1/*mean (0.236842), correlation (0.504132)*/,
    -9,9, -4,3/*mean (0.236977), correlation (0.497739)*/,
    0,3, 3,-9/*mean (0.24314), correlation (0.499398)*/,
    -12,1, -6,1/*mean (0.243297), correlation (0.489447)*/,
    3,2, 4,-8/*mean (0.00155196), correlation (0.553496)*/,
    -10,-10, -10,9/*mean (0.00239541), correlation (0.54297)*/,
    8,-13, 12,12/*mean (0.0034413), correlation (0.544361)*/,
    -8,-12, -6,-5/*mean (0.003565), correlation (0.551225)*/,
    2,2, 3,7/*mean (0.00835583), correlation (0.55285)*/,
    10,6, 11,-8/*mean (0.00885065), correlation (0.540913)*/,
    6,8, 8,-12/*mean (0.0101552), correlation (0.551085)*/,
    -7,10, -6,5/*mean (0.0102227), correlation (0.533635)*/,
    -3,-9, -3,9/*mean (0.0110211), correlation (0.543121)*/,
    -1,-13, -1,5/*mean (0.0113473), correlation (0.550173)*/,
    -3,-7, -3,4/*mean (0.0140913), correlation (0.554774)*/,
    -8,-2, -8,3/*mean (0.017049), correlation (0.55461)*/,
    4,2, 12,12/*mean (0.01778), correlation (0.546921)*/,
    2,-5, 3,11/*mean (0.0224022), correlation (0.549667)*/,
    6,-9, 11,-13/*mean (0.029161), correlation (0.546295)*/,
    3,-1, 7,12/*mean (0.0303081), correlation (0.548599)*/,
    11,-1, 12,4/*mean (0.0355151), correlation (0.523943)*/,
    -3,0, -3,6/*mean (0.0417904), correlation (0.543395)*/,
    4,-11, 4,12/*mean (0.0487292), correlation (0.542818)*/,
    2,-4, 2,1/*mean (0.0575124), correlation (0.554888)*/,
    -10,-6, -8,1/*mean (0.0594242), correlation (0.544026)*/,
    -13,7, -11,1/*mean (0.0597391), correlation (0.550524)*/,
    -13,12, -11,-13/*mean (0.0608974), correlation (0.55383)*/,
    6,0, 11,-13/*mean (0.065126), correlation (0.552006)*/,
    0,-1, 1,4/*mean (0.074224), correlation (0.546372)*/,
    -13,3, -9,-2/*mean (0.0808592), correlation (0.554875)*/,
    -9,8, -6,-3/*mean (0.0883378), correlation (0.551178)*/,
    -13,-6, -8,-2/*mean (0.0901035), correlation (0.548446)*/,
    5,-9, 8,10/*mean (0.0949843), correlation (0.554694)*/,
    2,7, 3,-9/*mean (0.0994152), correlation (0.550979)*/,
    -1,-6, -1,-1/*mean (0.10045), correlation (0.552714)*/,
    9,5, 11,-2/*mean (0.100686), correlation (0.552594)*/,
    11,-3, 12,-8/*mean (0.101091), correlation (0.532394)*/,
    3,0, 3,5/*mean (0.101147), correlation (0.525576)*/,
    -1,4, 0,10/*mean (0.105263), correlation (0.531498)*/,
    3,-6, 4,5/*mean (0.110785), correlation (0.540491)*/,
    -13,0, -10,5/*mean (0.112798), correlation (0.536582)*/,
    5,8, 12,11/*mean (0.114181), correlation (0.555793)*/,
    8,9, 9,-6/*mean (0.117431), correlation (0.553763)*/,
    7,-4, 8,-12/*mean (0.118522), correlation (0.553452)*/,
    -10,4, -10,9/*mean (0.12094), correlation (0.554785)*/,
    7,3, 12,4/*mean (0.122582), correlation (0.555825)*/,
    9,-7, 10,-2/*mean (0.124978), correlation (0.549846)*/,
    7,0, 12,-2/*mean (0.127002), correlation (0.537452)*/,
    -1,-6, 0,-11/*mean (0.127148), correlation (0.547401)*/
};




int main(int argc, char **argv) {
  
    //Load the image in grayscale: cv::FAST and the intensity-centroid/BRIEF code below expect a single-channel image
    Mat img = imread("/home/fuhang/projects/homeworks7_3/1.png", CV_LOAD_IMAGE_GRAYSCALE);
    if(img.empty())
      cout << "No picture was found ..." << endl;
    else
      cout << "Img loaded successed!" << endl;

    vector<int> features_num_per_level; //number of features per pyramid level
    vector<float> vec_scale_factor; //scale factor of each pyramid level
    
    //Build the image pyramid
            
    //initialize the scale factor of every level
    vec_scale_factor.resize(levels_num);
    vec_scale_factor[0] = 1.0f;
    for (int i=1; i<levels_num; i++)
    {
      vec_scale_factor[i] = vec_scale_factor[i-1] * scale_factor;    
    }
    
    std::vector<cv::Mat> vec_image_pyramid(levels_num); //image pyramid container
    
    for (int level=0; level<levels_num; ++level)
    {
      float scale = 1.0f / vec_scale_factor[level];
      cv::Size sz(cvRound((float)img.cols*scale), cvRound((float)img.rows*scale)); //size of this level
    
      if(level == 0)
      {
	vec_image_pyramid[level] = img;	
      }
      else
      {
	resize(vec_image_pyramid[level-1], vec_image_pyramid[level], sz, 0, 0, CV_INTER_LINEAR); //bilinear interpolation
      }
      
      //visualize the pyramid while it is built
      cout << "Building pyramid level " << level+1 << endl;
      imshow("img", vec_image_pyramid[level]);
      waitKey(100);
    }
    
    
    cout << "*****************************************" << endl;
    
    //Distribute the keypoints with a quadtree
    
    //detect keypoints on every pyramid level
    std::vector<std::vector<cv::KeyPoint>> all_keypoints; //keypoints of every level: a vector of vectors
    all_keypoints.resize(levels_num);
    
    const float border_width = 30; //size of the detection grid cells
    
    for (int level=0; level<levels_num; ++level)
    {
      // Bounds of the detection region on this level
      const int min_border_x = EDGE_THRESHOLD - 3; //margin that filters out edge effects
      const int min_border_y = min_border_x;
      const int max_border_x = vec_image_pyramid[level].cols - EDGE_THRESHOLD + 3;
      const int max_border_y = vec_image_pyramid[level].rows - EDGE_THRESHOLD + 3;
      
      //keypoints of this level awaiting distribution
      std::vector<cv::KeyPoint> vec_to_distribute_keys;
      vec_to_distribute_keys.reserve(features_num * 10);
      //width and height of the detection region
      const float width = max_border_x - min_border_x;
      const float height = max_border_y - min_border_y;
      //number of grid rows and columns
      const int cols = width/border_width;
      const int rows = height/border_width;
      //recompute the cell size
      const int width_cell = ceil(width/cols);
      const int height_cell = ceil(height/rows);
      cout << "Level " << level+1 << " is split into " << rows << " rows and " << cols << " columns; ";
      
      //detect inside each cell
      for (int i=0; i<rows; i++)
      {
	const float ini_y = min_border_y + i * height_cell; //lower y coordinate of this cell
	float max_y = ini_y + height_cell + 6; //upper y coordinate of this cell (6-pixel overlap)
	
	if(ini_y >= max_border_y - 3)
	  continue;
	if(max_y >=max_border_y)
	  max_y = max_border_y;
	
	for (int j=0; j<cols; j++)
	{
	  const float ini_x = min_border_x + j * width_cell;
	  float max_x = ini_x + width_cell + 6;
	  
	  if(ini_x >= max_border_x - 6) //images are generally wider than tall
	    continue;
	  if(max_x > max_border_x)
	    max_x = max_border_x;
	  
	  std::vector<cv::KeyPoint> vec_keys_cell; //FAST keypoints detected in this cell
	  cv::FAST(vec_image_pyramid[level].rowRange(ini_y, max_y).colRange(ini_x, max_x), vec_keys_cell, default_fast_threshold, true);
	  
	  //if FAST finds nothing, retry with the lower threshold
	  if (vec_keys_cell.empty())
	  {
	    cv::FAST(vec_image_pyramid[level].rowRange(ini_y, max_y).colRange(ini_x, max_x), vec_keys_cell, min_fast_threshold, true);
	  }
	  //compute the keypoint positions
	  if (!vec_keys_cell.empty())
	  {
	    for (std::vector<cv::KeyPoint>::iterator vit = vec_keys_cell.begin(); vit != vec_keys_cell.end(); vit++) //iterate over this cell's keypoints
	    {
	      //record the keypoint's absolute coordinates in the detection region
	      (*vit).pt.x += j * width_cell;
	      (*vit).pt.y += i * height_cell;
	      vec_to_distribute_keys.push_back(*vit);	      
	    }
	  }
	}
      }
      
      cout << "this level detected " << vec_to_distribute_keys.size() << " keypoints" << endl;
      
      std::vector<cv::KeyPoint> & keypoints = all_keypoints[level];
      keypoints.reserve(features_num);
    
      // Start the quadtree distribution
      // Initialize the root nodes; since width and height are close, there is usually a single initial node
      const int init_node_num = round(static_cast<float>(max_border_x - min_border_x)/(max_border_y - min_border_y));
      cout << "initially " << init_node_num << " node(s)";
      //horizontal spacing between the root nodes
      const float interval_x = static_cast<float>(max_border_x - min_border_x)/init_node_num;
      cout << ", node spacing " << interval_x << "; ";
      
      
      //Quadtree construction
      //container of pointers to the root nodes
      std::vector<ExtractorNode*> init_nodes;
      init_nodes.resize(init_node_num);
      //list of nodes produced by splitting
      std::list<ExtractorNode> list_nodes;
      //set up the root nodes
      for (int i=0; i<init_node_num; i++)
      {
	//quadtree node ni
	ExtractorNode ni;
	ni.UL = cv::Point2i(interval_x * static_cast<float>(i), 0);
	ni.UR = cv::Point2i(interval_x * static_cast<float>(i+1), 0);
	ni.BL = cv::Point2i(ni.UL.x, max_border_y-min_border_y);
	ni.BR = cv::Point2i(ni.UR.x, max_border_y-min_border_y);
	ni.vKeys.reserve(vec_to_distribute_keys.size());
	
	list_nodes.push_back(ni);
	init_nodes[i] = &list_nodes.back(); //pointer to the element just appended
      }
      
      //assign the keypoints to the root nodes
      for (size_t i=0; i<vec_to_distribute_keys.size(); i++)
      {
	const cv::KeyPoint &kp = vec_to_distribute_keys[i];
	init_nodes[kp.pt.x/interval_x]->vKeys.push_back(kp);
      }
      
      //iterator over the node list
      std::list<ExtractorNode>::iterator lit = list_nodes.begin();
      //traverse the node list
      while (lit != list_nodes.end())
      {
	//a node holding a single keypoint is not split further
	if (lit->vKeys.size() == 1)
	{
	  lit->bNoMore = true;
	  lit++;
	}
	else if (lit->vKeys.empty())
	  lit = list_nodes.erase(lit); //drop nodes without keypoints
	else
	  lit++;
      }
      
      //termination flag
      bool is_finish = false;
      //iteration counter
      int iteration = 0;
      
      //pairs of (keypoint count, node), used to decide which nodes to split next
      std::vector<std::pair<int, ExtractorNode*>> keys_size_and_node;
      keys_size_and_node.reserve(list_nodes.size() * 4);
      
      while (!is_finish)
      {
	iteration++;
	//node count before this pass, to detect whether any node was split
	int pre_size = list_nodes.size();
	
	lit = list_nodes.begin();
	//number of nodes that can still be expanded
	int to_expand_num = 0;
	keys_size_and_node.clear();
	
	while (lit != list_nodes.end())
	{
	  if (lit->bNoMore)
	  {
	    lit++;
	    continue;
	  }
	  else
	  {
	    //more than one keypoint: keep splitting
	    ExtractorNode n1, n2, n3, n4;
	    lit -> DivideNode(n1, n2, n3, n4);
	    
	    //check each child and keep the ones that received keypoints
	    if (n1.vKeys.size() > 0)
	    {
	      list_nodes.push_front(n1);
	      if (n1.vKeys.size() > 1)
	      {
		to_expand_num++;
		keys_size_and_node.push_back(std::make_pair(n1.vKeys.size(), &list_nodes.front()));
		list_nodes.front().lit = list_nodes.begin();
	      }
	    }
	    
	    if (n2.vKeys.size() > 0)
	    {
	      list_nodes.push_front(n2);
	      if (n2.vKeys.size() > 1)
	      {
		to_expand_num++;
		keys_size_and_node.push_back(std::make_pair(n2.vKeys.size(), &list_nodes.front()));
		list_nodes.front().lit = list_nodes.begin();
	      }
	    }
	    
	    if (n3.vKeys.size() > 0)
	    {
	      list_nodes.push_front(n3);
	      if (n3.vKeys.size() > 1)
	      {
		to_expand_num++;
		keys_size_and_node.push_back(std::make_pair(n3.vKeys.size(), &list_nodes.front()));
		list_nodes.front().lit = list_nodes.begin();
	      }
	    }
	    
	    if (n4.vKeys.size() > 0)
	    {
	      list_nodes.push_front(n4);
	      if (n4.vKeys.size() > 1)
	      {
		to_expand_num++;
		keys_size_and_node.push_back(std::make_pair(n4.vKeys.size(), &list_nodes.front()));
		list_nodes.front().lit = list_nodes.begin();
	      }
	    }
	    
	    lit = list_nodes.erase(lit);
	    continue;
	  }
	}

	
	//precompute how many features each level should get
	features_num_per_level.resize(levels_num);
	float factor = 1.0f / scale_factor;
	float desired_features_per_scale = features_num * (1-factor) / (1-(float)pow((double)factor, (double)levels_num)); //geometric series
	int sum_features = 0;
	for (int level=0; level<levels_num; level++)
	{
	  features_num_per_level[level] = cvRound(desired_features_per_scale);
	  sum_features += features_num_per_level[level];
	  desired_features_per_scale *= factor;
	}
	features_num_per_level[levels_num-1] = std::max(features_num-sum_features, 0);
	

	// Stop when the node count reaches this level's feature budget, or when no node could be split any more.
	if ((int)list_nodes.size() >= features_num_per_level[level] || (int)list_nodes.size() == pre_size)
	{
	  is_finish = true;
	}
	else if (((int)list_nodes.size() + to_expand_num * 3) > features_num_per_level[level]) //each split adds at most 3 nodes, so the next pass may exceed the budget: make it the final pass, splitting the fullest nodes first
	{
	  
	  while (!is_finish)
	  {
	    pre_size = list_nodes.size();

	    std::vector<std::pair<int, ExtractorNode*> > prev_size_and_node = keys_size_and_node;
	    keys_size_and_node.clear();

	    sort(prev_size_and_node.begin(), prev_size_and_node.end());
	    for (int j = prev_size_and_node.size()-1; j >= 0; j--)
	    {
	      
	      ExtractorNode n1, n2, n3, n4;
	      prev_size_and_node[j].second->DivideNode(n1, n2, n3, n4);

	      //check the children of this split as before
	      if (n1.vKeys.size() > 0)
	      {
		list_nodes.push_front(n1);
		if (n1.vKeys.size() > 1)
		{
		  keys_size_and_node.push_back(std::make_pair(n1.vKeys.size(), &list_nodes.front()));
		  list_nodes.front().lit = list_nodes.begin();
		}
	      }
	      if (n2.vKeys.size() > 0)
	      {
		list_nodes.push_front(n2);
		if (n2.vKeys.size() > 1)
		{
		  keys_size_and_node.push_back(std::make_pair(n2.vKeys.size(), &list_nodes.front()));
		  list_nodes.front().lit = list_nodes.begin();
		}
	      }
	      if (n3.vKeys.size() > 0)
	      {
		list_nodes.push_front(n3);
		if (n3.vKeys.size() > 1)
		{
		  keys_size_and_node.push_back(std::make_pair(n3.vKeys.size(), &list_nodes.front()));
		  list_nodes.front().lit = list_nodes.begin();
		}
	      }
	      if (n4.vKeys.size() > 0)
	      {
		list_nodes.push_front(n4);
		if (n4.vKeys.size() > 1)
		{
		  keys_size_and_node.push_back(std::make_pair(n4.vKeys.size(), &list_nodes.front()));
		  list_nodes.front().lit = list_nodes.begin();
		}
	      }
	      
	      list_nodes.erase(prev_size_and_node[j].second->lit);
	      if ((int)list_nodes.size() >= features_num_per_level[level])
		break;
	     }
	      if ((int)list_nodes.size() >= features_num_per_level[level] || (int)list_nodes.size() == pre_size)
		is_finish = true;
	    }
	}

      }

	
      //keep only the best keypoint per node, judged by KeyPoint::response
      std::vector<cv::KeyPoint> result_keys;
      result_keys.reserve(features_num_per_level[level]);
      for (std::list<ExtractorNode>::iterator lit = list_nodes.begin(); lit != list_nodes.end(); lit++)
      {
	  std::vector<cv::KeyPoint> &node_keys = lit->vKeys;
	  cv::KeyPoint* keypoint = &node_keys[0];
	  float max_response = keypoint->response;

	  for (size_t k = 1; k < node_keys.size(); k++)
	  {
	    if (node_keys[k].response > max_response)
	    {
	      keypoint = &node_keys[k];
	      max_response = node_keys[k].response;
	    }
	  }

	  result_keys.push_back(*keypoint);
	}
    
    keypoints = result_keys;
    
    const int scaled_path_size = PATCH_SIZE * vec_scale_factor[level];
    //convert keypoint attributes back to full-image values
    const int nkps = keypoints.size();
    for (int i=0; i<nkps; i++)
    {
      keypoints[i].pt.x += min_border_x;
      keypoints[i].pt.y += min_border_y;
      keypoints[i].octave = level;
      keypoints[i].size = scaled_path_size;
    }
    
   cout << "经过四叉树筛选,第" << level+1 << "层剩余" << result_keys.size() << "个特征点" << endl;
   
   } 
   
   
   cout << "*****************************************" << endl;
   
   //Compute the keypoint orientations. The exercise does not ask for this, but it helps in understanding ORB_SLAM2, so it is included.
       
   //Precompute, for each v coordinate inside the patch, the largest valid u
   vector<int> u_max; //within radius HALF_PATH_SIZE: at row v, the maximum |u| still inside the circular patch
   u_max.resize(HALF_PATH_SIZE + 1);
   //split the v range in two parts; x and y are treated symmetrically
   int v, v0, vmax = cvFloor(HALF_PATH_SIZE * sqrt(2.f) / 2 + 1);
   int vmin = cvCeil(HALF_PATH_SIZE * sqrt(2.f)/2);
   //Pythagorean theorem
   const double hp2 = HALF_PATH_SIZE * HALF_PATH_SIZE;
   for (v = 0; v<=vmax; ++v)
      u_max[v] = cvRound(sqrt(hp2 - v*v));
    //make sure the patch is a symmetric circle
    for (v = HALF_PATH_SIZE, v0 = 0; v>=vmin; --v)
    {
      while (u_max[v0] == u_max[v0+1])
	++v0;
      u_max[v] = v0;
      ++v0;
     }
      
   for (int level=0; level<levels_num; level++)
   {
     //reference to this level's keypoints
      vector<KeyPoint>& keypoints = all_keypoints[level];

      //process each keypoint in turn
      for (size_t i=0; i<keypoints.size(); i++)
      {
	//image moments of the patch: m_10 weights intensities by u, m_01 by v
	int m_01 = 0, m_10 = 0;
	const uchar* center = &vec_image_pyramid[level].at<uchar> (cvRound(keypoints[i].pt.y), cvRound(keypoints[i].pt.x)); //pointer to the keypoint's center pixel (row y, column x)
	
	//the v=0 row is computed separately
	for (int u = -HALF_PATH_SIZE; u<HALF_PATH_SIZE; ++u)
	  m_10 += u * center[u];
	
	int step = (int)vec_image_pyramid[level].step1();
	
	for (int v=1; v<=HALF_PATH_SIZE; ++v)
	{
	  int v_sum = 0;
	  int d = u_max[v];
	  for (int u=-d; u<=d; ++u)
	  {
	    int val_plus = center[u+v*step], val_minus = center[u-v*step];
	    v_sum += (val_plus-val_minus);
	    m_10 += u * (val_plus+val_minus);
	  }
	  m_01 += v * v_sum;
	 }
	
	keypoints[i].angle = cv::fastAtan2((float)m_01, (float)m_10);
        //cout << "检测到第" << level << "层,第" << i << "个特征点角度为" << keypoints[i].angle << "度" << endl;
       }

     }

   
   //Build the descriptors
   //copy the sampling pattern
   //bit_pattern_31 stores the positions of the 256 learned point pairs relative to the keypoint; each group of four values is pair_l.x, pair_l.y, pair_r.x, pair_r.y.
   //256 pairs give a 256-bit descriptor, 32 bytes per keypoint; you can imshow the descriptor matrix to see what it looks like
   vector<Point> pattern;   
   const int num_points = 512;
   const Point* pattern0 = (const Point*)bit_pattern_31;
   std::copy(pattern0, pattern0+num_points, std::back_inserter(pattern)); //copy the 512 points defined above into pattern

   
   Mat descriptors;
   int num_keypoints = 0;
   for (int level=0; level<levels_num; ++level)
     num_keypoints += (int)all_keypoints[level].size(); //total keypoint count over all levels
   cout << "There are " << num_keypoints << " keypoints in total; now computing descriptors..." << endl;
   
   //container for all output keypoints (reserve, so the per-level inserts below append correctly)
   vector<KeyPoint> out_put_keypoints;
   out_put_keypoints.reserve(num_keypoints);
   
   if (num_keypoints !=0)
     descriptors.create(num_keypoints, 32, CV_8U);
   
   
   int offset = 0;
   for (int level=0; level<levels_num; ++level)
   {
      vector<KeyPoint>& keypoints = all_keypoints[level];
      
      int num_keypoints_level = (int)keypoints.size();
      
      if (num_keypoints_level == 0)
	continue;
      
      //Gaussian blur to suppress noise before computing BRIEF
      cout << "Gaussian-blurring pyramid level " << level+1 << "..." << endl;
      Mat working_mat = vec_image_pyramid[level].clone();
      GaussianBlur(working_mat, working_mat, Size(7,7), 2, 2, BORDER_REFLECT_101);
      imshow("img", working_mat);
      waitKey(100);
      
      //descriptors of this level: a view into the full matrix, zeroed in place so the rows of `descriptors` really get filled
      Mat descriptors_per_level = descriptors.rowRange(offset, offset+num_keypoints_level);
      descriptors_per_level.setTo(0);
      
      const float factorPI = (float)(CV_PI/180.f);      
      for (size_t i=0; i<keypoints.size(); i++)
      {
	uchar* desc = descriptors_per_level.ptr((int)i);
	Point* ppattern = &pattern[0];

	float angle = (float)keypoints[i].angle * factorPI; //degrees to radians
	float a = (float)cos(angle), b = (float)sin(angle);
	
	const uchar* center = &working_mat.at<uchar>(cvRound(keypoints[i].pt.y), cvRound(keypoints[i].pt.x));
	const int step = (int)working_mat.step;
        
	//sample one pattern pixel, rotated by the keypoint angle
	#define GET_VALUE(idx) \
	  center[ cvRound(ppattern[idx].x*b + ppattern[idx].y*a)*step + \
	          cvRound(ppattern[idx].x*a - ppattern[idx].y*b)]

	 //loop 32 times; each pass consumes 16 pattern points = 8 point pairs = 8 comparison bits, so 16*32 = 512 points give a 32-byte descriptor
	 for (int i=0; i<32; ++i, ppattern += 16)
	 {
	   int t0, t1, val;
	   
	   t0 = GET_VALUE(0); 
	   t1 = GET_VALUE(1);
	   val = t0 < t1;
	   
	   t0 = GET_VALUE(2);
	   t1 = GET_VALUE(3);
	   val |= (t0 < t1) << 1;
	   
	   t0 = GET_VALUE(4); 
	   t1 = GET_VALUE(5);
           val |= (t0 < t1) << 2;
        
	   t0 = GET_VALUE(6); 
	   t1 = GET_VALUE(7);
           val |= (t0 < t1) << 3;
        
	   t0 = GET_VALUE(8); 
	   t1 = GET_VALUE(9);
           val |= (t0 < t1) << 4;
        
	   t0 = GET_VALUE(10); 
	   t1 = GET_VALUE(11);
           val |= (t0 < t1) << 5;
        
	   t0 = GET_VALUE(12); 
	   t1 = GET_VALUE(13);
           val |= (t0 < t1) << 6;
        
	   t0 = GET_VALUE(14); 
	   t1 = GET_VALUE(15);
           val |= (t0 < t1) << 7;

           desc[i] = (uchar)val; //32 bytes x 8 bits = a 256-bit descriptor
	 }

       #undef GET_VALUE
      }
   
      offset += num_keypoints_level;
   
      //rescale the keypoints back to the coordinates of the original image
      if (level != 0)
      {
        float scale = vec_scale_factor[level];
        for (vector<KeyPoint>::iterator keypoint = keypoints.begin(), keypoint_end = keypoints.end(); keypoint != keypoint_end; ++keypoint)
	  keypoint->pt *= scale;
      }  
    
    out_put_keypoints.insert(out_put_keypoints.end(), keypoints.begin(), keypoints.end());
    
    }
    
    destroyAllWindows();
    
    Mat out_img1;
    drawKeypoints(img, out_put_keypoints, out_img1, Scalar::all(-1), DrawMatchesFlags::DEFAULT);
    imshow("四叉树法ORB", out_img1);
    imwrite("NewORB.png", out_img1);
    waitKey(0);
  
    vector<KeyPoint> orb_keypoints;
    Ptr<ORB> orb = ORB::create(1000);    
    orb->detect(img,orb_keypoints);
    cout << "共找到了" << orb_keypoints.size() << "个特征点;" << endl;   
    
    cv::Mat img_orb;
    cv::drawKeypoints(img, orb_keypoints, img_orb, cv::Scalar::all(-1), cv::DrawMatchesFlags::DEFAULT);
    cv::imshow("普通ORB算法", img_orb);
    cv::imwrite("NormalORB.png", img_orb);
    cv::waitKey(0);
    
    return 0;
      
}


The results are shown below: the first image is ORB with quadtree-based uniform distribution, the second the stock OpenCV ORB:

The quadtree-based uniform method changes little of the classic ORB pipeline, which remains: 1. build the image pyramid; 2. split every level into a grid and extract FAST corners in each cell; 3. score the keypoints in each quadtree node and keep the best one as its representative; 4. compute each keypoint's orientation with the intensity-centroid method; 5. Gaussian-blur each pyramid level; 6. compute BRIEF descriptors for the keypoints (using the 256 point pairs optimized by the heuristic search in the original paper); 7. return the keypoints and descriptors. The per-level feature budget is computed with a geometric series, shown below.
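
With total budget N, L pyramid levels and inter-level scale factor s, writing f = 1/s, the features_num_per_level computation in the code amounts to

N_0 = N \frac{1-f}{1-f^L}, \qquad N_l = N_0 f^l \quad (l = 0, \dots, L-1),

with the last level taking whatever remains so that the counts sum to N.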

 

4. Study why FLANN can handle the matching problem quickly. Besides FLANN, what other means can accelerate matching?

FLANN stands for Fast Library for Approximate Nearest Neighbors. It is a collection of nearest-neighbor algorithms for large, high-dimensional data sets, and on large data it outperforms BFMatcher. Matching is really a similarity search over feature vectors. The simplest approach computes the distance of every candidate pair; such exhaustive traversal is far too slow for large data sets. The classic kd-tree with backtracking search cuts out unnecessary comparisons and improves efficiency, but in high dimensions its advantage often disappears or even reverses. The randomized kd-tree variant builds kd-trees over randomly chosen dimensions with large variance and searches them with backtracking. Another structure is the hierarchical k-means tree, which first clusters the data with k-means (an algorithm we will meet again in loop closure detection) and then builds the tree. All of these are much faster than direct brute-force matching. There is a good blog post on kd-trees: https://www.cnblogs.com/lysuns/articles/4710712.html
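
In OpenCV, FLANN is exposed through cv::FlannBasedMatcher, and the index type must match the descriptor type: the default KD-tree index expects float descriptors (SIFT/SURF), while binary descriptors such as ORB need an LSH index. A minimal sketch (descriptors_1 and descriptors_2 are assumed to come from a detector as in question 2):

// float descriptors (SIFT/SURF): randomized KD-trees
cv::FlannBasedMatcher kdtree_matcher(cv::makePtr<cv::flann::KDTreeIndexParams>(4));
// binary descriptors (ORB/BRISK): locality-sensitive hashing
cv::FlannBasedMatcher lsh_matcher(cv::makePtr<cv::flann::LshIndexParams>(12, 20, 2));
std::vector<cv::DMatch> matches;
lsh_matcher.match(descriptors_1, descriptors_2, matches);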

Techniques for accelerating matching are usually discussed for the classic SIFT pipeline. Common ones include principal component analysis, which reduces SIFT descriptors from 128 to 36 dimensions and reportedly more than triples matching speed; GPU acceleration, which can make matching over ten times faster; FPGA acceleration, reportedly around 10x; and the later VF-SIFT (very fast SIFT), whose core idea is to extract four feature angles from each SIFT feature and compare only features whose angles fall into the same interval, avoiding a large amount of unnecessary search, reportedly some 1250x faster than plain search.

 

5. Replace the EPnP used by the demo program with other PnP methods, and study how they work.

Besides EPnP, OpenCV provides two other methods, the iterative method and P3P; OpenCV 3 additionally provides DLS and UPnP. First, recall what PnP is. PnP, Perspective-n-Point, is the problem of estimating the camera pose given n 3D points and their projected positions. The idea of EPnP (Efficient PnP) is that any 3D point can be written as a weighted combination of 4 non-coplanar control points; the projections of the n 3D points onto the image plane together with the control-point weights build a 12×12 matrix, whose eigenvectors yield the control points' coordinates in the camera frame, and a POSIT-style orthographic-projection step then recovers the camera pose. The iterative method minimizes the reprojection error iteratively: it first solves with the DLT (direct linear transform) and then refines with the LM algorithm; the DLT solution is clearly not exact, and this method can only be initialized from 4 coplanar feature points. P3P needs 3 pairs of 3D-2D matches; the triangle projection relations give a system of equations that can be transformed into two bivariate quadratic equations and solved. The overall idea of DLS (Direct Least-Squares) is to model PnP as a nonlinear least-squares problem, redefine the LS model so the parameters can be reduced in dimension, build a Macaulay matrix, and solve the resulting unconstrained LSM problem. UPnP (Uncalibrated PnP) differs little from EPnP, except that it also estimates the focal length, which makes it suitable for uncalibrated settings.
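
In OpenCV 3 all of these solvers are selected through the last flag of cv::solvePnP. A sketch using the same variables as the program in question 6 below (pts_3d, pts_2d_2, K); note that SOLVEPNP_P3P requires exactly four point pairs:

cv::Mat r, t;
cv::solvePnP(pts_3d, pts_2d_2, K, cv::Mat(), r, t, false, cv::SOLVEPNP_ITERATIVE); // DLT + LM refinement (the default)
cv::solvePnP(pts_3d, pts_2d_2, K, cv::Mat(), r, t, false, cv::SOLVEPNP_EPNP);      // EPnP
cv::solvePnP(pts_3d, pts_2d_2, K, cv::Mat(), r, t, false, cv::SOLVEPNP_DLS);       // DLS
cv::solvePnP(pts_3d, pts_2d_2, K, cv::Mat(), r, t, false, cv::SOLVEPNP_UPNP);      // UPnP, additionally estimates focal length
// cv::SOLVEPNP_P3P needs exactly 4 correspondences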

 

6. In the PnP optimization, how should the code be written to also take the first camera's observations into account? How does the result change?

Starting from pose_estimation_3d2d.cpp from the book, I modified its bundleAdjustment function (OpenCV 3 environment); the code is shown below:

The CMakeLists.txt is as follows:

cmake_minimum_required(VERSION 2.6)
project(homeworks7_6)

set(CMAKE_BUILD_TYPE "Release")
set(CMAKE_CXX_FLAGS "-std=c++11 -O3")

list(APPEND CMAKE_MODULE_PATH ${PROJECT_SOURCE_DIR}/cmake_modules)

set(OpenCV_DIR "/usr/local/opencv3/share/OpenCV")
find_package(OpenCV REQUIRED)
find_package(G2O REQUIRED)
find_package(CSparse REQUIRED)

include_directories(
  ${OpenCV_INCLUDE_DIRS}
  ${G2O_INCLUDE_DIRS}
  ${CSPARSE_INCLUDE_DIR}
  "/usr/include/eigen3/"
  )
  

add_executable(homeworks7_6 homeworks7_6.cpp)

target_link_libraries(homeworks7_6 ${OpenCV_LIBS} ${CSPARSE_LIBRARY} g2o_core g2o_stuff g2o_types_sba g2o_csparse_extension)

install(TARGETS homeworks7_6 RUNTIME DESTINATION bin)

homeworks7_6.cpp is as follows:

#include <iostream>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/calib3d/calib3d.hpp>

#include <Eigen/Core>
#include <Eigen/Geometry>

#include <g2o/core/base_vertex.h>
#include <g2o/core/base_unary_edge.h>
#include <g2o/core/block_solver.h>
#include <g2o/core/optimization_algorithm_levenberg.h>
#include <g2o/solvers/csparse/linear_solver_csparse.h>
#include <g2o/types/sba/types_six_dof_expmap.h>


using namespace std;
using namespace cv;

//ORB feature detection and matching
void find_feature_matches(const cv::Mat& img_1, const cv::Mat& img_2, std::vector<cv::KeyPoint>& keypoints_1, std::vector<cv::KeyPoint>& keypoints_2, std::vector<cv::DMatch>& matches)
{
  Mat descriptors_1, descriptors_2;
  
  cv::Ptr<cv::FeatureDetector> detector = cv::ORB::create();
  cv::Ptr<cv::DescriptorExtractor> descriptor = cv::ORB::create();
  cv::Ptr<cv::DescriptorMatcher> matcher = cv::DescriptorMatcher::create("BruteForce-Hamming");
  
  detector->detect(img_1, keypoints_1);
  detector->detect(img_2, keypoints_2);
  
  descriptor->compute(img_1, keypoints_1, descriptors_1);
  descriptor->compute(img_2, keypoints_2, descriptors_2);
  
  vector<DMatch> matchers;
  matcher->match(descriptors_1, descriptors_2, matchers);
  
  double min_dist = 10000, max_dist = 0;
  for (int i=0; i<descriptors_1.rows; i++)
  {
    double dist = matchers[i].distance;
    if (dist < min_dist) min_dist = dist;
    if (dist > max_dist) max_dist = dist;
  }
  
  for (int i=0; i<descriptors_1.rows; i++)
  {
    if (matchers[i].distance <= max(2*min_dist, 30.0))
      matches.push_back(matchers[i]);
  }
  
}


Point2d pixel2cam (const Point2d& p, const Mat& K)
{
  return Point2d
  (
    (p.x - K.at<double>(0,2)) / (K.at<double>(0,0)),
    (p.y - K.at<double>(1,2)) / (K.at<double>(1,1))
  );
}


void bundleAdjustment(const vector<Point3f> points_3d, const vector<Point2f> points_2d_1, const vector<Point2f> points_2d_2, const Mat& K, Mat& R, Mat& t)
{
  //pose dimension 6, landmark dimension 3
  typedef g2o::BlockSolver<g2o::BlockSolverTraits<6,3>> Block;
  //linear solver based on CSparse factorization
  Block::LinearSolverType* linearSolver = new g2o::LinearSolverCSparse<Block::PoseMatrixType>();
  //block solver
  Block* solver_ptr = new Block (linearSolver);
  
  //Levenberg-Marquardt descent
  g2o::OptimizationAlgorithmLevenberg* solver = new g2o::OptimizationAlgorithmLevenberg(solver_ptr);
  //the graph model
  g2o::SparseOptimizer optimizer;
  //set the solver
  optimizer.setAlgorithm(solver);
  
  
  //vertex for the first camera
  g2o::VertexSE3Expmap* poseOne = new g2o::VertexSE3Expmap();
  poseOne->setId(0);
  poseOne->setFixed(true); //keep the first pose fixed to anchor the gauge
  poseOne->setEstimate(g2o::SE3Quat()); 
  optimizer.addVertex(poseOne);
  
  //vertex for the second camera
  g2o::VertexSE3Expmap* poseTwo = new g2o::VertexSE3Expmap();
  poseTwo->setId(1);
  Eigen::Matrix3d R_Two;
  R_Two << 
  R.at<double>(0,0), R.at<double>(0,1), R.at<double>(0,2),
  R.at<double>(1,0), R.at<double>(1,1), R.at<double>(1,2),
  R.at<double>(2,0), R.at<double>(2,1), R.at<double>(2,2);
  //the rotation and translation are the quantities being optimized
  poseTwo->setEstimate(g2o::SE3Quat(
    R_Two,
    Eigen::Vector3d(t.at<double>(0,0), t.at<double>(1,0), t.at<double>(2,0))
  ));
  optimizer.addVertex(poseTwo);
  
  
  int index = 2;
  for (const Point3f p:points_3d)
  {
    g2o::VertexSBAPointXYZ* point = new g2o::VertexSBAPointXYZ();
    point->setId(index++);
    //3D position of the landmark to be optimized
    point->setEstimate(Eigen::Vector3d(p.x, p.y, p.z));
    //marginalize the landmarks so the system can be solved sparsely
    point->setMarginalized(true);
    optimizer.addVertex(point);
  }
  
  //camera intrinsics parameter block
  g2o::CameraParameters* camera = new g2o::CameraParameters(K.at<double>(0,0),Eigen::Vector2d(K.at<double>(0,2), K.at<double>(1,2)), 0);
  camera->setId(0);
  optimizer.addParameter(camera);
  
  //add the edges
  int edgeCount = 0;
  
  //observations from the first camera
  index = 2;
  for (const Point2f p:points_2d_1)
  {
    //reprojection-error edge of type EdgeProjectXYZ2UV
    g2o::EdgeProjectXYZ2UV* edgeOne = new g2o::EdgeProjectXYZ2UV();
    edgeOne->setId(edgeCount++);
    //connect the landmark vertex and the first pose vertex
    edgeOne->setVertex(0, dynamic_cast<g2o::VertexSBAPointXYZ*>(optimizer.vertex(index)));
    edgeOne->setVertex(1, dynamic_cast<g2o::VertexSE3Expmap*>(optimizer.vertex(0)));
    //the measurement is the pixel position in the first frame
    edgeOne->setMeasurement(Eigen::Vector2d(p.x, p.y));
    edgeOne->setParameterId(0,0);
    edgeOne->setInformation(Eigen::Matrix2d::Identity());
    optimizer.addEdge(edgeOne);
    index++;
  }
  
  //observations from the second camera
  index = 2;
  for (const Point2f p:points_2d_2)
  {
    g2o::EdgeProjectXYZ2UV* edgetwo = new g2o::EdgeProjectXYZ2UV();
    edgetwo->setId(edgeCount++);
    edgetwo->setVertex(0, dynamic_cast<g2o::VertexSBAPointXYZ*>(optimizer.vertex(index)));
    edgetwo->setVertex(1, dynamic_cast<g2o::VertexSE3Expmap*>(optimizer.vertex(1)));
    edgetwo->setMeasurement(Eigen::Vector2d(p.x, p.y));
    edgetwo->setParameterId(0,0);
    //unit weight by default
    edgetwo->setInformation(Eigen::Matrix2d::Identity());
    optimizer.addEdge(edgetwo);
    index++;
  }
  
  //enable verbose output for debugging
  //optimizer.setVerbose(true);
  //initialize
  optimizer.initializeOptimization();
  //iteration count
  optimizer.optimize(100);
  cout << "After optimization:" << endl;
  cout << "T1 = " << endl << Eigen::Isometry3d(poseOne->estimate()).matrix() << endl;
  cout << "T2 = " << endl << Eigen::Isometry3d(poseTwo->estimate()).matrix() << endl;
}


int main(int argc, char **argv) {
  
  //load the RGB images  
  Mat img_1 = imread("../1.png", CV_LOAD_IMAGE_COLOR);
  Mat img_2 = imread("../2.png", CV_LOAD_IMAGE_COLOR);
  
  vector<KeyPoint> keypoints_1, keypoints_2;
  vector<DMatch> matches;
  find_feature_matches(img_1, img_2, keypoints_1, keypoints_2, matches);
  
  //load the depth image
  Mat depth_1 = imread("../1_depth.png", CV_LOAD_IMAGE_UNCHANGED);
  //camera intrinsics
  Mat K = (Mat_<double> (3,3) << 520.9, 0, 325.1, 0, 521.0, 249.7, 0, 0, 1);
  
  //3D points and their 2D observations
  vector<Point3f> pts_3d;
  vector<Point2f> pts_2d_1, pts_2d_2;
  
  for (DMatch m:matches)
  {
    ushort d = depth_1.ptr<unsigned short> (int (keypoints_1[m.queryIdx].pt.y)) [int (keypoints_1[m.queryIdx].pt.x)];
      
    if (d == 0)
      continue;
    
    float dd = d/1000.0;
    
    Point2d p1 = pixel2cam(keypoints_1[m.queryIdx].pt, K);
    pts_3d.push_back(Point3f(p1.x * dd, p1.y * dd, dd));
    pts_2d_1.push_back(keypoints_1[m.queryIdx].pt);
    pts_2d_2.push_back(keypoints_2[m.trainIdx].pt);
  }
  
  //solve PnP from the 3D-2D correspondences
  Mat r, t;
  solvePnP(pts_3d, pts_2d_2, K, Mat(), r, t, false, SOLVEPNP_EPNP);
  Mat R;
  Rodrigues(r, R);
  
  cout << "旋转向量r= " << endl << r << endl;
  cout << "旋转矩阵R= " << endl << R << endl;
  cout << "平移向量t= " << endl << t << endl;
  
  
  //refine with bundle adjustment
  bundleAdjustment(pts_3d, pts_2d_1, pts_2d_2, K, R, t);
  
    return 0;
    
}

The output:

Adding the first frame's observations barely affects the rotation matrix R, but it affects the translation vector t quite noticeably, and the optimized t ends up far from its pre-optimization value. This is probably because the errors of the first and second frames' observations accumulate, or because the depth errors of the depth camera get amplified.

 

7. In the ICP program, how should the code be written to treat the spatial points as optimization variables as well? How does the result change?

3D-3D problems may be the most common ones in SLAM. ICP (Iterative Closest Point) involves no camera model; the camera plays no role in the solution at all. ICP is generally solved either directly with SVD or by nonlinear optimization. For this exercise it suffices to lightly modify the book's BA program pose_estimation_3d3d.cpp. The code is shown below (its way of defining custom g2o vertices and edges is quite typical); for reference, the closed-form SVD step is summarized right after this paragraph.
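
With centroids p_1, p_2 of the two point sets and de-meaned points q_{1,i} = p_{1,i} - p_1 and q_{2,i} = p_{2,i} - p_2, the SVD solution that pose_estimation_3d3d implements is

W = \sum_i q_{1,i}\, q_{2,i}^T = U \Sigma V^T, \qquad R = U V^T, \qquad t = p_1 - R\, p_2,

flipping the sign of the last column of U when \det(U)\det(V) < 0 so that R is a proper rotation.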

The CMakeLists.txt is as follows:

cmake_minimum_required(VERSION 2.6)
project(homeworks7_7)

set( CMAKE_BUILD_TYPE "Release" )
set( CMAKE_CXX_FLAGS "-std=c++11 -O3" )

list( APPEND CMAKE_MODULE_PATH ${PROJECT_SOURCE_DIR}/cmake_modules )

set(OpenCV_DIR "/usr/local/opencv3/share/OpenCV")
find_package( OpenCV 3 REQUIRED )
find_package( G2O REQUIRED )
find_package( CSparse REQUIRED )

include_directories(
  ${OpenCV_INCLUDE_DIRS}
  ${CSPARSE_INCLUDE_DIR}
  ${CSPARSE_INCLUDE_DIRS}
  "/usr/include/eigen3/"
  "/home/fuhang/projects/feature_extraction"
  )

add_executable(homeworks7_7 homeworks7_7.cpp)
target_link_libraries( homeworks7_7 
   ${OpenCV_LIBS}
   g2o_core g2o_stuff g2o_types_sba g2o_csparse_extension 
   ${CSPARSE_LIBRARY}
)

install(TARGETS homeworks7_7 RUNTIME DESTINATION bin)

homeworks7_7.cpp is as follows:

#include <iostream>

#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/calib3d/calib3d.hpp>

#include <Eigen/Core>
#include <Eigen/SVD>
#include <Eigen/Geometry>

#include <g2o/core/base_vertex.h>
#include <g2o/core/base_binary_edge.h>
#include <g2o/core/block_solver.h>
#include <g2o/core/optimization_algorithm_gauss_newton.h>
#include <g2o/core/optimization_algorithm_levenberg.h>
#include <g2o/solvers/eigen/linear_solver_eigen.h>
#include <g2o/types/sba/types_six_dof_expmap.h>

using namespace std;
using namespace cv;

void find_feature_matches (const Mat& img_1, const Mat& img_2, vector<KeyPoint>& keypoints_1, vector<KeyPoint>& keypoints_2, vector<DMatch>& matches)
{
  Mat descriptors_1, descriptors_2;
  
  Ptr<FeatureDetector> detector = ORB::create();
  Ptr<DescriptorExtractor> descriptor = ORB::create();
  Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create("BruteForce-Hamming");
  
  detector->detect(img_1, keypoints_1);
  detector->detect(img_2, keypoints_2);
  descriptor->compute(img_1, keypoints_1, descriptors_1);
  descriptor->compute(img_2, keypoints_2, descriptors_2);
  
  vector<DMatch> matchers;  
  matcher->match(descriptors_1, descriptors_2, matchers);  
  double min_dist = 10000, max_dist = 0;
  for (int i=0; i<descriptors_1.rows; i++)
  {
    if (matchers[i].distance < min_dist) min_dist = matchers[i].distance;
    if (matchers[i].distance > max_dist) max_dist = matchers[i].distance;
  }
  
  for (auto m:matchers)
  {
    if (m.distance <= max(30.0, 2*min_dist))
      matches.push_back(m);
  }
  
  cout << "共找到" << matches.size() << "对匹配点" << endl;
}


Point2d pixel2cam (const Point2d& p, const Mat& K)
{
    return Point2d
           (
               ( p.x - K.at<double> ( 0,2 ) ) / K.at<double> ( 0,0 ),
               ( p.y - K.at<double> ( 1,2 ) ) / K.at<double> ( 1,1 )
           );
}

void pose_estimation_3d3d ( const vector<Point3f>& pts1, const vector<Point3f>& pts2, Mat& R, Mat& t)
{
  Point3f p1, p2;
  int N = pts1.size();
  for (int i=0; i<N; i++)
  {
     p1 += pts1[i];
     p2 += pts2[i];
  }
  p1 /= N;
  p2 /= N;
  vector<Point3f> q1(N), q2(N);
  for (int i=0; i<N; i++)
  {
    q1[i] = pts1[i] - p1;
    q2[i] = pts2[i] - p2;
  }
  
  Eigen::Matrix3d W = Eigen::Matrix3d::Zero();
  for (int i=0; i<N; i++)
  {
    // W = q1*q2^T
    W += Eigen::Vector3d(q1[i].x, q1[i].y, q1[i].z) * Eigen::Vector3d(q2[i].x, q2[i].y, q2[i].z).transpose();
  }
  cout << "W=" << endl << W << endl;
  //solve with SVD
  Eigen::JacobiSVD<Eigen::Matrix3d> svd (W, Eigen::ComputeFullU|Eigen::ComputeFullV);
  Eigen::Matrix3d U = svd.matrixU();
  Eigen::Matrix3d V = svd.matrixV();
  
  if (U.determinant() * V.determinant() < 0)
  {
    for (int x=0; x<3; ++x)
    {
      U(x, 2) *= -1;
    }
  }
  
  cout << "U= " << endl << U << endl;
  cout << "V= " << endl << V << endl;
  
  
  Eigen::Matrix3d R_ = U * (V.transpose());
  Eigen::Vector3d t_ = Eigen::Vector3d (p1.x, p1.y, p1.z) - R_ * Eigen::Vector3d (p2.x, p2.y, p2.z);
  
  R = ( Mat_<double> (3,3) <<
    R_(0,0), R_(0,1), R_(0,2),
    R_(1,0), R_(1,1), R_(1,2),
    R_(2,0), R_(2,1), R_(2,2)
  );
  
  t = (Mat_<double> (3,1) << t_(0,0), t_(1,0), t_(2,0));
  
  cout << "经过SVD求解后,R=" << endl << R << endl << "t=" << t << endl;
}

//a custom binary edge
class EdgeProjectXYZRGBDPoseOnly : public g2o::BaseBinaryEdge<3, Eigen::Vector3d, g2o::VertexSBAPointXYZ, g2o::VertexSE3Expmap>
{
public:
  EIGEN_MAKE_ALIGNED_OPERATOR_NEW;
  EdgeProjectXYZRGBDPoseOnly() {}
  
  virtual void computeError()
  {
    const g2o::VertexSBAPointXYZ * point = static_cast<const g2o::VertexSBAPointXYZ*> (_vertices[0]);
    const g2o::VertexSE3Expmap * pose = static_cast<const g2o::VertexSE3Expmap*> (_vertices[1]);
    //compare the first-frame measurement with the second-frame point mapped by the current pose
    _error = _measurement - pose->estimate().map(point->estimate());
  }
  
  virtual void linearizeOplus() override final
  {
    g2o::VertexSBAPointXYZ * point = static_cast<g2o::VertexSBAPointXYZ * > (_vertices[0]);
    g2o::VertexSE3Expmap * pose = static_cast<g2o::VertexSE3Expmap * > (_vertices[1]);
    g2o::SE3Quat T (pose->estimate());
    //map the second-frame point into the first frame
    Eigen::Vector3d xyz_trans = T.map(point->estimate());
    double x = xyz_trans[0];
    double y = xyz_trans[1];
    double z = xyz_trans[2];
    
    //Jacobian with respect to the 3D point
    _jacobianOplusXi = -T.rotation().toRotationMatrix();
    
    _jacobianOplusXj(0,0) = 0;
    _jacobianOplusXj(0,1) = -z;
    _jacobianOplusXj(0,2) = y;
    _jacobianOplusXj(0,3) = -1;
    _jacobianOplusXj(0,4) = 0;
    _jacobianOplusXj(0,5) = 0;

    _jacobianOplusXj(1,0) = z;
    _jacobianOplusXj(1,1) = 0;
    _jacobianOplusXj(1,2) = -x;
    _jacobianOplusXj(1,3) = 0;
    _jacobianOplusXj(1,4) = -1;
    _jacobianOplusXj(1,5) = 0;

    _jacobianOplusXj(2,0) = -y;
    _jacobianOplusXj(2,1) = x;
    _jacobianOplusXj(2,2) = 0;
    _jacobianOplusXj(2,3) = 0;
    _jacobianOplusXj(2,4) = 0;
    _jacobianOplusXj(2,5) = -1;
  }
  
  bool read (istream& in) { return false; }
  bool write (ostream& out) const { return false; }

};


void bundleAdjustment (const vector<Point3f>& pts1, const vector<Point3f>& pts2, Mat& R, Mat& t)
{
  typedef g2o::BlockSolver<g2o::BlockSolverTraits<6,3> > Block;
  Block::LinearSolverType* linearSolver = new g2o::LinearSolverEigen<Block::PoseMatrixType>();
  Block* solve_ptr = new Block(linearSolver);
  // With the book's Gauss-Newton algorithm the iteration diverges (the problem is ill-conditioned), so use Levenberg-Marquardt instead
  g2o::OptimizationAlgorithmLevenberg* solver = new g2o::OptimizationAlgorithmLevenberg(solve_ptr);
  g2o::SparseOptimizer optimizer;
  optimizer.setAlgorithm(solver);
  
  //vertex
  g2o::VertexSE3Expmap* pose = new g2o::VertexSE3Expmap();
  pose->setId(0);
  // Use the R, t from SVD as the initial value
  Eigen::Matrix3d R_mat;
  R_mat << 
      R.at<double>(0,0), R.at<double>(0,1), R.at<double>(0,2),
      R.at<double>(1,0), R.at<double>(1,1), R.at<double>(1,2),
      R.at<double>(2,0), R.at<double>(2,1), R.at<double>(2,2);
  pose->setEstimate(g2o::SE3Quat(
    R_mat,
    Eigen::Vector3d(t.at<double>(0,0), t.at<double>(1,0), t.at<double>(2,0))
  ));
  optimizer.addVertex(pose);
  
  //points
  int pointIndex = 1;
  for (size_t i=0; i<pts2.size(); i++)
  {
    g2o::VertexSBAPointXYZ *points = new g2o::VertexSBAPointXYZ();
    points->setId(pointIndex++);
    points->setMarginalized(true);
    // Initialize the landmark estimate with the frame-2 point
    points->setEstimate(Eigen::Vector3d(pts2[i].x, pts2[i].y, pts2[i].z));
    optimizer.addVertex(points);
  }
  
  //edges
  int index = 0;
  for (size_t i=0; i<pts1.size(); i++)
  {
    EdgeProjectXYZRGBDPoseOnly* edge = new EdgeProjectXYZRGBDPoseOnly();
    edge->setId(index);
    edge->setVertex(0, dynamic_cast<g2o::VertexSBAPointXYZ*>(optimizer.vertex(index+1))); // a binary edge must connect two distinct vertices
    edge->setVertex(1, dynamic_cast<g2o::OptimizableGraph::Vertex*>(optimizer.vertex(0)));
    // Measurement: the corresponding frame-1 point
    edge->setMeasurement(Eigen::Vector3d(pts1[i].x, pts1[i].y, pts1[i].z));
    // Information matrix
    edge->setInformation(Eigen::Matrix3d::Identity()*1e4);
    optimizer.addEdge(edge);
    index++;
  }
  
  optimizer.setVerbose(true);
  optimizer.initializeOptimization();
  optimizer.optimize(100);
  cout << "优化后,T = " << endl << Eigen::Isometry3d(pose->estimate()).matrix() << endl;
}


int main(int argc, char **argv) {
    
  Mat img_1 = imread("../1.png", CV_LOAD_IMAGE_COLOR);
  Mat img_2 = imread("../2.png", CV_LOAD_IMAGE_COLOR);
  Mat depth_1 = imread("../1_depth.png", CV_LOAD_IMAGE_UNCHANGED);
  Mat depth_2 = imread("../2_depth.png", CV_LOAD_IMAGE_UNCHANGED);
  
  vector<KeyPoint> keypoints_1, keypoints_2;
  vector<DMatch> matches;
  find_feature_matches ( img_1, img_2, keypoints_1, keypoints_2, matches );
  // Camera intrinsics
  Mat K = ( Mat_<double> ( 3,3 ) << 520.9, 0, 325.1, 0, 521.0, 249.7, 0, 0, 1 );
  vector<Point3f> pts1, pts2;
  
  for ( DMatch m:matches )
  {
      ushort d1 = depth_1.ptr<unsigned short> ( int ( keypoints_1[m.queryIdx].pt.y ) ) [ int ( keypoints_1[m.queryIdx].pt.x ) ];
      ushort d2 = depth_2.ptr<unsigned short> ( int ( keypoints_2[m.trainIdx].pt.y ) ) [ int ( keypoints_2[m.trainIdx].pt.x ) ];
      if ( d1==0 || d2==0 )  
          continue;
      Point2d p1 = pixel2cam ( keypoints_1[m.queryIdx].pt, K );
      Point2d p2 = pixel2cam ( keypoints_2[m.trainIdx].pt, K );
      float dd1 = float ( d1 ) /1000.0;
      float dd2 = float ( d2 ) /1000.0;
      pts1.push_back ( Point3f ( p1.x * dd1, p1.y * dd1, dd1 ) );
      pts2.push_back ( Point3f ( p2.x * dd2, p2.y * dd2, dd2 ) );
  }
  cout << "3d-3d pairs: " << pts1.size() << endl;
  Mat R,t;
  pose_estimation_3d3d ( pts1, pts2, R, t );
  
  cout << "R逆变换 = " << endl << R.t() << endl;
  cout << "t逆变换 = " << endl << -R.t()*t << endl;
  
  bundleAdjustment(pts1, pts2, R, t);

    return 0;
}

Running the code gives the results shown in the figure:

Because the optimization is initialized with the R and t from the SVD solution, it converges quickly. Note that the iteration method used here is not Gauss-Newton: after trying Gauss-Newton several times I found it kept diverging, while switching to the Levenberg-Marquardt algorithm made it converge immediately. This problem is likely ill-conditioned.
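For reference, switching iteration strategies in g2o only requires changing the algorithm wrapper around the same block solver; a minimal sketch against the same (old raw-pointer) g2o API used above:

typedef g2o::BlockSolver<g2o::BlockSolverTraits<6,3> > Block;
Block::LinearSolverType* linearSolver = new g2o::LinearSolverEigen<Block::PoseMatrixType>();
Block* solver_ptr = new Block(linearSolver);

// Diverged on this problem in my tests:
// g2o::OptimizationAlgorithmGaussNewton* solver = new g2o::OptimizationAlgorithmGaussNewton(solver_ptr);

// Converged: LM damps the normal equations (H + lambda*I), which stabilizes
// the update when H is nearly singular.
g2o::OptimizationAlgorithmLevenberg* solver = new g2o::OptimizationAlgorithmLevenberg(solver_ptr);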

 

8. In feature matching, mismatches are unavoidable. What happens if mismatched pairs are fed into PnP or ICP? What methods can help avoid mismatches?

The book currently uses brute-force matching on Hamming distance, then filters the matches with an empirical threshold (30, or twice the minimum distance). If mismatches are fed into PnP or ICP, and on top of that the iteration algorithm is badly chosen or the initial estimate is poor, the computed result can easily be biased; worse, the iteration may become unstable or even fail outright. Popular ways to avoid mismatches include: cross matching (match a second time in the reverse direction after brute-force matching, keep a pair only if both directions agree, and discard it otherwise, e.g. BFMatcher XX (NORM_HAMMING, true)); KNN matching (for each feature select the K most similar candidates, usually K=2, and accept the best candidate only if it is clearly better than the second best, e.g. bfMatcher->knnMatch(descriptors1, descriptors2, knnMatches, 2)); and RANSAC (estimate the homography between the two images and use the reprojection error to judge whether each match is correct, e.g. findHomography). It is also worth studying how mature frameworks such as ORB_SLAM2 reject mismatches in different scenes. Likewise, on the back end, a robust kernel such as the Huber loss can make the optimization more resistant to outliers. A sketch combining KNN matching and RANSAC follows.
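The sketch below is illustrative rather than tuned: the 0.7 ratio and the 3.0-pixel RANSAC reprojection threshold are assumed values, and filterMatches is a hypothetical helper name; only the OpenCV calls named above (knnMatch, findHomography) are used:

#include <opencv2/features2d/features2d.hpp>
#include <opencv2/calib3d/calib3d.hpp>

// Keep a match only if (a) its best distance clearly beats the second best
// (Lowe's ratio test) and (b) it is an inlier of a RANSAC-fitted homography.
std::vector<cv::DMatch> filterMatches(const cv::Mat& descriptors_1, const cv::Mat& descriptors_2,
                                      const std::vector<cv::KeyPoint>& keypoints_1,
                                      const std::vector<cv::KeyPoint>& keypoints_2)
{
  cv::Ptr<cv::DescriptorMatcher> matcher = cv::DescriptorMatcher::create("BruteForce-Hamming");
  std::vector<std::vector<cv::DMatch>> knnMatches;
  matcher->knnMatch(descriptors_1, descriptors_2, knnMatches, 2);

  std::vector<cv::DMatch> good;
  for (const auto& m : knnMatches)
    if (m.size() == 2 && m[0].distance < 0.7f * m[1].distance)  // ratio test
      good.push_back(m[0]);

  // Collect the surviving pixel coordinates and fit a homography with RANSAC.
  std::vector<cv::Point2f> p1, p2;
  for (const auto& m : good) {
    p1.push_back(keypoints_1[m.queryIdx].pt);
    p2.push_back(keypoints_2[m.trainIdx].pt);
  }
  std::vector<cv::DMatch> inliers;
  if (p1.size() >= 4) {  // a homography needs at least 4 point pairs
    std::vector<uchar> mask;
    cv::findHomography(p1, p2, cv::RANSAC, 3.0, mask);
    for (size_t i = 0; i < mask.size(); i++)
      if (mask[i]) inliers.push_back(good[i]);
  }
  return inliers;
}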

 

9. Implement the PnP and ICP optimization in Ceres.

That is, convert the g2o optimization into a Ceres optimization. Since the book mainly uses the reprojection-error formulation and targets BA problems, which g2o solves accurately and conveniently, Ceres received little attention so far. Compared with g2o, which is tailored to BA-style graph optimization, Ceres is a more general solver for least-squares problems. This exercise is a good way to get acquainted with Ceres; for more depth, see the official Ceres tutorials.
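To get a feel for the API before reading the PnP code, here is essentially the canonical introductory Ceres example (fitting a scalar x to minimize the single residual 10 - x); both programs below follow exactly this pattern of a templated functor wrapped in an AutoDiffCostFunction and handed to ceres::Solve:

#include <ceres/ceres.h>
#include <iostream>

// The functor computes the residual; Ceres differentiates it automatically.
struct CostFunctor {
  template <typename T>
  bool operator()(const T* const x, T* residual) const {
    residual[0] = T(10.0) - x[0];
    return true;
  }
};

int main() {
  double x = 0.5;  // initial value
  ceres::Problem problem;
  // 1 residual dimension, 1 parameter dimension
  problem.AddResidualBlock(
    new ceres::AutoDiffCostFunction<CostFunctor, 1, 1>(new CostFunctor), nullptr, &x);
  ceres::Solver::Options options;
  options.linear_solver_type = ceres::DENSE_QR;
  ceres::Solver::Summary summary;
  ceres::Solve(options, &problem, &summary);
  std::cout << "x = " << x << std::endl;  // converges to 10
  return 0;
}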

For the PnP case, the CMakeLists.txt is as follows:

cmake_minimum_required(VERSION 2.6)
project(homeworks7_9)

set(CMAKE_BUILD_TYPE "Release")
set(CMAKE_CXX_FLAGS "-std=c++11 -O3")

list(APPEND CMAKE_MODULE_PATH ${PROJECT_SOURCE_DIR}/cmake_modules)

find_package(Ceres REQUIRED)

set(OpenCV_DIR "/usr/local/opencv3/share/OpenCV")
find_package(OpenCV REQUIRED)

include_directories(
  ${OpenCV_INCLUDE_DIRS}
  ${CERES_INCLUDE_DIRS}
  "/usr/include/eigen3/"
  "/home/fuhang/projects/feature_extraction"
  )

add_executable(homeworks7_9 homeworks7_9.cpp)
target_link_libraries( homeworks7_9 
   ${OpenCV_LIBS}
   ${CERES_LIBRARIES}
)

install(TARGETS homeworks7_9 RUNTIME DESTINATION bin)

homeworks7_9.cpp is as follows:

#include <iostream>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/calib3d/calib3d.hpp>

#include <Eigen/Core>
#include <Eigen/Geometry>

#include <ceres/ceres.h>
#include <ceres/rotation.h>


using namespace std;
using namespace cv;

// ORB feature detection and matching
void find_feature_matches(const cv::Mat& img_1, const cv::Mat& img_2, std::vector<cv::KeyPoint>& keypoints_1, std::vector<cv::KeyPoint>& keypoints_2, std::vector<cv::DMatch>& matches)
{
  Mat descriptors_1, descriptors_2;
  
  cv::Ptr<cv::FeatureDetector> detector = cv::ORB::create();
  cv::Ptr<cv::DescriptorExtractor> descriptor = cv::ORB::create();
  cv::Ptr<cv::DescriptorMatcher> matcher = cv::DescriptorMatcher::create("BruteForce-Hamming");
  
  detector->detect(img_1, keypoints_1);
  detector->detect(img_2, keypoints_2);
  
  descriptor->compute(img_1, keypoints_1, descriptors_1);
  descriptor->compute(img_2, keypoints_2, descriptors_2);
  
  vector<DMatch> matchers;
  matcher->match(descriptors_1, descriptors_2, matchers);
  
  double min_dist = 10000, max_dist = 0;
  for (int i=0; i<descriptors_1.rows; i++)
  {
    double dist = matchers[i].distance;
    if (dist < min_dist) min_dist = dist;
    if (dist > max_dist) max_dist = dist;
  }
  
  for (int i=0; i<descriptors_1.rows; i++)
  {
    if (matchers[i].distance <= max(2*min_dist, 30.0))
      matches.push_back(matchers[i]);
  }
  
}


Point2d pixel2cam (const Point2d& p, const Mat& K)
{
  return Point2d
  (
    (p.x - K.at<double>(0,2)) / (K.at<double>(0,0)),
    (p.y - K.at<double>(1,2)) / (K.at<double>(1,1))
  );
}


struct PnPProblem
{ 
  // PnP cost functor: stores the 2D observation, the 3D point, and the camera intrinsics
  PnPProblem(double x, double y, double X, double Y, double Z, double cx, double cy, double fx, double fy)
  {
    x_ = x;
    y_ = y;
    
    X_ = X;
    Y_ = Y;
    Z_ = Z;
    
    cx_ = cx;
    cy_ = cy;
    
    fx_ = fx;
    fy_ = fy;
  }
  // Templated function-call operator: takes the pose, writes the residual (required for auto-diff)
  template <typename T>
  bool operator()(const T* const pose, T* residual) const
  {
    // Store the 3D point in array p
    T p[3];
    p[0] = T(X_);
    p[1] = T(Y_);
    p[2] = T(Z_);
    
    // Rotation part of the pose (angle-axis)
    T r[3];
    r[0] = pose[0];
    r[1] = pose[1];
    r[2] = pose[2];
    
    // Rotate the 3D point
    T newP[3];
    ceres::AngleAxisRotatePoint(r, p, newP);
    
    // Translate the 3D point
    newP[0] += pose[3];
    newP[1] += pose[4];
    newP[2] += pose[5];
    
    // Project from camera coordinates to pixel coordinates
    T projectX = fx_ * newP[0] / newP[2] + cx_;
    T projectY = fy_ * newP[1] / newP[2] + cy_;
    
    // Reprojection residual
    residual[0] = T(x_) - projectX;
    residual[1] = T(y_) - projectY;
    
    // Must return true, or the solver treats the evaluation as failed
    return true;
  }
  
  double x_, y_;
  double X_, Y_, Z_;
  double cx_, cy_;
  double fx_, fy_;
};


void bundleAdjustmentCeres(const vector<Point3f> points_3d, const vector<Point2f> points_2d, const Mat& K, Mat& R, Mat& t, Mat& T)
{
  ceres::Problem problem;
  
  Mat rotateVector;
  Rodrigues(R, rotateVector);
  
  double pose[6];
  pose[0] = rotateVector.at<double>(0);
  pose[1] = rotateVector.at<double>(1);
  pose[2] = rotateVector.at<double>(2);
  pose[3] = t.at<double>(0);
  pose[4] = t.at<double>(1);
  pose[5] = t.at<double>(2);
  
  double fx = K.at<double>(0,0);
  double fy = K.at<double>(1,1);
  double cx = K.at<double>(0,2);
  double cy = K.at<double>(1,2);
  
  for (size_t i=0; i<points_3d.size(); i++)
  {
    // 2-D residual, 6-D pose parameter block, no robust kernel
    problem.AddResidualBlock(
      new ceres::AutoDiffCostFunction<PnPProblem, 2, 6>(new PnPProblem(
	points_2d[i].x, points_2d[i].y,
	points_3d[i].x, points_3d[i].y, points_3d[i].z,
	cx, cy, 
	fx, fy
      )),
      nullptr,
      pose
    );
  }
  
  // Solve with dense QR
  ceres::Solver::Options options;
  options.linear_solver_type = ceres::DENSE_QR;
  // Print optimization progress to std::cout
  options.minimizer_progress_to_stdout = true;
  
  // Run the solver
  ceres::Solver::Summary summary;
  ceres::Solve (options, &problem, &summary);
  cout << summary.BriefReport() << endl;
  
  // Convert the optimized rotation vector back to a rotation matrix
  rotateVector.at<double>(0) = pose[0];
  rotateVector.at<double>(1) = pose[1];
  rotateVector.at<double>(2) = pose[2];
  Rodrigues(rotateVector, R);
  
  t.at<double>(0) = pose[3];
  t.at<double>(1) = pose[4];
  t.at<double>(2) = pose[5];
  
  T = (Mat_<double>(4,4) << 
    R.at<double>(0,0), R.at<double>(0,1), R.at<double>(0,2), t.at<double>(0),
    R.at<double>(1,0), R.at<double>(1,1), R.at<double>(1,2), t.at<double>(1),
    R.at<double>(2,0), R.at<double>(2,1), R.at<double>(2,2), t.at<double>(2),
    0,                 0,                 0,                 1
  );
  cout << "优化后位姿参数T= " << endl << T << endl;
}


int main(int argc, char **argv) {
  
  // Load the RGB images
  Mat img_1 = imread("../1.png", CV_LOAD_IMAGE_COLOR);
  Mat img_2 = imread("../2.png", CV_LOAD_IMAGE_COLOR);
  
  vector<KeyPoint> keypoints_1, keypoints_2;
  vector<DMatch> matches;
  find_feature_matches(img_1, img_2, keypoints_1, keypoints_2, matches);
  
  // Load the depth image
  Mat depth_1 = imread("../1_depth.png", CV_LOAD_IMAGE_UNCHANGED);
  // Camera intrinsic matrix
  Mat K = (Mat_<double> (3,3) << 520.9, 0, 325.1, 0, 521.0, 249.7, 0, 0, 1);
  
  // 3D and 2D point containers
  vector<Point3f> pts_3d;
  vector<Point2f> pts_2d_1, pts_2d_2;
  
  for (DMatch m:matches)
  {
    ushort d = depth_1.ptr<unsigned short> (int (keypoints_1[m.queryIdx].pt.y)) [int (keypoints_1[m.queryIdx].pt.x)];
      
    if (d == 0)
      continue;
    
    float dd = d/1000.0;
    
    Point2d p1 = pixel2cam(keypoints_1[m.queryIdx].pt, K);
    pts_3d.push_back(Point3f(p1.x * dd, p1.y * dd, dd));
    pts_2d_1.push_back(keypoints_1[m.queryIdx].pt);
    pts_2d_2.push_back(keypoints_2[m.trainIdx].pt);
  }
  
  // Solve PnP (EPnP) from the 3D-2D correspondences for an initial pose
  Mat r, t;
  solvePnP(pts_3d, pts_2d_2, K, Mat(), r, t, false, SOLVEPNP_EPNP);
  Mat R;
  Rodrigues(r, R);
  
  cout << "旋转向量r= " << endl << r << endl;
  cout << "旋转矩阵R= " << endl << R << endl;
  cout << "平移向量t= " << endl << t << endl;
  
  cout << "*************************开始优化***********************************" << endl;
  
  Mat T;  
  bundleAdjustmentCeres(pts_3d, pts_2d_2, K, R, t, T);
  
    return 0;
    
}

The PnP solution computed by Ceres is shown in the figure below:

The final result from Ceres is nearly the same as g2o's, but the Ceres code is more concise, needs fewer iterations, and is easier to extend. The Ceres optimization for ICP works in the same way.
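One natural extension, tying back to question 8: the nullptr passed to AddResidualBlock above means no robust kernel is used. Swapping in a Huber loss bounds the influence of any mismatches that survive the matching stage; a sketch of the modified call (the delta of 1.0 pixel is an assumed, illustrative value):

problem.AddResidualBlock(
  new ceres::AutoDiffCostFunction<PnPProblem, 2, 6>(new PnPProblem(
    points_2d[i].x, points_2d[i].y,
    points_3d[i].x, points_3d[i].y, points_3d[i].z,
    cx, cy,
    fx, fy
  )),
  new ceres::HuberLoss(1.0),  // robust kernel instead of nullptr
  pose
);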

Moving on to ICP, the CMakeLists.txt is as follows:

cmake_minimum_required(VERSION 2.6)
project(homeworks7_9_icp)

set(CMAKE_BUILD_TYPE "Release")
set(CMAKE_CXX_FLAGS "-std=c++11 -O3")

list(APPEND CMAKE_MODULE_PATH ${PROJECT_SOURCE_DIR}/cmake_modules)

find_package(Ceres REQUIRED)

set(OpenCV_DIR "/usr/local/opencv3/share/OpenCV")
find_package(OpenCV REQUIRED)

include_directories(
  ${OpenCV_INCLUDE_DIRS}
  ${CERES_INCLUDE_DIRS}
  "/usr/include/eigen3/"
  "/home/fuhang/projects/feature_extraction"
  )

add_executable(homeworks7_9_icp homeworks7_9_icp.cpp)
target_link_libraries( homeworks7_9_icp 
   ${OpenCV_LIBS}
   ${CERES_LIBRARIES}
)

install(TARGETS homeworks7_9_icp RUNTIME DESTINATION bin)

homeworks7_9_icp.cpp is as follows:

#include <iostream>

#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/calib3d/calib3d.hpp>

#include <Eigen/Core>
#include <Eigen/SVD>
#include <Eigen/Geometry>

#include <ceres/ceres.h>
#include <ceres/rotation.h>

using namespace std;
using namespace cv;

void find_feature_matches (const Mat& img_1, const Mat& img_2, vector<KeyPoint>& keypoints_1, vector<KeyPoint>& keypoints_2, vector<DMatch>& matches)
{
  Mat descriptors_1, descriptors_2;
  
  Ptr<FeatureDetector> detector = ORB::create();
  Ptr<DescriptorExtractor> descriptor = ORB::create();
  Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create("BruteForce-Hamming");
  
  detector->detect(img_1, keypoints_1);
  detector->detect(img_2, keypoints_2);
  descriptor->compute(img_1, keypoints_1, descriptors_1);
  descriptor->compute(img_2, keypoints_2, descriptors_2);
  
  vector<DMatch> matchers;  
  matcher->match(descriptors_1, descriptors_2, matchers);  
  double min_dist = 10000, max_dist = 0;
  for (int i=0; i<descriptors_1.rows; i++)
  {
    if (matchers[i].distance < min_dist) min_dist = matchers[i].distance;
    if (matchers[i].distance > max_dist) max_dist = matchers[i].distance;
  }
  
  for (auto m:matchers)
  {
    if (m.distance <= max(30.0, 2*min_dist))
      matches.push_back(m);
  }
  
  cout << "共找到" << matches.size() << "对匹配点" << endl;
}


Point2d pixel2cam (const Point2d& p, const Mat& K)
{
    return Point2d
           (
               ( p.x - K.at<double> ( 0,2 ) ) / K.at<double> ( 0,0 ),
               ( p.y - K.at<double> ( 1,2 ) ) / K.at<double> ( 1,1 )
           );
}

void pose_estimation_3d3d ( const vector<Point3f>& pts1, const vector<Point3f>& pts2, Mat& R, Mat& t)
{
  Point3f p1, p2;
  int N = pts1.size();
  for (int i=0; i<N; i++)
  {
     p1 += pts1[i];
     p2 += pts2[i];
  }
  p1 /= N;
  p2 /= N;
  vector<Point3f> q1(N), q2(N);
  for (int i=0; i<N; i++)
  {
    q1[i] = pts1[i] - p1;
    q2[i] = pts2[i] - p2;
  }
  
  Eigen::Matrix3d W = Eigen::Matrix3d::Zero();
  for (int i=0; i<N; i++)
  {
    // W = q1*q2^T
    W += Eigen::Vector3d(q1[i].x, q1[i].y, q1[i].z) * Eigen::Vector3d(q2[i].x, q2[i].y, q2[i].z).transpose();
  }
  cout << "W=" << endl << W << endl;
  // Solve via SVD
  Eigen::JacobiSVD<Eigen::Matrix3d> svd (W, Eigen::ComputeFullU|Eigen::ComputeFullV);
  Eigen::Matrix3d U = svd.matrixU();
  Eigen::Matrix3d V = svd.matrixV();
  
  if (U.determinant() * V.determinant() < 0)
  {
    for (int x=0; x<3; ++x)
    {
      U(x, 2) *= -1;
    }
  }
  
  cout << "U= " << endl << U << endl;
  cout << "V= " << endl << V << endl;
  
  
  Eigen::Matrix3d R_ = U * (V.transpose());
  Eigen::Vector3d t_ = Eigen::Vector3d (p1.x, p1.y, p1.z) - R_ * Eigen::Vector3d (p2.x, p2.y, p2.z);
  
  R = ( Mat_<double> (3,3) <<
    R_(0,0), R_(0,1), R_(0,2),
    R_(1,0), R_(1,1), R_(1,2),
    R_(2,0), R_(2,1), R_(2,2)
  );
  
  t = (Mat_<double> (3,1) << t_(0,0), t_(1,0), t_(2,0));
  
  cout << "经过SVD求解后,R=" << endl << R << endl << "t=" << t << endl;
}

struct ICPProblem
{
  ICPProblem(double x1, double y1, double z1, double x2, double y2, double z2)
  {
      x1_ = x1;
      y1_ = y1;
      z1_ = z1;
      
      x2_ = x2;
      y2_ = y2;
      z2_ = z2;
  }
  
  template <typename T>
  bool operator()(
    const T* const pose, T* residual) const
    {
      // 3D point from the second frame
      T p[3];
      p[0] = T(x2_);
      p[1] = T(y2_);
      p[2] = T(z2_);
      
      T r[3];
      r[0] = pose[0];
      r[1] = pose[1];
      r[2] = pose[2];
      
      T newP[3];
      ceres::AngleAxisRotatePoint(r, p, newP);
      
      newP[0] += pose[3];
      newP[1] += pose[4];
      newP[2] += pose[5];
      
      // 3D-3D alignment, so the residual is 3-dimensional
      residual[0] = T(x1_) - newP[0];
      residual[1] = T(y1_) - newP[1];
      residual[2] = T(z1_) - newP[2];
      
      return true;
    } 
  
  double x1_, y1_, z1_;
  double x2_, y2_, z2_;
};


void bundleAdjustment (const vector<Point3f>& pts1, const vector<Point3f>& pts2, Mat& R, Mat& t, Mat& T)
{
  ceres::Problem problem;
  
  Mat rotateVector;
  Rodrigues(R, rotateVector);
  
  double pose[6];
  pose[0] = rotateVector.at<double>(0);
  pose[1] = rotateVector.at<double>(1);
  pose[2] = rotateVector.at<double>(2);
  pose[3] = t.at<double>(0);
  pose[4] = t.at<double>(1);
  pose[5] = t.at<double>(2);
  
  
  for (size_t i=0; i<pts1.size(); i++)
  {
    // 3-D residual, 6-D pose parameter block, no robust kernel
    problem.AddResidualBlock(
      new ceres::AutoDiffCostFunction<ICPProblem, 3, 6>(new ICPProblem(
	pts1[i].x, pts1[i].y, pts1[i].z,
	pts2[i].x, pts2[i].y, pts2[i].z
      )),
      nullptr,
      pose
    );
  }
  
  // Solve with dense QR
  ceres::Solver::Options options;
  options.linear_solver_type = ceres::DENSE_QR;
  // Print optimization progress to std::cout
  options.minimizer_progress_to_stdout = true;
  
  // Run the solver
  ceres::Solver::Summary summary;
  ceres::Solve (options, &problem, &summary);
  cout << summary.BriefReport() << endl;
  
  // Convert the optimized rotation vector back to a rotation matrix
  rotateVector.at<double>(0) = pose[0];
  rotateVector.at<double>(1) = pose[1];
  rotateVector.at<double>(2) = pose[2];
  Rodrigues(rotateVector, R);
  
  t.at<double>(0) = pose[3];
  t.at<double>(1) = pose[4];
  t.at<double>(2) = pose[5];
  
  T = (Mat_<double>(4,4) << 
    R.at<double>(0,0), R.at<double>(0,1), R.at<double>(0,2), t.at<double>(0),
    R.at<double>(1,0), R.at<double>(1,1), R.at<double>(1,2), t.at<double>(1),
    R.at<double>(2,0), R.at<double>(2,1), R.at<double>(2,2), t.at<double>(2),
    0,                 0,                 0,                 1
  );
  cout << "优化后位姿参数T= " << endl << T << endl;
  
}


int main(int argc, char **argv) {
    
  Mat img_1 = imread("../1.png", CV_LOAD_IMAGE_COLOR);
  Mat img_2 = imread("../2.png", CV_LOAD_IMAGE_COLOR);
  Mat depth_1 = imread("../1_depth.png", CV_LOAD_IMAGE_UNCHANGED);
  Mat depth_2 = imread("../2_depth.png", CV_LOAD_IMAGE_UNCHANGED);
  
  vector<KeyPoint> keypoints_1, keypoints_2;
  vector<DMatch> matches;
  find_feature_matches ( img_1, img_2, keypoints_1, keypoints_2, matches );
  // Camera intrinsics
  Mat K = ( Mat_<double> ( 3,3 ) << 520.9, 0, 325.1, 0, 521.0, 249.7, 0, 0, 1 );
  vector<Point3f> pts1, pts2;
  
  for ( DMatch m:matches )
  {
      ushort d1 = depth_1.ptr<unsigned short> ( int ( keypoints_1[m.queryIdx].pt.y ) ) [ int ( keypoints_1[m.queryIdx].pt.x ) ];
      ushort d2 = depth_2.ptr<unsigned short> ( int ( keypoints_2[m.trainIdx].pt.y ) ) [ int ( keypoints_2[m.trainIdx].pt.x ) ];
      if ( d1==0 || d2==0 )  
          continue;
      Point2d p1 = pixel2cam ( keypoints_1[m.queryIdx].pt, K );
      Point2d p2 = pixel2cam ( keypoints_2[m.trainIdx].pt, K );
      float dd1 = float ( d1 ) /1000.0;
      float dd2 = float ( d2 ) /1000.0;
      pts1.push_back ( Point3f ( p1.x * dd1, p1.y * dd1, dd1 ) );
      pts2.push_back ( Point3f ( p2.x * dd2, p2.y * dd2, dd2 ) );
  }
  cout << "3d-3d pairs: " << pts1.size() << endl;
  Mat R,t;
  pose_estimation_3d3d ( pts1, pts2, R, t );
  
  cout << "R逆变换 = " << endl << R.t() << endl;
  cout << "t逆变换 = " << endl << -R.t()*t << endl;
  
  Mat T;
  bundleAdjustment(pts1, pts2, R, t, T);

    return 0;
}

The program output is shown in the figure below:

Note that this ICP problem converges in just a single iteration. By contrast, g2o with Gauss-Newton even diverges here, although g2o is said to achieve high accuracy on BA problems.
