PCL: Object Recognition Based on Correspondence Grouping

Updated 2019.11.18

#include <pcl/io/pcd_io.h>
#include <pcl/point_cloud.h>                           // point cloud type
#include <pcl/correspondence.h>                        // a correspondence represents a match between two entities (points, descriptors, ...)
#include <pcl/features/normal_3d_omp.h>                // normals
#include <pcl/features/shot_omp.h>                     // SHOT descriptors
#include <pcl/features/board.h>
#include <pcl/keypoints/uniform_sampling.h>            // uniform sampling
#include <pcl/recognition/cg/hough_3d.h>               // Hough 3D grouping
#include <pcl/recognition/cg/geometric_consistency.h>  // geometric consistency grouping
#include <pcl/visualization/pcl_visualizer.h>          // visualization
#include <pcl/kdtree/kdtree_flann.h>                   // KdTree (descriptor matching)
#include <pcl/kdtree/impl/kdtree_flann.hpp>
#include <pcl/common/transforms.h>                     // transformations
#include <pcl/console/parse.h>


#include <pcl/features/moment_of_inertia_estimation.h> // moment-of-inertia estimation (AABB/OBB)

typedef pcl::PointXYZ PointType;       // point data structure used throughout
typedef pcl::Normal NormalType;        // normal structure
typedef pcl::ReferenceFrame RFType;    // local reference frame
typedef pcl::SHOT352 DescriptorType;   // SHOT feature structure, see http://www.cnblogs.com/li-yao7758258/p/6612856.html

std::string model_filename_;   // model file name
std::string scene_filename_;   // scene file name

//Algorithm params
bool show_keypoints_(true);
bool show_correspondences_(true);
bool use_cloud_resolution_(false);
bool use_hough_(true);
float model_ss_(0.01f);
float scene_ss_(0.03f);
float rf_rad_(0.015f);
float descr_rad_(0.02f);
float cg_size_(0.01f);
float cg_thresh_(5.0f);
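// Meaning of the parameters above (see also showHelp() below):
//   model_ss_  - uniform-sampling radius used to pick model keypoints
//   scene_ss_  - uniform-sampling radius used to pick scene keypoints
//   rf_rad_    - support radius of the BOARD local reference frames (Hough only)
//   descr_rad_ - support radius of the SHOT descriptors
//   cg_size_   - Hough bin size / geometric-consistency resolution
//   cg_thresh_ - minimum number of correspondences (votes) a cluster needs to be accepted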

// Struct used to return an OBB; its fields map directly to viewer->addCube(position, quat,
//   max_point_OBB.x - min_point_OBB.x, max_point_OBB.y - min_point_OBB.y, max_point_OBB.z - min_point_OBB.z, "OBB");
struct OBB
{
    Eigen::Vector3f position;
    Eigen::Quaternionf quat;
    pcl::PointXYZ min_point_OBB;
    pcl::PointXYZ max_point_OBB;

};
OBB findOBBPoint(pcl::PointCloud<pcl::PointXYZ>::Ptr cloud_in);
std::vector<pcl::PointXYZ> findCubePoint(pcl::PointCloud<pcl::PointXYZ>::Ptr cloud_in);// returns the two opposite (min/max) corner points of the cloud's AABB
pcl::PointCloud<pcl::PointXYZ>::Ptr xyzRGBA2xyz(pcl::PointCloud<pcl::PointXYZRGBA>::Ptr cloud_in);
void
showHelp(char *filename)
{
    std::cout << std::endl;
    std::cout << "***************************************************************************" << std::endl;
    std::cout << "*                                                                         *" << std::endl;
    std::cout << "*             Correspondence Grouping Tutorial - Usage Guide              *" << std::endl;
    std::cout << "*                                                                         *" << std::endl;
    std::cout << "***************************************************************************" << std::endl << std::endl;
    std::cout << "Usage: " << filename << " model_filename.pcd scene_filename.pcd [Options]" << std::endl << std::endl;
    std::cout << "Options:" << std::endl;
    std::cout << "     -h:                     Show this help." << std::endl;
    std::cout << "     -k:                     Show used keypoints." << std::endl;
    std::cout << "     -c:                     Show used correspondences." << std::endl;
    std::cout << "     -r:                     Compute the model cloud resolution and multiply" << std::endl;
    std::cout << "                             each radius given by that value." << std::endl;
    std::cout << "     --algorithm (Hough|GC): Clustering algorithm used (default Hough)." << std::endl;
    std::cout << "     --model_ss val:         Model uniform sampling radius (default 0.01)" << std::endl;
    std::cout << "     --scene_ss val:         Scene uniform sampling radius (default 0.03)" << std::endl;
    std::cout << "     --rf_rad val:           Reference frame radius (default 0.015)" << std::endl;
    std::cout << "     --descr_rad val:        Descriptor radius (default 0.02)" << std::endl;
    std::cout << "     --cg_size val:          Cluster size (default 0.01)" << std::endl;
    std::cout << "     --cg_thresh val:        Clustering threshold (default 5)" << std::endl << std::endl;
}
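
// Example invocation (the binary name is a placeholder; milk.pcd and
// milk_cartoon_all_small_clorox.pcd are the clouds used by the official PCL
// correspondence-grouping tutorial, but any model/scene pair of .pcd files works):
//   ./correspondence_grouping milk.pcd milk_cartoon_all_small_clorox.pcd -k -c --algorithm Hough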

void
parseCommandLine(int argc, char *argv[])
{
    //Show help
    if (pcl::console::find_switch(argc, argv, "-h"))
    {
        showHelp(argv[0]);
        exit(0);
    }

    //Model & scene filenames
    std::vector<int> filenames;
    filenames = pcl::console::parse_file_extension_argument(argc, argv, ".pcd");
    if (filenames.size() != 2)
    {
        for (std::size_t i = 0; i < filenames.size(); i++)
            std::cout << filenames.at(i) << std::endl;
        std::cout << "Filenames missing.\n";
        showHelp(argv[0]);
        exit(-1);
    }

    model_filename_ = argv[filenames[0]];
    scene_filename_ = argv[filenames[1]];

    //Program behavior
    if (pcl::console::find_switch(argc, argv, "-k"))
    {
        show_keypoints_ = true;
    }
    if (pcl::console::find_switch(argc, argv, "-c"))
    {
        show_correspondences_ = true;
    }
    if (pcl::console::find_switch(argc, argv, "-r")) //计算点云的分辨率和多样性
    {
        use_cloud_resolution_ = true;
    }

    std::string used_algorithm;
    if (pcl::console::parse_argument(argc, argv, "--algorithm", used_algorithm) != -1)
    {
        if (used_algorithm.compare("Hough") == 0)
        {
            use_hough_ = true;
        }
        else if (used_algorithm.compare("GC") == 0)
        {
            use_hough_ = false;
        }
        else
        {
            std::cout << "Wrong algorithm name.\n";
            showHelp(argv[0]);
            exit(-1);
        }
    }

    //General parameters
    pcl::console::parse_argument(argc, argv, "--model_ss", model_ss_);
    pcl::console::parse_argument(argc, argv, "--scene_ss", scene_ss_);
    pcl::console::parse_argument(argc, argv, "--rf_rad", rf_rad_);
    pcl::console::parse_argument(argc, argv, "--descr_rad", descr_rad_);
    pcl::console::parse_argument(argc, argv, "--cg_size", cg_size_);
    pcl::console::parse_argument(argc, argv, "--cg_thresh", cg_thresh_);
}

double
computeCloudResolution(const pcl::PointCloud<PointType>::ConstPtr &cloud) // compute the cloud resolution
{
    double res = 0.0;
    int n_points = 0;
    int nres;
    std::vector<int> indices(2);
    std::vector<float> sqr_distances(2);
    pcl::search::KdTree<PointType> tree;
    tree.setInputCloud(cloud);   // set the input cloud

    for (size_t i = 0; i < cloud->size(); ++i)
    {
        if (!pcl_isfinite((*cloud)[i].x))
        {
            continue;
        }
        //Considering the second neighbor since the first is the point itself.
        nres = tree.nearestKSearch(i, 2, indices, sqr_distances); // returns the number of neighbors found
        if (nres == 2)
        {
            res += sqrt(sqr_distances[1]);
            ++n_points;
        }
    }
    if (n_points != 0)
    {
        res /= n_points;
    }
    return res;
}
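
// The value returned above is the average distance from each point to its nearest neighbor
// (the "cloud resolution"); when -r is passed, main() multiplies every radius/size parameter by it.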

int
main(int argc, char *argv[])
{
    parseCommandLine(argc, argv);

    pcl::PointCloud<PointType>::Ptr model(new pcl::PointCloud<PointType>());            // model cloud
    pcl::PointCloud<PointType>::Ptr model_keypoints(new pcl::PointCloud<PointType>());  // model keypoints
    pcl::PointCloud<PointType>::Ptr scene(new pcl::PointCloud<PointType>());            // scene (target) cloud
    pcl::PointCloud<PointType>::Ptr scene_keypoints(new pcl::PointCloud<PointType>());  // scene keypoints
    pcl::PointCloud<NormalType>::Ptr model_normals(new pcl::PointCloud<NormalType>());  // normals
    pcl::PointCloud<NormalType>::Ptr scene_normals(new pcl::PointCloud<NormalType>());
    pcl::PointCloud<DescriptorType>::Ptr model_descriptors(new pcl::PointCloud<DescriptorType>()); // descriptors
    pcl::PointCloud<DescriptorType>::Ptr scene_descriptors(new pcl::PointCloud<DescriptorType>());

    // Load the model (template) cloud
    if (pcl::io::loadPCDFile(model_filename_, *model) < 0)
    {
        std::cout << "Error loading model cloud." << std::endl;
        showHelp(argv[0]);
        return (-1);
    }
    if (pcl::io::loadPCDFile(scene_filename_, *scene) < 0)  // load the scene
    {
        std::cout << "Error loading scene cloud." << std::endl;
        showHelp(argv[0]);
        return (-1);
    }


    // Optionally rescale all parameters by the cloud resolution
    if (use_cloud_resolution_)
    {
        float resolution = static_cast<float> (computeCloudResolution(model));
        if (resolution != 0.0f)
        {
            model_ss_ *= resolution;
            scene_ss_ *= resolution;
            rf_rad_ *= resolution;
            descr_rad_ *= resolution;
            cg_size_ *= resolution;
        }

        std::cout << "Model resolution:       " << resolution << std::endl;
        std::cout << "Model sampling size:    " << model_ss_ << std::endl;
        std::cout << "Scene sampling size:    " << scene_ss_ << std::endl;
        std::cout << "LRF support radius:     " << rf_rad_ << std::endl;
        std::cout << "SHOT descriptor radius: " << descr_rad_ << std::endl;
        std::cout << "Clustering bin size:    " << cg_size_ << std::endl << std::endl;
    }
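
    // After this block (when -r was given) all radii/sizes are expressed as multiples of the
    // average point spacing instead of absolute units.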

    // Compute normals. NormalEstimationOMP estimates the local surface normal and curvature at
    // each point in parallel using OpenMP (thread count configurable, default 0 = automatic).
    pcl::NormalEstimationOMP<PointType, NormalType> norm_est;
    norm_est.setKSearch(10);            // use the 10 nearest neighbors for the estimate
    norm_est.setInputCloud(model);      // set the input cloud
    norm_est.compute(*model_normals);   // compute the model normals

    norm_est.setInputCloud(scene);
    norm_est.compute(*scene_normals);
    // normals computed for both clouds


    // Downsample the clouds with uniform sampling to extract keypoints
    // (voxel-style downsampling; each voxel is represented by the point closest to its centroid)
    pcl::PointCloud<int> sampled_indices;
    pcl::UniformSampling<PointType> uniform_sampling;
    uniform_sampling.setInputCloud(model);          // input cloud
    uniform_sampling.setRadiusSearch(model_ss_);    // sampling radius (model_ss_, default 0.01, overridable via --model_ss)
    uniform_sampling.compute(sampled_indices);
    pcl::copyPointCloud(*model, sampled_indices.points, *model_keypoints);
    std::cout << "Model total points: " << model->size() << "; Selected Keypoints: " << model_keypoints->size() << std::endl;

    uniform_sampling.setInputCloud(scene);
    uniform_sampling.setRadiusSearch(scene_ss_);
    uniform_sampling.compute(sampled_indices);
    pcl::copyPointCloud(*scene, sampled_indices.points, *scene_keypoints);
    std::cout << "Scene total points: " << scene->size() << "; Selected Keypoints: " << scene_keypoints->size() << std::endl;



    // Compute a SHOT descriptor for every keypoint
    pcl::SHOTEstimationOMP<PointType, NormalType, DescriptorType> descr_est;
    descr_est.setRadiusSearch(descr_rad_);     // descriptor support radius

    descr_est.setInputCloud(model_keypoints);  // model keypoints
    descr_est.setInputNormals(model_normals);  // model normals
    descr_est.setSearchSurface(model);         // full model cloud as search surface
    descr_est.compute(*model_descriptors);     // compute the model descriptors

    descr_est.setInputCloud(scene_keypoints);  // same for the scene
    descr_est.setInputNormals(scene_normals);
    descr_est.setSearchSurface(scene);
    descr_est.compute(*scene_descriptors);
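
    // SHOT352 is a 352-bin signature descriptor. It is computed only at the keypoints, while
    // setSearchSurface() lets the estimator draw neighborhood support from the full cloud.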

    //  Find model-scene correspondences with a KdTree
    pcl::CorrespondencesPtr model_scene_corrs(new pcl::Correspondences());

    pcl::KdTreeFLANN<DescriptorType> match_search;   // KdTree over the descriptor space
    match_search.setInputCloud(model_descriptors);   // model descriptors as the search set

    // For each scene keypoint descriptor, find the nearest model descriptor and, if it is close
    // enough, add the pair to the correspondence vector.
    for (size_t i = 0; i < scene_descriptors->size(); ++i)
    {
        std::vector<int> neigh_indices(1);     // index of the nearest neighbor
        std::vector<float> neigh_sqr_dists(1); // squared distance to the nearest neighbor
        if (!pcl_isfinite(scene_descriptors->at(i).descriptor[0])) // skip NaNs
        {
            continue;
        }
        int found_neighs = match_search.nearestKSearch(scene_descriptors->at(i), 1, neigh_indices, neigh_sqr_dists);
        // scene_descriptors->at(i) is the query descriptor, 1 is the number of neighbors requested,
        // neigh_indices receives the neighbor indices, neigh_sqr_dists the squared distances.

        if (found_neighs == 1 && neigh_sqr_dists[0] < 0.25f) // add a match only if the squared descriptor distance is below 0.25 (SHOT distances lie in [0, 1] by design)
        {
            // neigh_indices[0]: matched model descriptor index, i: scene descriptor index, neigh_sqr_dists[0]: squared distance
            pcl::Correspondence corr(neigh_indices[0], static_cast<int> (i), neigh_sqr_dists[0]);
            model_scene_corrs->push_back(corr);   // store the correspondence
        }
    }
    std::cout << "Correspondences found: " << model_scene_corrs->size() << std::endl;


    //  Actual clustering (correspondence grouping)
    std::vector<Eigen::Matrix4f, Eigen::aligned_allocator<Eigen::Matrix4f> > rototranslations;
    std::vector<pcl::Correspondences> clustered_corrs;

    //  Use the Hough3D grouping algorithm
    if (use_hough_)
    {
        //
        //  Compute (keypoint) local reference frames, only needed for Hough
        //
        std::cout << "Using the Hough 3D grouping method" << std::endl;
        pcl::PointCloud<RFType>::Ptr model_rf(new pcl::PointCloud<RFType>());
        pcl::PointCloud<RFType>::Ptr scene_rf(new pcl::PointCloud<RFType>());
        //  BOARD local reference frame estimation (cloud + normals -> reference frames)
        pcl::BOARDLocalReferenceFrameEstimation<PointType, NormalType, RFType> rf_est;
        rf_est.setFindHoles(true);
        rf_est.setRadiusSearch(rf_rad_);        // LRF support radius

        rf_est.setInputCloud(model_keypoints);  // model keypoints
        rf_est.setInputNormals(model_normals);  // model normals
        rf_est.setSearchSurface(model);         // full model cloud
        rf_est.compute(*model_rf);              // model reference frames

        rf_est.setInputCloud(scene_keypoints);  // same for the scene
        rf_est.setInputNormals(scene_normals);
        rf_est.setSearchSurface(scene);
        rf_est.compute(*scene_rf);

        //  Clustering
        //  Cluster the correspondences to separate the different instances of the model in the scene
        pcl::Hough3DGrouping<PointType, PointType, RFType, RFType> clusterer;
        clusterer.setHoughBinSize(cg_size_);      // bin size of the Hough space
        clusterer.setHoughThreshold(cg_thresh_);  // voting threshold
        clusterer.setUseInterpolation(true);
        clusterer.setUseDistanceWeight(false);

        clusterer.setInputCloud(model_keypoints);
        clusterer.setInputRf(model_rf);           // model reference frames
        clusterer.setSceneCloud(scene_keypoints);
        clusterer.setSceneRf(scene_rf);
        clusterer.setModelSceneCorrespondences(model_scene_corrs); // the correspondences found above

        //clusterer.cluster (clustered_corrs);    // grouping only, without pose estimation
        clusterer.recognize(rototranslations, clustered_corrs);    // recognize the clustered instances
    }
    else // Using GeometricConsistency grouping instead
    {
        std::cout << "Using the geometric consistency method" << std::endl;
        pcl::GeometricConsistencyGrouping<PointType, PointType> gc_clusterer;
        gc_clusterer.setGCSize(cg_size_);        // consensus set resolution
        gc_clusterer.setGCThreshold(cg_thresh_); // minimum cluster size (threshold)

        gc_clusterer.setInputCloud(model_keypoints);
        gc_clusterer.setSceneCloud(scene_keypoints);
        gc_clusterer.setModelSceneCorrespondences(model_scene_corrs);

        //gc_clusterer.cluster (clustered_corrs);
        gc_clusterer.recognize(rototranslations, clustered_corrs);
    }
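
    // Both branches fill the same outputs: rototranslations holds one 4x4 pose estimate per
    // recognized model instance, clustered_corrs the correspondences supporting each instance.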

    // Output the results: how many model instances were found in the scene
    std::cout << "Model instances found: " << rototranslations.size() << std::endl;
    for (size_t i = 0; i < rototranslations.size(); ++i)
    {
        std::cout << "\n    Instance " << i + 1 << ":" << std::endl;
        std::cout << "        Correspondences belonging to this instance: " << clustered_corrs[i].size() << std::endl;

        // Print the rotation matrix and translation vector relative to the input model
        Eigen::Matrix3f rotation = rototranslations[i].block<3, 3>(0, 0);
        Eigen::Vector3f translation = rototranslations[i].block<3, 1>(0, 3);

        printf("\n");
        printf("            | %6.3f %6.3f %6.3f | \n", rotation(0, 0), rotation(0, 1), rotation(0, 2));
        printf("        R = | %6.3f %6.3f %6.3f | \n", rotation(1, 0), rotation(1, 1), rotation(1, 2));
        printf("            | %6.3f %6.3f %6.3f | \n", rotation(2, 0), rotation(2, 1), rotation(2, 2));
        printf("\n");
        printf("        t = < %0.3f, %0.3f, %0.3f >\n", translation(0), translation(1), translation(2));
    }

    //  Visualization
    pcl::visualization::PCLVisualizer viewer("Correspondence Grouping");
    viewer.initCameraParameters();
    //viewer.addCoordinateSystem();
    viewer.addPointCloud(scene, "scene_cloud"); // show the scene cloud

    pcl::PointCloud<PointType>::Ptr off_scene_model(new pcl::PointCloud<PointType>());
    pcl::PointCloud<PointType>::Ptr off_scene_model_keypoints(new pcl::PointCloud<PointType>());

    if (show_correspondences_ || show_keypoints_)  // visualize the model next to the scene
    {
        //  We are translating the model so that it doesn't end in the middle of the scene representation
        //  (shift the model off to the side; the keypoints must be shifted the same way)
        pcl::transformPointCloud(*model, *off_scene_model, Eigen::Vector3f(-3, 0, 0), Eigen::Quaternionf(1, 0, 0, 0));
        pcl::transformPointCloud(*model_keypoints, *off_scene_model_keypoints, Eigen::Vector3f(-3, 0, 0), Eigen::Quaternionf(1, 0, 0, 0));

        // Color the shifted model (off_scene_model) green
        pcl::visualization::PointCloudColorHandlerCustom<PointType> off_scene_model_color_handler(off_scene_model, 0, 255, 0);
        viewer.addPointCloud(off_scene_model, off_scene_model_color_handler, "off_scene_model");
        viewer.setPointCloudRenderingProperties(pcl::visualization::PCL_VISUALIZER_POINT_SIZE, 4, "off_scene_model");


        // Compute and draw the model's AABB
        //pcl::PointCloud<pcl::PointXYZ>::Ptr model = xyzRGBA2xyz(off_scene_model);   // conversion only needed for the XYZRGBA variant
        std::vector<pcl::PointXYZ> Cube2Point = findCubePoint(off_scene_model);  // the two opposite corners: [0] = min, [1] = max
        viewer.addCube(Cube2Point[0].x, Cube2Point[1].x, Cube2Point[0].y, Cube2Point[1].y, Cube2Point[0].z, Cube2Point[1].z, 0, 0.0, 255.0, "AABB");
        viewer.setShapeRenderingProperties(pcl::visualization::PCL_VISUALIZER_OPACITY, 1, "AABB");    // line opacity
        viewer.setShapeRenderingProperties(pcl::visualization::PCL_VISUALIZER_LINE_WIDTH, 10, "AABB");// border line width

//        // Compute and draw the model's OBB instead
//     //   pcl::PointCloud<pcl::PointXYZ>::Ptr model = xyzRGBA2xyz(off_scene_model);   // conversion only needed for the XYZRGBA variant
//        OBB obbPoint = findOBBPoint(off_scene_model);
//        viewer.addCube(obbPoint.position,obbPoint.quat,
//                obbPoint.max_point_OBB.x - obbPoint.min_point_OBB.x,
//                obbPoint.max_point_OBB.y - obbPoint.min_point_OBB.y,
//                obbPoint.max_point_OBB.z - obbPoint.min_point_OBB.z,"OBB");
//        viewer.setShapeRenderingProperties(pcl::visualization::PCL_VISUALIZER_COLOR,255,0,0,"OBB");
//        viewer.setShapeRenderingProperties(pcl::visualization::PCL_VISUALIZER_OPACITY,1,"OBB");
//        viewer.setShapeRenderingProperties(pcl::visualization::PCL_VISUALIZER_LINE_WIDTH,20,"OBB");

    }

/****

    if (show_keypoints_)   // visualize the keypoints
    {
        pcl::visualization::PointCloudColorHandlerCustom<PointType> scene_keypoints_color_handler(scene_keypoints, 0, 0, 255);  // scene keypoints in blue
        viewer.addPointCloud(scene_keypoints, scene_keypoints_color_handler, "scene_keypoints");
        viewer.setPointCloudRenderingProperties(pcl::visualization::PCL_VISUALIZER_POINT_SIZE, 5, "scene_keypoints");

        pcl::visualization::PointCloudColorHandlerCustom<PointType> off_scene_model_keypoints_color_handler(off_scene_model_keypoints, 0, 0, 255);
        viewer.addPointCloud(off_scene_model_keypoints, off_scene_model_keypoints_color_handler, "off_scene_model_keypoints");
        viewer.setPointCloudRenderingProperties(pcl::visualization::PCL_VISUALIZER_POINT_SIZE, 5, "off_scene_model_keypoints");
    }

*****/
    // Show the scene in white
    pcl::visualization::PointCloudColorHandlerCustom<PointType> rgb(scene, 255, 255, 255);
    // pcl::visualization::PointCloudColorHandlerRGBField<PointType> rgb(scene);
    viewer.addPointCloud(scene, rgb, "The Scene");
    viewer.setPointCloudRenderingProperties(pcl::visualization::PCL_VISUALIZER_POINT_SIZE, 2, "The Scene");

    /// Each recognized model instance (rotated_model) is drawn in red inside the scene
    for (size_t i = 0; i < rototranslations.size(); ++i)
    {
        pcl::PointCloud<PointType>::Ptr rotated_model(new pcl::PointCloud<PointType>());
        pcl::transformPointCloud(*model, *rotated_model, rototranslations[i]); // transform the model with the estimated pose
        // rototranslations[i] is a 4x4 rigid (rotation + translation) transform
        std::stringstream ss_cloud;
        ss_cloud << "instance" << i;

        pcl::visualization::PointCloudColorHandlerCustom<PointType> rotated_model_color_handler(rotated_model, 255, 0, 0);
        viewer.addPointCloud(rotated_model, rotated_model_color_handler, ss_cloud.str());
        viewer.setPointCloudRenderingProperties(pcl::visualization::PCL_VISUALIZER_POINT_SIZE, 4, ss_cloud.str());

        // Compute and draw the AABB of this instance
        //pcl::PointCloud<pcl::PointXYZ>::Ptr model_scene = xyzRGBA2xyz(rotated_model); // conversion only needed for the XYZRGBA variant
        std::vector<pcl::PointXYZ> Cube2Point_scene = findCubePoint(rotated_model);     // [0] = min corner, [1] = max corner
        std::cout << "\n-----------------------------\n"
                  << Cube2Point_scene[0] << "\n"
                  << Cube2Point_scene[1]
                  << "\n-------------------------------\n" << std::endl;
        std::stringstream ss_box;                // give each instance's bounding box its own id
        ss_box << "AABBscene" << i;
        viewer.addCube(Cube2Point_scene[0].x, Cube2Point_scene[1].x, Cube2Point_scene[0].y, Cube2Point_scene[1].y, Cube2Point_scene[0].z, Cube2Point_scene[1].z, 0, 0, 255.0, ss_box.str());
        viewer.setShapeRenderingProperties(pcl::visualization::PCL_VISUALIZER_OPACITY, 1, ss_box.str());    // line opacity
        viewer.setShapeRenderingProperties(pcl::visualization::PCL_VISUALIZER_LINE_WIDTH, 10, ss_box.str());// border line width

//        // Compute and draw the OBB of this instance instead
//        OBB obbPoint = findOBBPoint(rotated_model);
//        viewer.addCube(obbPoint.position,obbPoint.quat,
//                       obbPoint.max_point_OBB.x - obbPoint.min_point_OBB.x,
//                       obbPoint.max_point_OBB.y - obbPoint.min_point_OBB.y,
//                       obbPoint.max_point_OBB.z - obbPoint.min_point_OBB.z,"OBBscene");
//        viewer.setShapeRenderingProperties(pcl::visualization::PCL_VISUALIZER_COLOR,0,0,255,"OBBscene");
//        viewer.setShapeRenderingProperties(pcl::visualization::PCL_VISUALIZER_OPACITY,1,"OBBscene");
//        viewer.setShapeRenderingProperties(pcl::visualization::PCL_VISUALIZER_LINE_WIDTH,20,"OBBscene");

        if (show_correspondences_)   // draw the correspondence lines
        {
            for (size_t j = 0; j < clustered_corrs[i].size(); ++j)
            {
                std::stringstream ss_line;
                ss_line << "correspondence_line" << i << "_" << j;
                PointType& model_point = off_scene_model_keypoints->at(clustered_corrs[i][j].index_query);
                PointType& scene_point = scene_keypoints->at(clustered_corrs[i][j].index_match);

                //  We are drawing a line for each pair of clustered correspondences found between the model and the scene
                viewer.addLine<PointType, PointType>(model_point, scene_point, 0, 255, 0, ss_line.str());
            }
        }
    }

//    viewer.addCoordinateSystem();
    while (!viewer.wasStopped())
    {
        viewer.spinOnce();
    }

    return (0);
}

// Compute the axis-aligned bounding box (AABB) of a cloud and return its two opposite corners
std::vector<pcl::PointXYZ> findCubePoint(pcl::PointCloud<pcl::PointXYZ>::Ptr cloud_in)
{
    std::vector<pcl::PointXYZ> AABB;       // the two AABB corner points
    std::vector<float> moment_of_inertia;  // moments of inertia
    std::vector<float> eccentricity;       // eccentricities
    pcl::PointXYZ min_point_AABB;
    pcl::PointXYZ max_point_AABB;

    pcl::MomentOfInertiaEstimation<pcl::PointXYZ> feature_extractor; // feature extractor
    feature_extractor.setInputCloud(cloud_in);                       // input cloud
    feature_extractor.compute();                                     // run the computation

    feature_extractor.getMomentOfInertia(moment_of_inertia);         // moments of inertia
    feature_extractor.getEccentricity(eccentricity);                 // eccentricities
    feature_extractor.getAABB(min_point_AABB, max_point_AABB);       // axis-aligned bounding box
    AABB.push_back(min_point_AABB);
    AABB.push_back(max_point_AABB);                                  // [0] = min corner, [1] = max corner

    return AABB;
}

// Convert a PointXYZRGBA cloud to PointXYZ (drop the color channel)
pcl::PointCloud<pcl::PointXYZ>::Ptr xyzRGBA2xyz(pcl::PointCloud<pcl::PointXYZRGBA>::Ptr cloud_in) {

    pcl::PointCloud<pcl::PointXYZ>::Ptr cloud_out(new pcl::PointCloud<pcl::PointXYZ>);
    cloud_out->resize(cloud_in->size());   // allocate the output once; write by index instead of push_back
    cloud_out->height = 1;
    cloud_out->width = cloud_out->size();
    cloud_out->is_dense = false;

    for (std::size_t i = 0; i < cloud_in->size(); i++) {
        cloud_out->points[i].x = cloud_in->points[i].x;   // copy only the XYZ coordinates
        cloud_out->points[i].y = cloud_in->points[i].y;
        cloud_out->points[i].z = cloud_in->points[i].z;
    }

    return cloud_out;
}
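
// Note: with PointType = pcl::PointXYZ this helper is never called in this listing (the calls are
// commented out above); it is only needed for the PointXYZRGBA variant further below, where the
// cloud must be converted before findCubePoint()/findOBBPoint() can be applied.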

OBB findOBBPoint(pcl::PointCloud<pcl::PointXYZ>::Ptr cloud_in)
{
    OBB obbPoint;                                                    // return value
    pcl::MomentOfInertiaEstimation<pcl::PointXYZ> feature_extractor; // feature extractor
    feature_extractor.setInputCloud(cloud_in);                       // input cloud
    feature_extractor.compute();                                     // run the computation
    std::vector<float> moment_of_inertia;                            // moments of inertia
    std::vector<float> eccentricity;                                 // eccentricities

    pcl::PointXYZ min_point_OBB;
    pcl::PointXYZ max_point_OBB;
    pcl::PointXYZ position_OBB;
    Eigen::Matrix3f rotational_matrix_OBB;
    float major_value, middle_value, minor_value;
    Eigen::Vector3f major_vector, middle_vector, minor_vector;
    Eigen::Vector3f mass_center;
    feature_extractor.getMomentOfInertia(moment_of_inertia);         // moments of inertia
    feature_extractor.getEccentricity(eccentricity);                 // eccentricities
    feature_extractor.getOBB(min_point_OBB, max_point_OBB, position_OBB, rotational_matrix_OBB); // OBB parameters

    feature_extractor.getEigenValues(major_value, middle_value, minor_value);     // the three eigenvalues
    feature_extractor.getEigenVectors(major_vector, middle_vector, minor_vector); // the three eigenvectors
    feature_extractor.getMassCenter(mass_center);                                 // center of mass

    Eigen::Vector3f position(position_OBB.x, position_OBB.y, position_OBB.z);
    std::cout << "position_OBB: " << position_OBB << std::endl;
    std::cout << "mass_center: " << mass_center << std::endl;        // center coordinates
    Eigen::Quaternionf quat(rotational_matrix_OBB);

    std::cout << "size of cloud: " << cloud_in->points.size() << std::endl;
    std::cout << "moment_of_inertia: " << moment_of_inertia.size() << std::endl;
    std::cout << "eccentricity: " << eccentricity.size() << std::endl;

    obbPoint.position = position;
    obbPoint.max_point_OBB = max_point_OBB;
    obbPoint.min_point_OBB = min_point_OBB;
    obbPoint.quat = quat;

    return obbPoint;
}

The result is shown in the figure below.

[Figure: recognition result with AABB bounding boxes]

--------------------------------------------------------------------------------

1. Introduction

Correspondence grouping algorithms cluster the point-to-point correspondences obtained from feature matching into groups, each of which supports one candidate instance of the model to be recognized in the scene. For every cluster the algorithm also outputs a transformation matrix, i.e. the pose estimate of that model-instance hypothesis.
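
Concretely, each accepted cluster comes back as a 4x4 rigid transform (rotation plus translation). The following minimal sketch shows how one such hypothesis is consumed, using the same PCL calls as the listings below; the function name placeModelAtHypothesis is just for illustration, and pose stands for one entry of the rototranslations vector returned by recognize():

#include <pcl/common/transforms.h>
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>

pcl::PointCloud<pcl::PointXYZRGBA>::Ptr
placeModelAtHypothesis(const pcl::PointCloud<pcl::PointXYZRGBA>::ConstPtr &model,
                       const Eigen::Matrix4f &pose)
{
    Eigen::Matrix3f rotation    = pose.block<3, 3>(0, 0);  // orientation of the instance
    Eigen::Vector3f translation = pose.block<3, 1>(0, 3);  // position of the instance
    (void)rotation; (void)translation;                     // the full listings print these

    // Move the model into the scene at the hypothesized pose.
    pcl::PointCloud<pcl::PointXYZRGBA>::Ptr instance(new pcl::PointCloud<pcl::PointXYZRGBA>());
    pcl::transformPointCloud(*model, *instance, pose);
    return instance;
}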

2. Code (I do not fully understand this code yet; I will keep studying the later material and come back to it) -> (the earlier part now mostly makes sense <I have added some comments based on my own understanding>; the output section and some of the matrix transformations are still unclear to me)

//Not fully understood yet; re-read after finishing the later material
#include <pcl/io/pcd_io.h>
#include <pcl/point_cloud.h>
#include <pcl/correspondence.h>
#include <pcl/features/normal_3d_omp.h>
#include <pcl/features/shot_omp.h>
#include <pcl/features/board.h>
#include <pcl/keypoints/uniform_sampling.h>
#include <pcl/recognition/cg/hough_3d.h>
#include <pcl/recognition/cg/geometric_consistency.h>
#include <pcl/visualization/pcl_visualizer.h>
#include <pcl/kdtree/kdtree_flann.h>
#include <pcl/kdtree/impl/kdtree_flann.hpp>
#include <pcl/common/transforms.h>
#include <pcl/console/parse.h>

typedef pcl::PointXYZRGBA PointType;
typedef pcl::Normal NormalType;
typedef pcl::ReferenceFrame RFType;
typedef pcl::SHOT352 DescriptorType;

std::string model_filename_;//model file
std::string scene_filename_;//scene file

//Algorithm params
bool show_keypoints_ (false);//show the keypoints
bool show_correspondences_ (false);//show the correspondences
bool use_cloud_resolution_ (false);//controlled by -r: if true, all radius parameters are multiplied by the cloud resolution
bool use_hough_ (true);//default algorithm is Hough 3D Grouping
float model_ss_ (0.01f);//uniform-sampling search radius for the model
float scene_ss_ (0.03f);//uniform-sampling search radius for the scene
float rf_rad_ (0.015f);//radius of the local reference frames
float descr_rad_ (0.02f);//SHOT descriptor radius
float cg_size_ (0.01f);//cluster size
float cg_thresh_ (5.0f);//clustering threshold

void
showHelp (char *filename)
{
  std::cout << std::endl;
  std::cout << "***************************************************************************" << std::endl;
  std::cout << "*                                                                         *" << std::endl;
  std::cout << "*             Correspondence Grouping Tutorial - Usage Guide              *" << std::endl;
  std::cout << "*                                                                         *" << std::endl;
  std::cout << "***************************************************************************" << std::endl << std::endl;
  std::cout << "Usage: " << filename << " model_filename.pcd scene_filename.pcd [Options]" << std::endl << std::endl;
  std::cout << "Options:" << std::endl;
  std::cout << "     -h:                     Show this help." << std::endl;
  std::cout << "     -k:                     Show used keypoints." << std::endl;
  std::cout << "     -c:                     Show used correspondences." << std::endl;
  std::cout << "     -r:                     Compute the model cloud resolution and multiply" << std::endl;
  std::cout << "                             each radius given by that value." << std::endl;
  std::cout << "     --algorithm (Hough|GC): Clustering algorithm used (default Hough)." << std::endl;
  std::cout << "     --model_ss val:         Model uniform sampling radius (default 0.01)" << std::endl;
  std::cout << "     --scene_ss val:         Scene uniform sampling radius (default 0.03)" << std::endl;
  std::cout << "     --rf_rad val:           Reference frame radius (default 0.015)" << std::endl;
  std::cout << "     --descr_rad val:        Descriptor radius (default 0.02)" << std::endl;
  std::cout << "     --cg_size val:          Cluster size (default 0.01)" << std::endl;
  std::cout << "     --cg_thresh val:        Clustering threshold (default 5)" << std::endl << std::endl;
}

void
parseCommandLine (int argc, char *argv[])
{
  //Show help
  if (pcl::console::find_switch (argc, argv, "-h"))
  {
    showHelp (argv[0]);
    exit (0);
  }

  //Model & scene filenames
  std::vector<int> filenames;
  filenames = pcl::console::parse_file_extension_argument (argc, argv, ".pcd");//look for .pcd files among the command-line arguments and return a vector with their argument indices
  if (filenames.size () != 2)
  {
    std::cout << "Filenames missing.\n";
    showHelp (argv[0]);
    exit (-1);
  }

  model_filename_ = argv[filenames[0]];//the first .pcd argument becomes the model file
  scene_filename_ = argv[filenames[1]];//the second .pcd argument becomes the scene file

  //Program behavior
  if (pcl::console::find_switch (argc, argv, "-k"))
  {
    show_keypoints_ = true;
  }
  if (pcl::console::find_switch (argc, argv, "-c"))
  {
    show_correspondences_ = true;
  }
  if (pcl::console::find_switch (argc, argv, "-r"))
  {
    use_cloud_resolution_ = true;
  }

  std::string used_algorithm;
  if (pcl::console::parse_argument (argc, argv, "--algorithm", used_algorithm) != -1)//搜索 --algorithm将后面的字符给used_algorithm
  {
    if (used_algorithm.compare ("Hough") == 0)//两种算法,默认为hough
    {
      use_hough_ = true;
    }else if (used_algorithm.compare ("GC") == 0)
    {
      use_hough_ = false;
    }
    else//两种算法都不是,则输入的算法错误
    {
      std::cout << "Wrong algorithm name.\n";
      showHelp (argv[0]);
      exit (-1);
    }
  }

  //General parameters
  //将“”中的内容后面的赋值给函数的最后一个参数
  pcl::console::parse_argument (argc, argv, "--model_ss", model_ss_);
  pcl::console::parse_argument (argc, argv, "--scene_ss", scene_ss_);
  pcl::console::parse_argument (argc, argv, "--rf_rad", rf_rad_);
  pcl::console::parse_argument (argc, argv, "--descr_rad", descr_rad_);
  pcl::console::parse_argument (argc, argv, "--cg_size", cg_size_);
  pcl::console::parse_argument (argc, argv, "--cg_thresh", cg_thresh_);
}

//Compute the cloud resolution; the return value is the average nearest-neighbor distance
double
computeCloudResolution (const pcl::PointCloud<PointType>::ConstPtr &cloud)
{
  double res = 0.0;
  int n_points = 0;
  int nres;
  std::vector<int> indices (2);//indices of the two nearest neighbors
  std::vector<float> sqr_distances (2);//squared distances to the two nearest neighbors
  pcl::search::KdTree<PointType> tree;//search structure
  tree.setInputCloud (cloud);//set the input cloud

  for (size_t i = 0; i < cloud->size (); ++i)
  {
    if (! pcl_isfinite ((*cloud)[i].x))
    {
      continue;
    }
    //Considering the second neighbor since the first is the point itself.
    nres = tree.nearestKSearch (i, 2, indices, sqr_distances);//find the 2 nearest neighbors of point i; returns how many were found
    if (nres == 2)
    {
      res += sqrt (sqr_distances[1]);
      ++n_points;
    }
  }
  if (n_points != 0)
  {
    res /= n_points;
  }
  return res;
}

int
main (int argc, char *argv[])
{
  parseCommandLine (argc, argv);

  pcl::PointCloud<PointType>::Ptr model (new pcl::PointCloud<PointType> ());//model and its keypoints
  pcl::PointCloud<PointType>::Ptr model_keypoints (new pcl::PointCloud<PointType> ());
  pcl::PointCloud<PointType>::Ptr scene (new pcl::PointCloud<PointType> ());//scene and its keypoints
  pcl::PointCloud<PointType>::Ptr scene_keypoints (new pcl::PointCloud<PointType> ());
  pcl::PointCloud<NormalType>::Ptr model_normals (new pcl::PointCloud<NormalType> ());//model normals
  pcl::PointCloud<NormalType>::Ptr scene_normals (new pcl::PointCloud<NormalType> ());//scene normals
  pcl::PointCloud<DescriptorType>::Ptr model_descriptors (new pcl::PointCloud<DescriptorType> ());
  pcl::PointCloud<DescriptorType>::Ptr scene_descriptors (new pcl::PointCloud<DescriptorType> ());

  //
  //  Load clouds
  //
  if (pcl::io::loadPCDFile (model_filename_, *model) < 0)
  {
    std::cout << "Error loading model cloud." << std::endl;
    showHelp (argv[0]);
    return (-1);
  }
  if (pcl::io::loadPCDFile (scene_filename_, *scene) < 0)
  {
    std::cout << "Error loading scene cloud." << std::endl;
    showHelp (argv[0]);
    return (-1);
  }

  //
  //  Set up resolution invariance
  //
  if (use_cloud_resolution_)//
  {
    float resolution = static_cast<float> (computeCloudResolution (model));
    if (resolution != 0.0f)
    {
      model_ss_   *= resolution;
      scene_ss_   *= resolution;
      rf_rad_     *= resolution;
      descr_rad_  *= resolution;
      cg_size_    *= resolution;
    }

    std::cout << "Model resolution:       " << resolution << std::endl;
    std::cout << "Model sampling size:    " << model_ss_ << std::endl;
    std::cout << "Scene sampling size:    " << scene_ss_ << std::endl;
    std::cout << "LRF support radius:     " << rf_rad_ << std::endl;
    std::cout << "SHOT descriptor radius: " << descr_rad_ << std::endl;
    std::cout << "Clustering bin size:    " << cg_size_ << std::endl << std::endl;
  }

  //
  //  Compute Normals
  //
  pcl::NormalEstimationOMP<PointType, NormalType> norm_est;
  norm_est.setKSearch (10);//use the 10 nearest neighbors for the estimate
  norm_est.setInputCloud (model);//input cloud: model
  norm_est.compute (*model_normals);//write the normals to model_normals

  norm_est.setInputCloud (scene);//same for the scene cloud
  norm_est.compute (*scene_normals);//scene normal estimation result

  std::cerr << "   scene before downsample: " << scene->size() << " \n" << "   model before downsample: " << model->points.size() << std::endl;
  //
  //  Downsample Clouds to Extract keypoints
  //
  pcl::PointCloud<int> sampled_indices;//indices of the sampled points

  pcl::UniformSampling<PointType> uniform_sampling; //build a local 3D grid over the cloud and downsample it; the point closest to each voxel centroid is kept as a keypoint
  uniform_sampling.setInputCloud (model);
  uniform_sampling.setRadiusSearch (model_ss_);
  uniform_sampling.compute (sampled_indices);//downsampling result, as indices
  pcl::copyPointCloud (*model, sampled_indices.points, *model_keypoints);//copy the indexed points of model into model_keypoints
  std::cout << "Model total points: " << model->size () << "; Selected Keypoints: " << model_keypoints->size () << std::endl;
  //save the model keypoints
  pcl::io::savePCDFileASCII("model_keypoints.pcd",*model_keypoints);
  std::cout<<"model_keypoints.pcd saved!"<<std::endl;

  uniform_sampling.setInputCloud (scene);
  uniform_sampling.setRadiusSearch (scene_ss_);
  uniform_sampling.compute (sampled_indices);
  pcl::copyPointCloud (*scene, sampled_indices.points, *scene_keypoints);//copy the indexed points of scene into scene_keypoints
  std::cout << "Scene total points: " << scene->size () << "; Selected Keypoints: " << scene_keypoints->size () << std::endl;
  //save the scene keypoints
  pcl::io::savePCDFileASCII("scene_keypoints.pcd",*scene_keypoints);
  std::cout<<"scene_keypoints.pcd saved!"<<std::endl;


  //
  //  Compute Descriptor for keypoints (SHOT descriptors)
  //
  pcl::SHOTEstimationOMP<PointType, NormalType, DescriptorType> descr_est;//estimates SHOT histogram signatures for the given cloud, using points and normals
  descr_est.setRadiusSearch (descr_rad_);//support radius of the descriptor

  descr_est.setInputCloud (model_keypoints);//keypoints to describe
  descr_est.setInputNormals (model_normals);//model normals
  descr_est.setSearchSurface (model);//full model cloud as search surface
  descr_est.compute (*model_descriptors);//model descriptors

  descr_est.setInputCloud (scene_keypoints);//scene keypoints from the downsampling step
  descr_est.setInputNormals (scene_normals);//scene normals
  descr_est.setSearchSurface (scene);//full scene cloud
  descr_est.compute (*scene_descriptors);//scene descriptors

  //  Find Model-Scene Correspondences with KdTree
  //
  pcl::CorrespondencesPtr model_scene_corrs (new pcl::Correspondences ());//vector that will hold the correspondences

  pcl::KdTreeFLANN<DescriptorType> match_search;
  match_search.setInputCloud (model_descriptors);//model descriptors as the search set

  //  For each scene keypoint descriptor, find nearest neighbor into the model keypoints descriptor cloud and add it to the correspondences vector.
  for (size_t i = 0; i < scene_descriptors->size (); ++i)
  {
    std::vector<int> neigh_indices (1);
    std::vector<float> neigh_sqr_dists (1);
    if (!pcl_isfinite (scene_descriptors->at (i).descriptor[0])) //skipping NaNs
    {
      continue;
    }
    int found_neighs = match_search.nearestKSearch (scene_descriptors->at (i), 1, neigh_indices, neigh_sqr_dists);//returns the number of neighbors found
    //accept the match only if exactly one neighbor was found and the squared descriptor distance is below 0.25
    if(found_neighs == 1 && neigh_sqr_dists[0] < 0.25f) //  add match only if the squared descriptor distance is less than 0.25 (SHOT descriptor distances are between 0 and 1 by design)
    {
      pcl::Correspondence corr (neigh_indices[0], static_cast<int> (i), neigh_sqr_dists[0]);
      model_scene_corrs->push_back (corr);
    }
  }
  std::cout << "Correspondences found: " << model_scene_corrs->size () << std::endl;

  //Cluster the correspondence set found above; the default grouping algorithm is Hough
  //  Actual Clustering
  //                                                (the two output parameters below)
  std::vector<Eigen::Matrix4f, Eigen::aligned_allocator<Eigen::Matrix4f> > rototranslations;//one transformation matrix for each model instance recognized in the scene
  std::vector<pcl::Correspondences> clustered_corrs;

  //  Using Hough3D
  if (use_hough_)
  {
    //
    //  Compute (Keypoints) Reference Frames only for Hough
    //
    pcl::PointCloud<RFType>::Ptr model_rf (new pcl::PointCloud<RFType> ());
    pcl::PointCloud<RFType>::Ptr scene_rf (new pcl::PointCloud<RFType> ());

    pcl::BOARDLocalReferenceFrameEstimation<PointType, NormalType, RFType> rf_est;
    rf_est.setFindHoles (true);
    rf_est.setRadiusSearch (rf_rad_);

    rf_est.setInputCloud (model_keypoints);//模型关键点
    rf_est.setInputNormals (model_normals);//模型法线
    rf_est.setSearchSurface (model);
    rf_est.compute (*model_rf);

    rf_est.setInputCloud (scene_keypoints);
    rf_est.setInputNormals (scene_normals);
    rf_est.setSearchSurface (scene);
    rf_est.compute (*scene_rf);

    //  Clustering (method 1: Hough voting)
    pcl::Hough3DGrouping<PointType, PointType, RFType, RFType> clusterer;
    clusterer.setHoughBinSize (cg_size_);//sampling interval (bin size) of the Hough space
    clusterer.setHoughThreshold (cg_thresh_);//clustering threshold
    clusterer.setUseInterpolation (true);//whether votes are interpolated over neighboring Hough bins
    //------------------------------------------------------------------------------(study note: resumed here, 8.15 noon)
    clusterer.setUseDistanceWeight (false);//whether the correspondence distance is used as a vote weight

    clusterer.setInputCloud (model_keypoints);//input cloud (model keypoints from the downsampling step)
    clusterer.setInputRf (model_rf);//reference frames of the input (model) dataset
    clusterer.setSceneCloud (scene_keypoints);//scene keypoints
    clusterer.setSceneRf (scene_rf);//reference frames of the scene
    clusterer.setModelSceneCorrespondences (model_scene_corrs);//pointer to the model-scene correspondences computed above

    //clusterer.cluster (clustered_corrs);
    clusterer.recognize (rototranslations, clustered_corrs);//recognize the model instances in the scene; returns true on success
  }
  else // Using GeometricConsistency (method 2)
  {
    pcl::GeometricConsistencyGrouping<PointType, PointType> gc_clusterer;
    gc_clusterer.setGCSize (cg_size_);//consensus set resolution
    gc_clusterer.setGCThreshold (cg_thresh_);//minimum cluster size

    gc_clusterer.setInputCloud (model_keypoints);//model keypoints
    gc_clusterer.setSceneCloud (scene_keypoints);//scene keypoints
    gc_clusterer.setModelSceneCorrespondences (model_scene_corrs);//pointer to the model-scene correspondences computed above

    //gc_clusterer.cluster (clustered_corrs);
    gc_clusterer.recognize (rototranslations, clustered_corrs);//recognize the model instances in the scene; returns true on success
  }

  //
  //  Output results
  //
  std::cout << "Model instances found: " << rototranslations.size () << std::endl;
  for (size_t i = 0; i < rototranslations.size (); ++i)
  {
    std::cout << "\n    Instance " << i + 1 << ":" << std::endl;
    std::cout << "        Correspondences belonging to this instance: " << clustered_corrs[i].size () << std::endl;

    // Print the rotation matrix and translation vector
    Eigen::Matrix3f rotation = rototranslations[i].block<3,3>(0, 0);
    Eigen::Vector3f translation = rototranslations[i].block<3,1>(0, 3);

    printf ("\n");
    printf ("            | %6.3f %6.3f %6.3f | \n", rotation (0,0), rotation (0,1), rotation (0,2));
    printf ("        R = | %6.3f %6.3f %6.3f | \n", rotation (1,0), rotation (1,1), rotation (1,2));
    printf ("            | %6.3f %6.3f %6.3f | \n", rotation (2,0), rotation (2,1), rotation (2,2));
    printf ("\n");
    printf ("        t = < %0.3f, %0.3f, %0.3f >\n", translation (0), translation (1), translation (2));
  }

  //
  //  Visualization
  //
  int v1(0);
  int v2(0);
  pcl::visualization::PCLVisualizer viewer ("PCL tutorial (2nd edition) - 3D model recognition based on correspondence grouping");
  //first viewport
  viewer.createViewPort(0,0,0.5,1,v1);
  viewer.addPointCloud (scene, "scene_cloud",v1);
  viewer.setBackgroundColor(255,255,255,v1);
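  // createViewPort(xmin, ymin, xmax, ymax, id) takes normalized window coordinates in [0,1]:
  // v1 is the left half (scene plus recognized instances), v2, created further below, is the
  // right half showing the model on its own.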
  pcl::PointCloud<PointType>::Ptr off_scene_model (new pcl::PointCloud<PointType> ());
  pcl::PointCloud<PointType>::Ptr off_scene_model_keypoints (new pcl::PointCloud<PointType> ());

  //if no extra command-line switches are given, this whole block is skipped
  if (show_correspondences_ || show_keypoints_)
  {
    //  We are translating the model so that it doesn't end in the middle of the scene representation
    //  (these clouds are only used for display; the offset here is zero, so the model is not actually moved)
    pcl::transformPointCloud (*model, *off_scene_model, Eigen::Vector3f (0,0,0), Eigen::Quaternionf (1, 0, 0, 0));
    pcl::transformPointCloud (*model_keypoints, *off_scene_model_keypoints, Eigen::Vector3f (0,0,0), Eigen::Quaternionf (1, 0, 0, 0));

    pcl::visualization::PointCloudColorHandlerCustom<PointType> off_scene_model_color_handler (off_scene_model, 0, 255, 0);
    viewer.addPointCloud (off_scene_model, off_scene_model_color_handler, "off_scene_model");
  }

  //optionally show the keypoints
  if (show_keypoints_)
  {
    pcl::visualization::PointCloudColorHandlerCustom<PointType> scene_keypoints_color_handler (scene_keypoints, 0, 0, 255);
    viewer.addPointCloud (scene_keypoints, scene_keypoints_color_handler, "scene_keypoints");
    viewer.setPointCloudRenderingProperties (pcl::visualization::PCL_VISUALIZER_POINT_SIZE, 5, "scene_keypoints");

    pcl::visualization::PointCloudColorHandlerCustom<PointType> off_scene_model_keypoints_color_handler (off_scene_model_keypoints, 0, 0, 255);
    viewer.addPointCloud (off_scene_model_keypoints, off_scene_model_keypoints_color_handler, "off_scene_model_keypoints");
    viewer.setPointCloudRenderingProperties (pcl::visualization::PCL_VISUALIZER_POINT_SIZE, 5, "off_scene_model_keypoints");
  }

  for (size_t i = 0; i < rototranslations.size (); ++i)
  {
    pcl::PointCloud<PointType>::Ptr rotated_model (new pcl::PointCloud<PointType> ());
    pcl::transformPointCloud (*model, *rotated_model, rototranslations[i]);//generate the transformed model (one per recognized instance)

    std::stringstream ss_cloud;
    ss_cloud << "instance" << i;

    pcl::visualization::PointCloudColorHandlerCustom<PointType> rotated_model_color_handler (rotated_model, 255, 0, 0);
    viewer.addPointCloud (rotated_model, rotated_model_color_handler, ss_cloud.str (),v1);

    //optionally draw the correspondence lines
    if (show_correspondences_)
    {
      for (size_t j = 0; j < clustered_corrs[i].size (); ++j)
      {
        std::stringstream ss_line;
        ss_line << "correspondence_line" << i << "_" << j;
        //look up the two endpoints of this correspondence (model keypoint and scene keypoint)
        PointType& model_point = off_scene_model_keypoints->at (clustered_corrs[i][j].index_query);
        PointType& scene_point = scene_keypoints->at (clustered_corrs[i][j].index_match);

        //  We are drawing a line for each pair of clustered correspondences found between the model and the scene
        viewer.addLine<PointType, PointType> (model_point, scene_point, 0, 255, 0, ss_line.str ());
      }
    }
  }
  //second viewport: shows only the milk model
  viewer.createViewPort(0.5,0,1,1,v2);
  viewer.setBackgroundColor(255,255,255,v2);
  viewer.addPointCloud(model,"model_cloud",v2);


  while (!viewer.wasStopped ())
  {
    viewer.spinOnce ();
  }

  return (0);
}
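
This version also writes model_keypoints.pcd and scene_keypoints.pcd into the working directory (the savePCDFileASCII calls above). A minimal, standalone sketch for inspecting one of those files afterwards might look like this; the file name is assumed to match what the program saved:

#include <iostream>
#include <pcl/io/pcd_io.h>
#include <pcl/point_types.h>
#include <pcl/visualization/pcl_visualizer.h>

int main()
{
    // Load the keypoints written by the recognition program above.
    pcl::PointCloud<pcl::PointXYZRGBA>::Ptr keypoints(new pcl::PointCloud<pcl::PointXYZRGBA>());
    if (pcl::io::loadPCDFile("model_keypoints.pcd", *keypoints) < 0)
    {
        std::cerr << "Could not read model_keypoints.pcd" << std::endl;
        return -1;
    }

    // Show them in blue with enlarged points, the same rendering style used above.
    pcl::visualization::PCLVisualizer viewer("saved keypoints");
    pcl::visualization::PointCloudColorHandlerCustom<pcl::PointXYZRGBA> color(keypoints, 0, 0, 255);
    viewer.addPointCloud(keypoints, color, "keypoints");
    viewer.setPointCloudRenderingProperties(pcl::visualization::PCL_VISUALIZER_POINT_SIZE, 5, "keypoints");
    while (!viewer.wasStopped())
        viewer.spinOnce();
    return 0;
}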

3. Visualization (the point cloud on the right is the input model; the red points on the left are the object detected in the scene)

Updated 2019.11.17

Recognition with bounding boxes

#include <pcl/io/pcd_io.h>
#include <pcl/point_cloud.h>     //点云类型头文件
#include <pcl/correspondence.h>   //对应表示两个实体之间的匹配(例如,点,描述符等)。
#include <pcl/features/normal_3d_omp.h>   //法线
#include <pcl/features/shot_omp.h>    //描述子
#include <pcl/features/board.h>       
#include <pcl/keypoints/uniform_sampling.h>   //均匀采样
#include <pcl/recognition/cg/hough_3d.h>    //hough算子
#include <pcl/recognition/cg/geometric_consistency.h>  //几何一致性
#include <pcl/visualization/pcl_visualizer.h>   //可视化
#include <pcl/kdtree/kdtree_flann.h>             //配准方法
#include <pcl/kdtree/impl/kdtree_flann.hpp>      //
#include <pcl/common/transforms.h>             //转换矩阵
#include <pcl/console/parse.h>


#include <pcl/features/moment_of_inertia_estimation.h>//惯性矩估计的头文件

typedef pcl::PointXYZRGBA PointType;  //PointXYZRGBA数据结构
typedef pcl::Normal NormalType;       //法线结构
typedef pcl::ReferenceFrame RFType;    //参考帧
typedef pcl::SHOT352 DescriptorType;   //SHOT特征的数据结构http://www.cnblogs.com/li-yao7758258/p/6612856.html

std::string model_filename_;   //模型的文件名
std::string scene_filename_;

//Algorithm params
bool show_keypoints_(true);
bool show_correspondences_(true);
bool use_cloud_resolution_(false);
bool use_hough_(true);
float model_ss_(0.01f);
float scene_ss_(0.03f);
float rf_rad_(0.015f);
float descr_rad_(0.02f);
float cg_size_(0.01f);
float cg_thresh_(5.0f);

//为了提取OBB创建一个结构体 viewer->addCube (position, quat, max_point_OBB.x - min_point_OBB.x,
// max_point_OBB.y - min_point_OBB.y, max_point_OBB.z - min_point_OBB.z, "OBB");
struct OBB
{
    Eigen::Vector3f position;
    Eigen::Quaternionf quat;
    pcl::PointXYZ min_point_OBB;
    pcl::PointXYZ max_point_OBB;

};
OBB findOBBPoint(pcl::PointCloud<pcl::PointXYZ>::Ptr cloud_in);
std::vector<pcl::PointXYZ> findCubePoint(pcl::PointCloud<pcl::PointXYZ>::Ptr cloud_in);//找到点云的两个对角点并返回
pcl::PointCloud<pcl::PointXYZ>::Ptr xyzRGBA2xyz(pcl::PointCloud<pcl::PointXYZRGBA>::Ptr cloud_in);
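// In this version PointType is pcl::PointXYZRGBA, so xyzRGBA2xyz() is needed to convert a cloud to
// pcl::PointXYZ before the bounding-box helpers above can be applied, e.g. (sketch):
//   pcl::PointCloud<pcl::PointXYZ>::Ptr model_xyz = xyzRGBA2xyz(off_scene_model);
//   std::vector<pcl::PointXYZ> corners = findCubePoint(model_xyz);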
void
showHelp(char *filename)
{
    std::cout << std::endl;
    std::cout << "***************************************************************************" << std::endl;
    std::cout << "*                                                                         *" << std::endl;
    std::cout << "*             Correspondence Grouping Tutorial - Usage Guide              *" << std::endl;
    std::cout << "*                                                                         *" << std::endl;
    std::cout << "***************************************************************************" << std::endl << std::endl;
    std::cout << "Usage: " << filename << " model_filename.pcd scene_filename.pcd [Options]" << std::endl << std::endl;
    std::cout << "Options:" << std::endl;
    std::cout << "     -h:                     Show this help." << std::endl;
    std::cout << "     -k:                     Show used keypoints." << std::endl;
    std::cout << "     -c:                     Show used correspondences." << std::endl;
    std::cout << "     -r:                     Compute the model cloud resolution and multiply" << std::endl;
    std::cout << "                             each radius given by that value." << std::endl;
    std::cout << "     --algorithm (Hough|GC): Clustering algorithm used (default Hough)." << std::endl;
    std::cout << "     --model_ss val:         Model uniform sampling radius (default 0.01)" << std::endl;
    std::cout << "     --scene_ss val:         Scene uniform sampling radius (default 0.03)" << std::endl;
    std::cout << "     --rf_rad val:           Reference frame radius (default 0.015)" << std::endl;
    std::cout << "     --descr_rad val:        Descriptor radius (default 0.02)" << std::endl;
    std::cout << "     --cg_size val:          Cluster size (default 0.01)" << std::endl;
    std::cout << "     --cg_thresh val:        Clustering threshold (default 5)" << std::endl << std::endl;
}

void
parseCommandLine(int argc, char *argv[])
{
    //Show help
    if (pcl::console::find_switch(argc, argv, "-h"))
    {
        showHelp(argv[0]);
        exit(0);
    }

    //Model & scene filenames
    std::vector<int> filenames;
    filenames = pcl::console::parse_file_extension_argument(argc, argv, ".pcd");
    if (filenames.size() != 2)
    {
        for (std::size_t i = 0; i < filenames.size(); i++)
            std::cout << filenames.at(i) << std::endl;
        std::cout << "Filenames missing.\n";
        showHelp(argv[0]);
        exit(-1);
    }

    model_filename_ = argv[filenames[0]];
    scene_filename_ = argv[filenames[1]];

    //Program behavior
    if (pcl::console::find_switch(argc, argv, "-k"))
    {
        show_keypoints_ = true;
    }
    if (pcl::console::find_switch(argc, argv, "-c"))
    {
        show_correspondences_ = true;
    }
    if (pcl::console::find_switch(argc, argv, "-r")) //计算点云的分辨率和多样性
    {
        use_cloud_resolution_ = true;
    }

    std::string used_algorithm;
    if (pcl::console::parse_argument(argc, argv, "--algorithm", used_algorithm) != -1)
    {
        if (used_algorithm.compare("Hough") == 0)
        {
            use_hough_ = true;
        }
        else if (used_algorithm.compare("GC") == 0)
        {
            use_hough_ = false;
        }
        else
        {
            std::cout << "Wrong algorithm name.\n";
            showHelp(argv[0]);
            exit(-1);
        }
    }

    //General parameters
    pcl::console::parse_argument(argc, argv, "--model_ss", model_ss_);
    pcl::console::parse_argument(argc, argv, "--scene_ss", scene_ss_);
    pcl::console::parse_argument(argc, argv, "--rf_rad", rf_rad_);
    pcl::console::parse_argument(argc, argv, "--descr_rad", descr_rad_);
    pcl::console::parse_argument(argc, argv, "--cg_size", cg_size_);
    pcl::console::parse_argument(argc, argv, "--cg_thresh", cg_thresh_);
}

double
computeCloudResolution(const pcl::PointCloud<PointType>::ConstPtr &cloud)//计算点云分辨率
{
    double res = 0.0;
    int n_points = 0;
    int nres;
    std::vector<int> indices(2);
    std::vector<float> sqr_distances(2);
    pcl::search::KdTree<PointType> tree;
    tree.setInputCloud(cloud);   //设置输入点云

    for (size_t i = 0; i < cloud->size(); ++i)
    {
        if (!pcl_isfinite((*cloud)[i].x))
        {
            continue;
        }
        //Considering the second neighbor since the first is the point itself.
        //运算第二个临近点,因为第一个点是它本身
        nres = tree.nearestKSearch(i, 2, indices, sqr_distances);//return :number of neighbors found 
        if (nres == 2)
        {
            res += sqrt(sqr_distances[1]);
            ++n_points;
        }
    }
    if (n_points != 0)
    {
        res /= n_points;
    }
    return res;
}

int
main(int argc, char *argv[])
{
    parseCommandLine(argc, argv);

    pcl::PointCloud<PointType>::Ptr model(new pcl::PointCloud<PointType>()); //模型点云
    pcl::PointCloud<PointType>::Ptr model_keypoints(new pcl::PointCloud<PointType>());//模型角点
    pcl::PointCloud<PointType>::Ptr scene(new pcl::PointCloud<PointType>());  //目标点云
    pcl::PointCloud<PointType>::Ptr scene_keypoints(new pcl::PointCloud<PointType>());//目标角点
    pcl::PointCloud<NormalType>::Ptr model_normals(new pcl::PointCloud<NormalType>()); //法线
    pcl::PointCloud<NormalType>::Ptr scene_normals(new pcl::PointCloud<NormalType>());
    pcl::PointCloud<DescriptorType>::Ptr model_descriptors(new pcl::PointCloud<DescriptorType>()); //描述子
    pcl::PointCloud<DescriptorType>::Ptr scene_descriptors(new pcl::PointCloud<DescriptorType>());

    //载入文件  载入模型(模板)
    if (pcl::io::loadPCDFile(model_filename_, *model) < 0)
    {
        std::cout << "Error loading model cloud." << std::endl;
        showHelp(argv[0]);
        return (-1);
    }
    if (pcl::io::loadPCDFile(scene_filename_, *scene) < 0)  //载入场景
    {
        std::cout << "Error loading scene cloud." << std::endl;
        showHelp(argv[0]);
        return (-1);
    }


    // 设置分辨率
    if (use_cloud_resolution_)
    {
        float resolution = static_cast<float> (computeCloudResolution(model));
        if (resolution != 0.0f)
        {
            model_ss_ *= resolution;
            scene_ss_ *= resolution;
            rf_rad_ *= resolution;
            descr_rad_ *= resolution;
            cg_size_ *= resolution;
        }

        std::cout << "Model resolution:       " << resolution << std::endl;
        std::cout << "Model sampling size:    " << model_ss_ << std::endl;
        std::cout << "Scene sampling size:    " << scene_ss_ << std::endl;
        std::cout << "LRF support radius:     " << rf_rad_ << std::endl;
        std::cout << "SHOT descriptor radius: " << descr_rad_ << std::endl;
        std::cout << "Clustering bin size:    " << cg_size_ << std::endl << std::endl;
    }

    //计算法线 normalestimationomp估计局部表面特性在每个三个特点,分别表面的法向量和曲率,平行,使用OpenMP标准。//初始化调度程序并设置要使用的线程数(默认为0)。
    pcl::NormalEstimationOMP<PointType, NormalType> norm_est;
    norm_est.setKSearch(10);       //设置K邻域搜索阀值为10个点
    norm_est.setInputCloud(model);  //设置输入点云
    norm_est.compute(*model_normals);   //计算点云法线 

    norm_est.setInputCloud(scene);
    norm_est.compute(*scene_normals);
    //计算法线


    // Downsample the clouds with uniform sampling to extract keypoints
    // (voxel-style downsampling; each voxel is represented by the point closest to its centroid)
    pcl::PointCloud<int> sampled_indices;
    pcl::UniformSampling<PointType> uniform_sampling;
    uniform_sampling.setInputCloud(model);          // input cloud
    uniform_sampling.setRadiusSearch(model_ss_);    // sampling radius (model_ss_, default 0.01, overridable via --model_ss)
    uniform_sampling.compute(sampled_indices);
    pcl::copyPointCloud(*model, sampled_indices.points, *model_keypoints);
    std::cout << "Model total points: " << model->size() << "; Selected Keypoints: " << model_keypoints->size() << std::endl;

    uniform_sampling.setInputCloud(scene);
    uniform_sampling.setRadiusSearch(scene_ss_);
    uniform_sampling.compute(sampled_indices);
    pcl::copyPointCloud(*scene, sampled_indices.points, *scene_keypoints);
    std::cout << "Scene total points: " << scene->size() << "; Selected Keypoints: " << scene_keypoints->size() << std::endl;



    //为关键点计算描述子
    pcl::SHOTEstimationOMP<PointType, NormalType, DescriptorType> descr_est;
    descr_est.setRadiusSearch(descr_rad_);  //设置搜索半径

    descr_est.setInputCloud(model_keypoints);  //输入模型的关键点
    descr_est.setInputNormals(model_normals);  //输入模型的法线
    descr_est.setSearchSurface(model);         //输入的点云
    descr_est.compute(*model_descriptors);     //计算描述子

    descr_est.setInputCloud(scene_keypoints);  //同理
    descr_est.setInputNormals(scene_normals);
    descr_est.setSearchSurface(scene);
    descr_est.compute(*scene_descriptors);

    //  使用Kdtree找出 Model-Scene 匹配点
    pcl::CorrespondencesPtr model_scene_corrs(new pcl::Correspondences());

    pcl::KdTreeFLANN<DescriptorType> match_search;   //设置配准的方法
    match_search.setInputCloud(model_descriptors);  //输入模板点云的描述子

    //每一个场景的关键点描述子都要找到模板中匹配的关键点描述子并将其添加到对应的匹配向量中。
    for (size_t i = 0; i < scene_descriptors->size(); ++i)
    {
        std::vector<int> neigh_indices(1);   //设置最近邻点的索引
        std::vector<float> neigh_sqr_dists(1); //申明最近邻平方距离值
        if (!pcl_isfinite(scene_descriptors->at(i).descriptor[0])) //忽略 NaNs点
        {
            continue;
        }
        int found_neighs = match_search.nearestKSearch(scene_descriptors->at(i), 1, neigh_indices, neigh_sqr_dists);
        //scene_descriptors->at (i)是给定点云 1是临近点个数 ,neigh_indices临近点的索引  neigh_sqr_dists是与临近点的索引

        if (found_neighs == 1 && neigh_sqr_dists[0] < 0.25f) // 仅当描述子与临近点的平方距离小于0.25(描述子与临近的距离在一般在0到1之间)才添加匹配
        {
            //neigh_indices[0]给定点,  i  是配准数  neigh_sqr_dists[0]与临近点的平方距离
            pcl::Correspondence corr(neigh_indices[0], static_cast<int> (i), neigh_sqr_dists[0]);
            model_scene_corrs->push_back(corr);   //把配准的点存储在容器中
        }
    }
    std::cout << "Correspondences found: " << model_scene_corrs->size() << std::endl;


    //  实际的配准方法的实现
    std::vector<Eigen::Matrix4f, Eigen::aligned_allocator<Eigen::Matrix4f> > rototranslations;
    std::vector<pcl::Correspondences> clustered_corrs;

    //  使用 Hough3D算法寻找匹配点
    if (use_hough_)
    {
        //
        //  Compute (Keypoints) Reference Frames only for Hough
        //计算参考帧的Hough(也就是关键点)
        cout << "使用3d hough 匹配方法" << endl;
        pcl::PointCloud<RFType>::Ptr model_rf(new pcl::PointCloud<RFType>());
        pcl::PointCloud<RFType>::Ptr scene_rf(new pcl::PointCloud<RFType>());
        //特征估计的方法(点云,法线,参考帧)
        pcl::BOARDLocalReferenceFrameEstimation<PointType, NormalType, RFType> rf_est;
        rf_est.setFindHoles(true);
        rf_est.setRadiusSearch(rf_rad_);   //设置搜索半径

        rf_est.setInputCloud(model_keypoints);  //模型关键点
        rf_est.setInputNormals(model_normals); //模型法线
        rf_est.setSearchSurface(model);    //模型
        rf_est.compute(*model_rf);      //模型的参考帧   

        rf_est.setInputCloud(scene_keypoints);  //同理
        rf_est.setInputNormals(scene_normals);
        rf_est.setSearchSurface(scene);
        rf_est.compute(*scene_rf);

        //  Clustering聚类的方法

        //对输入点与的聚类,以区分不同的实例的场景中的模型 
        pcl::Hough3DGrouping<PointType, PointType, RFType, RFType> clusterer;
        clusterer.setHoughBinSize(cg_size_);//霍夫空间设置每个bin的大小
        clusterer.setHoughThreshold(cg_thresh_);//阀值
        clusterer.setUseInterpolation(true);
        clusterer.setUseDistanceWeight(false);

        clusterer.setInputCloud(model_keypoints);
        clusterer.setInputRf(model_rf);   //设置输入的参考帧
        clusterer.setSceneCloud(scene_keypoints);
        clusterer.setSceneRf(scene_rf);
        clusterer.setModelSceneCorrespondences(model_scene_corrs);//model_scene_corrs存储配准的点

        //clusterer.cluster (clustered_corrs);辨认出聚类的对象
        clusterer.recognize(rototranslations, clustered_corrs);
    }
    else // Using GeometricConsistency grouping instead
    {
        cout << "Using the geometric consistency grouping method" << endl;
        pcl::GeometricConsistencyGrouping<PointType, PointType> gc_clusterer;
        gc_clusterer.setGCSize(cg_size_);        //consensus set resolution
        gc_clusterer.setGCThreshold(cg_thresh_); //minimum cluster size (threshold)

        gc_clusterer.setInputCloud(model_keypoints);
        gc_clusterer.setSceneCloud(scene_keypoints);
        gc_clusterer.setModelSceneCorrespondences(model_scene_corrs);

        //gc_clusterer.cluster (clustered_corrs); would only cluster; recognize also returns the pose of each instance
        gc_clusterer.recognize(rototranslations, clustered_corrs);
    }

    //  Output: report whether (and where) the model was found in the scene
    std::cout << "Model instances found: " << rototranslations.size() << std::endl;
    for (size_t i = 0; i < rototranslations.size(); ++i)
    {
        std::cout << "\n    Instance " << i + 1 << ":" << std::endl;
        std::cout << "        Correspondences belonging to this instance: " << clustered_corrs[i].size() << std::endl;

        // Print the rotation matrix and translation vector of this instance relative to the input model
        Eigen::Matrix3f rotation = rototranslations[i].block<3, 3>(0, 0);
        Eigen::Vector3f translation = rototranslations[i].block<3, 1>(0, 3);

        printf("\n");
        printf("            | %6.3f %6.3f %6.3f | \n", rotation(0, 0), rotation(0, 1), rotation(0, 2));
        printf("        R = | %6.3f %6.3f %6.3f | \n", rotation(1, 0), rotation(1, 1), rotation(1, 2));
        printf("            | %6.3f %6.3f %6.3f | \n", rotation(2, 0), rotation(2, 1), rotation(2, 2));
        printf("\n");
        printf("        t = < %0.3f, %0.3f, %0.3f >\n", translation(0), translation(1), translation(2));
    }

    //Visualization
    pcl::visualization::PCLVisualizer viewer("Correspondence Grouping");
//    viewer.initCameraParameters ();
    viewer.addPointCloud(scene, "scene_cloud"); //show the scene cloud

    pcl::PointCloud<PointType>::Ptr off_scene_model(new pcl::PointCloud<PointType>());
    pcl::PointCloud<PointType>::Ptr off_scene_model_keypoints(new pcl::PointCloud<PointType>());

    if (show_correspondences_ || show_keypoints_)  //show the model next to the scene
    {
        //  We are translating the model so that it doesn't end in the middle of the scene representation
        //  (shift the model to one side so it does not overlap the scene in the viewer)
        pcl::transformPointCloud(*model, *off_scene_model, Eigen::Vector3f(-1, 0, 0), Eigen::Quaternionf(1, 0, 0, 0));
        pcl::transformPointCloud(*model_keypoints, *off_scene_model_keypoints, Eigen::Vector3f(-1, 0, 0), Eigen::Quaternionf(1, 0, 0, 0));  //the keypoints must be moved together with the model

        ///color the off_scene_model cloud
        pcl::visualization::PointCloudColorHandlerCustom<PointType> off_scene_model_color_handler(off_scene_model, 255, 255, 128);
        viewer.addPointCloud(off_scene_model, off_scene_model_color_handler, "off_scene_model");
//
//        //AABB bounding box of the model (kept for reference)
//        std::vector<pcl::PointXYZ> Cube2Point = findCubePoint(off_scene_model);  //two opposite corners: the first is the minimum, the second the maximum
//        viewer.addCube(Cube2Point[0].x,Cube2Point[1].x,Cube2Point[0].y,Cube2Point[1].y,Cube2Point[0].z,Cube2Point[1].z,255,0.0,0.0,"AABB");
//        viewer.setShapeRenderingProperties(pcl::visualization::PCL_VISUALIZER_OPACITY,1,"AABB");   //line opacity
//        viewer.setShapeRenderingProperties(pcl::visualization::PCL_VISUALIZER_LINE_WIDTH,20,"AABB");//line width

        //OBB bounding box of the model
        //PointType is pcl::PointXYZ here, so no conversion is needed; xyzRGBA2xyz() would only
        //be required if PointType were pcl::PointXYZRGBA.
        OBB obbPoint = findOBBPoint(off_scene_model);
        viewer.addCube(obbPoint.position,obbPoint.quat,
                obbPoint.max_point_OBB.x - obbPoint.min_point_OBB.x,
                obbPoint.max_point_OBB.y - obbPoint.min_point_OBB.y,
                obbPoint.max_point_OBB.z - obbPoint.min_point_OBB.z,"OBB");
        viewer.setShapeRenderingProperties(pcl::visualization::PCL_VISUALIZER_COLOR,255,0,0,"OBB");
        viewer.setShapeRenderingProperties(pcl::visualization::PCL_VISUALIZER_OPACITY,1,"OBB");
        viewer.setShapeRenderingProperties(pcl::visualization::PCL_VISUALIZER_LINE_WIDTH,20,"OBB");
    }

    if (show_keypoints_)   //show the keypoints
    {
        pcl::visualization::PointCloudColorHandlerCustom<PointType> scene_keypoints_color_handler(scene_keypoints, 0, 0, 255);  //scene keypoints drawn in blue
        viewer.addPointCloud(scene_keypoints, scene_keypoints_color_handler, "scene_keypoints");
        viewer.setPointCloudRenderingProperties(pcl::visualization::PCL_VISUALIZER_POINT_SIZE, 5, "scene_keypoints");

        pcl::visualization::PointCloudColorHandlerCustom<PointType> off_scene_model_keypoints_color_handler(off_scene_model_keypoints, 0, 0, 255);  //model keypoints drawn in blue as well
        viewer.addPointCloud(off_scene_model_keypoints, off_scene_model_keypoints_color_handler, "off_scene_model_keypoints");
        viewer.setPointCloudRenderingProperties(pcl::visualization::PCL_VISUALIZER_POINT_SIZE, 5, "off_scene_model_keypoints");
    }
    //The scene is already displayed as "scene_cloud" above; an RGB color handler is only
    //meaningful when PointType carries color (e.g. pcl::PointXYZRGBA):
    //pcl::visualization::PointCloudColorHandlerRGBField<PointType> rgb(scene);
    //viewer.addPointCloud(scene, rgb, "The Scene");

    ///Show each recognized model instance in the scene (rotated_model), drawn in red
    for (size_t i = 0; i < rototranslations.size(); ++i)
    {
        pcl::PointCloud<PointType>::Ptr rotated_model(new pcl::PointCloud<PointType>());
        pcl::transformPointCloud(*model, *rotated_model, rototranslations[i]); //bring the model into the scene with the estimated pose
        //    rototranslations holds one 4x4 rigid transformation (rotation + translation) per instance
        std::stringstream ss_cloud;
        ss_cloud << "instance" << i;

        pcl::visualization::PointCloudColorHandlerCustom<PointType> rotated_model_color_handler(rotated_model, 255, 0, 0);
        viewer.addPointCloud(rotated_model, rotated_model_color_handler, ss_cloud.str());

//        //AABB bounding box of the recognized instance (kept for reference)
//        std::vector<pcl::PointXYZ> Cube2Point_scene = findCubePoint(rotated_model);
//        std::cout<<"\n-----------------------------\n"
//                 <<Cube2Point_scene[0]<<"\n"
//                 <<Cube2Point_scene[1]
//                 <<"\n-------------------------------\n"<<std::endl;
//        viewer.addCube(Cube2Point_scene[0].x,Cube2Point_scene[1].x,Cube2Point_scene[0].y,Cube2Point_scene[1].y,Cube2Point_scene[0].z,Cube2Point_scene[1].z,0,255,0.0,"AABBscene");
//        viewer.setShapeRenderingProperties(pcl::visualization::PCL_VISUALIZER_OPACITY,1,"AABBscene");   //line opacity
//        viewer.setShapeRenderingProperties(pcl::visualization::PCL_VISUALIZER_LINE_WIDTH,20,"AABBscene");//line width

        //OBB bounding box of the recognized instance
        //(as above, no point type conversion is needed because PointType is pcl::PointXYZ;
        // the shape id must be unique per instance, hence the ss_cloud-based name)
        std::string obb_id = ss_cloud.str() + "_OBB";
        OBB obbPoint = findOBBPoint(rotated_model);
        viewer.addCube(obbPoint.position, obbPoint.quat,
                       obbPoint.max_point_OBB.x - obbPoint.min_point_OBB.x,
                       obbPoint.max_point_OBB.y - obbPoint.min_point_OBB.y,
                       obbPoint.max_point_OBB.z - obbPoint.min_point_OBB.z, obb_id);
        viewer.setShapeRenderingProperties(pcl::visualization::PCL_VISUALIZER_COLOR, 0, 0, 255, obb_id);
        viewer.setShapeRenderingProperties(pcl::visualization::PCL_VISUALIZER_OPACITY, 1, obb_id);
        viewer.setShapeRenderingProperties(pcl::visualization::PCL_VISUALIZER_LINE_WIDTH, 20, obb_id);

        if (show_correspondences_)   //draw the correspondence lines
        {
            for (size_t j = 0; j < clustered_corrs[i].size(); ++j)
            {
                std::stringstream ss_line;
                ss_line << "correspondence_line" << i << "_" << j;
                PointType& model_point = off_scene_model_keypoints->at(clustered_corrs[i][j].index_query);
                PointType& scene_point = scene_keypoints->at(clustered_corrs[i][j].index_match);

                //  We are drawing a line for each pair of clustered correspondences found between the model and the scene
                viewer.addLine<PointType, PointType>(model_point, scene_point, 0, 255, 0, ss_line.str());
            }
        }
    }

//    viewer.addCoordinateSystem();
    while (!viewer.wasStopped())
    {
        viewer.spinOnce();
    }

    return (0);
}
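A note on PCL versions: the listing above uses the old pcl::UniformSampling interface (compute() filling a pcl::PointCloud<int> of indices) together with the pcl_isfinite macro. In newer PCL releases (roughly 1.8 onward, as far as I can tell) UniformSampling lives in pcl/filters and produces the downsampled cloud directly through filter(), and std::isfinite can replace pcl_isfinite. Below is a minimal sketch of the keypoint extraction step under that assumption; downsampleKeypoints is just an illustrative helper name, and with this variant the copyPointCloud calls after the sampling step are no longer needed.

#include <pcl/point_types.h>
#include <pcl/point_cloud.h>
#include <pcl/filters/uniform_sampling.h>   // replaces <pcl/keypoints/uniform_sampling.h>

pcl::PointCloud<pcl::PointXYZ>::Ptr
downsampleKeypoints(const pcl::PointCloud<pcl::PointXYZ>::Ptr &cloud, float radius)
{
    pcl::PointCloud<pcl::PointXYZ>::Ptr keypoints(new pcl::PointCloud<pcl::PointXYZ>);
    pcl::UniformSampling<pcl::PointXYZ> uniform_sampling;
    uniform_sampling.setInputCloud(cloud);
    uniform_sampling.setRadiusSearch(radius);   // plays the role of model_ss_ / scene_ss_
    uniform_sampling.filter(*keypoints);        // keypoints come back directly, no index cloud
    return keypoints;
}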

//Compute the axis-aligned bounding box (AABB) of a cloud and return its two opposite corners
std::vector<pcl::PointXYZ> findCubePoint(pcl::PointCloud<pcl::PointXYZ>::Ptr cloud_in)
{
    std::vector<pcl::PointXYZ> AABB;        //the two corner points of the AABB

    std::vector<float> moment_of_inertia;   //moment of inertia values
    std::vector<float> eccentricity;        //eccentricity values
    pcl::PointXYZ min_point_AABB;
    pcl::PointXYZ max_point_AABB;

    pcl::MomentOfInertiaEstimation<pcl::PointXYZ> feature_extractor; //feature extractor instance
    feature_extractor.setInputCloud(cloud_in);                       //input cloud
    feature_extractor.compute();                                     //run the feature computation

    feature_extractor.getMomentOfInertia(moment_of_inertia);         //moment of inertia
    feature_extractor.getEccentricity(eccentricity);                 //eccentricity
    feature_extractor.getAABB(min_point_AABB, max_point_AABB);       //axis-aligned bounding box
    AABB.push_back(min_point_AABB);
    AABB.push_back(max_point_AABB);                                  //store the two corners: minimum first, maximum second

    return AABB;
}
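Since findCubePoint() only needs the two extreme corners, MomentOfInertiaEstimation is heavier than necessary here: pcl::getMinMax3D from <pcl/common/common.h> computes the same AABB in a single pass over the cloud. A lighter-weight sketch (findCubePointFast is just an illustrative name):

#include <vector>
#include <pcl/point_types.h>
#include <pcl/common/common.h>   // pcl::getMinMax3D

std::vector<pcl::PointXYZ> findCubePointFast(pcl::PointCloud<pcl::PointXYZ>::Ptr cloud_in)
{
    pcl::PointXYZ min_pt, max_pt;
    pcl::getMinMax3D(*cloud_in, min_pt, max_pt);   // per-axis minima and maxima of the cloud
    return { min_pt, max_pt };                     // same ordering as findCubePoint(): minimum first
}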

//Point type conversion: XYZRGBA -> XYZ
pcl::PointCloud<pcl::PointXYZ>::Ptr xyzRGBA2xyz(pcl::PointCloud<pcl::PointXYZRGBA>::Ptr cloud_in) {

    pcl::PointCloud<pcl::PointXYZ>::Ptr cloud_out(new pcl::PointCloud<pcl::PointXYZ>);
    cloud_out->resize(cloud_in->size());   //allocate once, then assign by index below
    cloud_out->height = 1;
    cloud_out->width = static_cast<uint32_t>(cloud_out->size());
    cloud_out->is_dense = false;

    for (size_t i = 0; i < cloud_in->size(); i++) {
        cloud_out->points[i].x = cloud_in->points[i].x;
        cloud_out->points[i].y = cloud_in->points[i].y;
        cloud_out->points[i].z = cloud_in->points[i].z;
    }

    return cloud_out;
}
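pcl::copyPointCloud is templated on both point types and copies only the fields the two types share, so the manual loop above can be reduced to a single call. A sketch, assuming <pcl/common/io.h> is available:

#include <pcl/point_types.h>
#include <pcl/common/io.h>   // templated pcl::copyPointCloud

pcl::PointCloud<pcl::PointXYZ>::Ptr xyzRGBA2xyzShort(pcl::PointCloud<pcl::PointXYZRGBA>::Ptr cloud_in)
{
    pcl::PointCloud<pcl::PointXYZ>::Ptr cloud_out(new pcl::PointCloud<pcl::PointXYZ>);
    pcl::copyPointCloud(*cloud_in, *cloud_out);   // copies x/y/z, drops the color field
    return cloud_out;
}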

OBB findOBBPoint(pcl::PointCloud<pcl::PointXYZ>::Ptr cloud_in)
{
    OBB obbPoint;                                                    //result to return
    pcl::MomentOfInertiaEstimation<pcl::PointXYZ> feature_extractor; //feature extractor instance
    feature_extractor.setInputCloud(cloud_in);                       //input cloud
    feature_extractor.compute();                                     //run the feature computation
    std::vector<float> moment_of_inertia;                            //moment of inertia values
    std::vector<float> eccentricity;                                 //eccentricity values

    pcl::PointXYZ min_point_OBB;
    pcl::PointXYZ max_point_OBB;
    pcl::PointXYZ position_OBB;
    Eigen::Matrix3f rotational_matrix_OBB;
    float major_value, middle_value, minor_value;
    Eigen::Vector3f major_vector, middle_vector, minor_vector;
    Eigen::Vector3f mass_center;
    feature_extractor.getMomentOfInertia(moment_of_inertia);         //moment of inertia
    feature_extractor.getEccentricity(eccentricity);                 //eccentricity
    feature_extractor.getOBB(min_point_OBB, max_point_OBB, position_OBB, rotational_matrix_OBB); //OBB parameters

    feature_extractor.getEigenValues(major_value, middle_value, minor_value);      //the three eigenvalues
    feature_extractor.getEigenVectors(major_vector, middle_vector, minor_vector);  //the three eigenvectors
    feature_extractor.getMassCenter(mass_center);                                  //center of mass

    Eigen::Vector3f position(position_OBB.x, position_OBB.y, position_OBB.z);
    std::cout << "position_OBB: " << position_OBB << endl;
    std::cout << "mass_center: " << mass_center << endl;             //center coordinates
    Eigen::Quaternionf quat(rotational_matrix_OBB);

    std::cout << "size of cloud: " << cloud_in->points.size() << endl;
    std::cout << "moment_of_inertia: " << moment_of_inertia.size() << endl;
    std::cout << "eccentricity: " << eccentricity.size() << endl;

    obbPoint.position = position;
    obbPoint.max_point_OBB = max_point_OBB;
    obbPoint.min_point_OBB = min_point_OBB;
    obbPoint.quat = quat;

    return obbPoint;
}
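getOBB() reports min_point_OBB and max_point_OBB in the box's own local frame, while position and quat describe where that frame sits in world coordinates. If a later step needs the eight box corners in world coordinates (for example to clip points or export the detection), they can be reconstructed from the OBB struct; a sketch with a hypothetical helper cornersOfOBB():

#include <vector>
#include <Eigen/Geometry>

std::vector<Eigen::Vector3f> cornersOfOBB(const OBB &obb)
{
    // local OBB frame -> world frame: each corner is rotated by quat, then shifted by position
    Eigen::Affine3f to_world = Eigen::Affine3f::Identity();
    to_world.translate(obb.position);
    to_world.rotate(obb.quat);

    std::vector<Eigen::Vector3f> corners;
    for (int ix = 0; ix < 2; ++ix)
        for (int iy = 0; iy < 2; ++iy)
            for (int iz = 0; iz < 2; ++iz)
            {
                Eigen::Vector3f local(ix ? obb.max_point_OBB.x : obb.min_point_OBB.x,
                                      iy ? obb.max_point_OBB.y : obb.min_point_OBB.y,
                                      iz ? obb.max_point_OBB.z : obb.min_point_OBB.z);
                corners.push_back(to_world * local);   // corner expressed in world coordinates
            }
    return corners;
}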
