ASIFT-GPU (changchangwu) + OpenCV + ICP + visualization

CMakeLists.txt 

cmake_minimum_required(VERSION 2.8.3)
project(test_siftgpu)

# OpenCV dependency
set(OpenCV_DIR "/media/spple/新加卷1/opencv-3.4.1/install_4/share/OpenCV/")
find_package( OpenCV REQUIRED )

set(PCL_DIR "/media/spple/新加卷/Dataset/pcl-master/install/share/pcl-1.9/")
find_package(PCL REQUIRED)

# OpenGL
find_package(OpenGL REQUIRED)

# GLUT
find_package(GLUT REQUIRED)

# Glew
find_package(GLEW REQUIRED)

# SiftGPU: manually set the header and library locations
include_directories("/home/spple/CLionProjects/SIFT-GPU/src/SiftGPU/" ${OPENGL_INCLUDE_DIR})
set(SIFTGPU_LIBS "/home/spple/CLionProjects/SIFT-GPU/bin/libsiftgpu.so")

add_executable( testSIFTGPU main_1.cpp )

target_link_libraries( testSIFTGPU dl)
target_link_libraries( testSIFTGPU
    ${OpenCV_LIBS}
    ${SIFTGPU_LIBS}
    ${GLEW_LIBRARIES} ${GLUT_LIBRARIES} ${OPENGL_LIBRARIES}
)

include_directories(${PCL_INCLUDE_DIRS})
link_directories(${PCL_LIBRARY_DIRS})
add_definitions(${PCL_DEFINITIONS})
list(REMOVE_ITEM PCL_LIBRARIES "vtkproj4")
target_link_libraries(testSIFTGPU ${PCL_LIBRARIES})
main_1.cpp

#include <vector>
#include <ctime>
#include <string>
#include <iostream>
using std::vector;
using std::iostream;

#include "SiftGPU.h"
#include "GL/gl.h"

#include "opencv2/opencv.hpp"
#include "opencv2/core/core.hpp"
#include "opencv2/imgcodecs/imgcodecs.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/cudawarping.hpp"
#include "opencv2/cudafilters.hpp"
#include <opencv2/cudafeatures2d.hpp>
using cv::cuda::GpuMat;

#include <pcl/io/ply_io.h>
#include <pcl/point_types.h>
#include <pcl/registration/icp.h>
#include <pcl/visualization/pcl_visualizer.h>
#include <pcl/console/time.h>   // TicToc
#include <pcl/filters/voxel_grid.h>

//PointXYZRGBNormal PointNormal
typedef pcl::PointXYZRGBNormal PointT;
typedef pcl::PointCloud<PointT> PointCloudT;

bool next_iteration = false;


void
print4x4Matrix (const Eigen::Matrix4d & matrix)
{
    printf ("Rotation matrix :\n");
    printf ("    | %6.3f %6.3f %6.3f | \n", matrix (0, 0), matrix (0, 1), matrix (0, 2));
    printf ("R = | %6.3f %6.3f %6.3f | \n", matrix (1, 0), matrix (1, 1), matrix (1, 2));
    printf ("    | %6.3f %6.3f %6.3f | \n", matrix (2, 0), matrix (2, 1), matrix (2, 2));
    printf ("Translation vector :\n");
    printf ("t = < %6.3f, %6.3f, %6.3f >\n\n", matrix (0, 3), matrix (1, 3), matrix (2, 3));
}

void
keyboardEventOccurred (const pcl::visualization::KeyboardEvent& event,
                       void* nothing)
{
    if (event.getKeySym () == "space" && event.keyDown ())
        next_iteration = true;
}

// Downsample
void voxel_grid_filter(PointCloudT::Ptr cloud_in, float leafsize)
{
    pcl::VoxelGrid<PointT> sor;
    sor.setInputCloud(cloud_in);
    sor.setLeafSize(leafsize, leafsize, leafsize);
    sor.filter(*cloud_in);
    // std::cout << "after filter has " << cloud_in->points.size () << " points\n";
}


class ASiftDetector{

    enum InitStatus{
        INIT_OK,
        INIT_IS_NOT_SUPPORT,
        INIT_VERIFY_FAILED
    };

public:
    ASiftDetector() = default;
    ~ASiftDetector() {
        if(m_siftGpuDetector)
        {
            m_siftGpuDetector->DestroyContextGL();
            delete m_siftGpuDetector;
        }
        if(m_siftGpuMatcher)
        {
            m_siftGpuMatcher->DestroyContextGL();
            delete m_siftGpuMatcher;
        }
    }
    InitStatus create(){
        m_siftGpuDetector = new SiftGPU();

//        char* myargv[4] = {(char*)"-fo",(char*)"-1",(char*)"-v",(char*)"1"};
//        m_siftGpuDetector->ParseParam(4,myargv);
        char* myargv[6] = {(char*)"-fo",(char*)"-1",(char*)"-v",(char*)"1",(char*)"-cuda",(char*)"0"};
        m_siftGpuDetector->ParseParam(6,myargv);
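        // "-fo -1": start at octave -1 (upsample the input for more features);
        // "-v 1": verbosity level 1; "-cuda 0": run SiftGPU with CUDA on device 0.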

        // Set edge threshold, dog threshold

        if(m_siftGpuDetector->CreateContextGL() != SiftGPU::SIFTGPU_FULL_SUPPORTED){
            std::cerr << "SiftGPU is not supported!" << std::endl;
            return InitStatus::INIT_IS_NOT_SUPPORT;
        }

        m_maxMatch = 50000;

        m_siftGpuMatcher = new SiftMatchGPU(m_maxMatch);
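        // The matcher needs VerifyContextGL() once before SetDescriptors()/GetSiftMatch()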
        m_siftGpuMatcher->VerifyContextGL();

        return INIT_OK;
    }

    void affineSkew(double tilt, double phi, cv::Mat& img, cv::Mat& mask, cv::Mat& Ai)
    {
        int h = img.rows;
        int w = img.cols;

        mask = cv::Mat(h, w, CV_8UC1, cv::Scalar(255));

        cv::Mat A = cv::Mat::eye(2,3, CV_32F);

        if(phi != 0.0)
        {
            phi *= M_PI/180.;
            double s = sin(phi);
            double c = cos(phi);

            A = (cv::Mat_<float>(2,2) << c, -s, s, c);

            cv::Mat corners = (cv::Mat_<float>(4,2) << 0, 0, w, 0, w, h, 0, h);
            cv::Mat tcorners = corners*A.t();
            cv::Mat tcorners_x, tcorners_y;
            tcorners.col(0).copyTo(tcorners_x);
            tcorners.col(1).copyTo(tcorners_y);
            std::vector<cv::Mat> channels;
            channels.push_back(tcorners_x);
            channels.push_back(tcorners_y);
            merge(channels, tcorners);

            cv::Rect rect = boundingRect(tcorners);
            A =  (cv::Mat_<float>(2,3) << c, -s, -rect.x, s, c, -rect.y);

            cv::warpAffine(img, img, A, cv::Size(rect.width, rect.height), cv::INTER_LINEAR, cv::BORDER_REPLICATE);
        }
        if(tilt != 1.0)
        {
            double s = 0.8*sqrt(tilt*tilt-1);
            cv::GaussianBlur(img, img, cv::Size(0,0), s, 0.01);
            cv::resize(img, img, cv::Size(0,0), 1.0/tilt, 1.0, cv::INTER_NEAREST);
            A.row(0) = A.row(0)/tilt;
        }
        if(tilt != 1.0 || phi != 0.0)
        {
            h = img.rows;
            w = img.cols;
            cv::warpAffine(mask, mask, A, cv::Size(w, h), cv::INTER_NEAREST);
        }
        invertAffineTransform(A, Ai);
    }

    void detectAndCompute(const cv::Mat &img, vector<cv::KeyPoint> &kptsAll, cv::Mat &descAll){
        // ASIFT affine sampling parameters (tilt t, rotation phi): the first entry
        // (t = 1, phi = 0) is the unwarped image; then t = 2^(k/2) for k = 1..5,
        // with phi swept over [0, 180) in steps of 72/t degrees.
        vector<vector<double>> params;
        vector<double> temp;
        temp.push_back(1.0);
        temp.push_back(0.0);
        params.push_back(temp);
        for(int tl = 1; tl < 6; tl++) {
            double t = pow(2, 0.5 * tl);
            for (double phi = 0; phi < 180; phi += 72.0/t) {
                temp.clear();
                temp.push_back(t);
                temp.push_back(phi);
                params.push_back(temp);
            }
        }

        kptsAll.clear();
        descAll = cv::Mat(0, 128, CV_32F);
        int num=0;

        for(int aff = 0; aff < params.size(); aff++)
        {
            double tilt = params[aff][0];
            double phi = params[aff][1];

            cv::Mat descriptors;
            vector<cv::KeyPoint> kpts;

            cv::Mat timg, mask, Ai;
            img.copyTo(timg);

            affineSkew(tilt, phi, timg, mask, Ai);
            cv::Mat img_disp;
            cv::bitwise_and(timg, timg, img_disp, mask);

            m_siftGpuDetector->RunSIFT(img_disp.cols,img_disp.rows,img_disp.data, GL_LUMINANCE, GL_UNSIGNED_BYTE);
            //m_siftGpuDetector->RunSIFT(img_disp.cols,img_disp.rows,img_disp.data,GL_RGB,GL_UNSIGNED_BYTE);

            auto num1 = m_siftGpuDetector->GetFeatureNum();
            vector<float> des(128 * num1);
            vector<SiftGPU::SiftKeypoint> SiftGPUkeypoints(num1);
            m_siftGpuDetector->GetFeatureVector(&SiftGPUkeypoints[0],&des[0]);
            // Repack the descriptors into a num1 x 128 cv::Mat (one row per keypoint)
            cv::Mat m(des);
            descriptors = m.reshape(1,num1).clone();


            for(const SiftGPU::SiftKeypoint &kp : SiftGPUkeypoints){
                cv::Point3f kpt(kp.x, kp.y, 1);
                cv::Mat kpt_t = Ai*cv::Mat(kpt);
                float x = kpt_t.at<float>(0,0);
                float y = kpt_t.at<float>(1,0);
                // Clamp keypoints that map outside the original image bounds
                if ((x < 0) || (y < 0) || (x > img.cols-1) || (y > img.rows-1))
                {
                    num++;
                    if (x < 0)
                        x = 0;
                    if (y < 0)
                        y = 0;
                    if (x > img.cols-1)
                        x = img.cols-1;
                    if (y > img.rows-1)
                        y = img.rows-1;
                }
                cv::KeyPoint temp(x,y,kp.s,kp.o);
                kpts.push_back(temp);
            }

            kptsAll.insert(kptsAll.end(), kpts.begin(), kpts.end());
            descAll.push_back(descriptors);
        }
        std::cout <<"affineSkew num:::" << num << std::endl;

    }

    void transToRootSift(const cv::Mat &siftFeature,cv::Mat &rootSiftFeature){
        for(int i = 0; i < siftFeature.rows; i ++){
            // Convert to float type
            cv::Mat f;
            siftFeature.row(i).convertTo(f,CV_32FC1);

            cv::normalize(f,f,1,0,cv::NORM_L1); // l1 normalize
            sqrt(f,f); // sqrt-root  root-sift
            rootSiftFeature.push_back(f);
        }
    }

    int gpuMatch(const cv::Mat &des1,const cv::Mat &des2){

        //m_siftGpuMatcher->SetMaxSift();

        m_siftGpuMatcher->SetDescriptors(0,des1.rows,des1.data);
        m_siftGpuMatcher->SetDescriptors(1,des2.rows,des2.data);

        int (*match_buf)[2] = new int[m_maxMatch][2];

        auto matchNum = m_siftGpuMatcher->GetSiftMatch(m_maxMatch,match_buf);

        delete[] match_buf;

        return matchNum;
    }

    void setMaxSift(int max_sift){
        m_siftGpuMatcher->SetMaxSift(max_sift);
        m_maxMatch = max_sift;
    }

    int gpuMatch(const cv::Mat &des1,const cv::Mat &des2,vector<cv::DMatch>& matches){

        m_siftGpuMatcher->SetDescriptors(0,des1.rows,(float*)des1.data);
        m_siftGpuMatcher->SetDescriptors(1,des2.rows,(float*)des2.data);

        int (*match_buf)[2] = new int[m_maxMatch][2];

        std::cout<<"m_maxMatch "<< m_maxMatch << std::endl;

        auto matchNum = m_siftGpuMatcher->GetSiftMatch(m_maxMatch,match_buf);

        for(int i = 0 ;i  < matchNum; i ++) {
            cv::DMatch dm(match_buf[i][0],match_buf[i][1],0);
            matches.push_back(dm);
        }

        delete[] match_buf;
        return matchNum;
    }

private:
    SiftGPU *m_siftGpuDetector = nullptr;
    SiftMatchGPU *m_siftGpuMatcher = nullptr;

    int m_maxMatch = 0;
};


cv::Mat LeftUpRightDownDrawInlier(const cv::Mat &queryImage, const cv::Mat &objectImage,
                                  const vector<cv::Point2f> &queryCoord, const vector<cv::Point2f> &objectCoord, std::string filename)
{
    cv::Size sz = cv::Size(queryImage.size().width + objectImage.size().width,
                   queryImage.size().height + objectImage.size().height);
    cv::Mat matchingImage = cv::Mat::zeros(sz, CV_8UC3);

    // Set up ROIs inside matchingImage and copy the source images into them
    cv::Mat roi1 = cv::Mat(matchingImage, cv::Rect(0, 0, queryImage.size().width, queryImage.size().height));
    queryImage.copyTo(roi1);
    cv::Mat roi2 = cv::Mat(matchingImage, cv::Rect(queryImage.size().width, queryImage.size().height, objectImage.size().width, objectImage.size().height));
    objectImage.copyTo(roi2);

    // Draw a line between each matched pair
    for (int i = 0; i < (int)queryCoord.size(); ++i) {
        cv::Point2f pt1 = queryCoord[i];
        cv::Point2f pt2 = objectCoord[i];
        cv::Point2f from = pt1;
        cv::Point2f to = cv::Point(pt2.x + queryImage.size().width, pt2.y + queryImage.size().height);
        line(matchingImage, from, to, cv::Scalar(0, 255, 255));
    }
    cv::imwrite(filename, matchingImage);
    return matchingImage;
}

/*
 * Calculates the least-squares best-fit transform that maps corresponding points A to B in m spatial dimensions
 * Input:
 *  A: Nxm numpy array of corresponding points
 *  B: Nxm numpy array of corresponding points
 * Returns:
 *  T: (m+1)x(m+1) homogeneous transformation matrix that maps A on to B
 *  R: mxm rotation matrix
 *  t: mx1 translation vector
 *  //https://blog.csdn.net/kewei9/article/details/74157236
 */
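// Math sketch (Kabsch algorithm, no scale): with centroids cA, cB,
//   H = sum_i (A_i - cA) * (B_i - cB)^T,   SVD:  H = U * W * Vt,
//   R = Vt^T * U^T  (flip the sign of Vt's last row first if det(R) < 0),
//   t = cB - R * cA.
// Note: cv::SVDecomp returns V already transposed, so matV below actually holds Vt.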
void best_fit_transform(std::vector<cv::Point3f> A, std::vector<cv::Point3f> B, cv::Mat & T, cv::Mat & R, cv::Mat & t)
{
    if (A.size()!=B.size())
    {
        std::cout <<"error:::" << "A.size()!=B.size()" << std::endl;
    }
    int pointsNum = A.size();

    //# translate points to their centroids
    double srcSumX = 0.0f;
    double srcSumY = 0.0f;
    double srcSumZ = 0.0f;

    double dstSumX = 0.0f;
    double dstSumY = 0.0f;
    double dstSumZ = 0.0f;

    for (int i = 0; i < pointsNum; ++ i)
    {
        srcSumX += A[i].x;
        srcSumY += A[i].y;
        srcSumZ += A[i].z;

        dstSumX += B[i].x;
        dstSumY += B[i].y;
        dstSumZ += B[i].z;
    }

    cv::Point3f centerSrc, centerDst;

    centerSrc.x = float(srcSumX / pointsNum);
    centerSrc.y = float(srcSumY / pointsNum);
    centerSrc.z = float(srcSumZ / pointsNum);

    centerDst.x = float(dstSumX / pointsNum);
    centerDst.y = float(dstSumY / pointsNum);
    centerDst.z = float(dstSumZ / pointsNum);

    int m  = 3;
    cv::Mat srcMat(m, pointsNum, CV_32FC1);
    cv::Mat dstMat(m, pointsNum, CV_32FC1);

    float* srcDat = (float*)(srcMat.data);
    float* dstDat = (float*)(dstMat.data);
    for (int i = 0; i < pointsNum; ++ i)
    {
        srcDat[i] = A[i].x - centerSrc.x;
        srcDat[pointsNum + i] = A[i].y - centerSrc.y;
        srcDat[pointsNum * 2 + i] = A[i].z - centerSrc.z;

        dstDat[i] = B[i].x - centerDst.x;
        dstDat[pointsNum + i] = B[i].y - centerDst.y;
        dstDat[pointsNum * 2 + i] = B[i].z - centerDst.z;
    }

    //# rotation matrix
    cv::Mat matS = srcMat * dstMat.t();

    cv::Mat matU, matW, matV;
    cv::SVDecomp(matS, matW, matU, matV);
    R = matV.t() * matU.t();

    //# special reflection case
    double det = cv::determinant(R);
    if (det < 0)
    {
        //https://blog.csdn.net/xiaowei_cqu/article/details/19839019
        // Flip the sign of Vt's last row to remove the reflection
        // (indexed explicitly; *data++ = (*data * -1) is unsequenced, i.e. UB)
        float* data = matV.ptr<float>(m-1);
        for (int i = 0; i < matV.cols; i++) {
            data[i] = -data[i];
        }
        R = matV.t() * matU.t();
    }

    //t = centroid_B.T - np.dot(R, centroid_A.T)
    //# translation
    //Mat(centerDst) = 3x1
    t = cv::Mat(centerDst) - (R * cv::Mat(centerSrc));

    //T = np.identity(m + 1)
    //T[:m, :m] = R
    //T[:m, m] = t
    //# homogeneous transformation
    T = cv::Mat::eye(4, 4, CV_32FC1);
    R.copyTo(T(cv::Rect(0, 0, 3, 3)));
    t.copyTo(T(cv::Rect(3, 0, 1, 3)));

}


int main()
{
    cv::Mat OriginalGrayImage = cv::imread("/home/spple/CLionProjects/SIFT-GPU/data/OtherSampleFrame_IMG_Texture_8Bit_48.png", -1);
    cv::Mat grayImg1;
    cv::cvtColor(OriginalGrayImage, grayImg1, CV_BGR2GRAY);
    assert(grayImg1.channels() == 1); // grayscale

    cv::Mat targetGrayImage = cv::imread("/home/spple/CLionProjects/SIFT-GPU/data/OtherSampleFrame_IMG_Texture_8Bit_52.png", -1);
    cv::Mat grayImg2;
    cv::cvtColor(targetGrayImage, grayImg2, CV_BGR2GRAY);
    assert(grayImg2.channels() == 1); // grayscale

    ASiftDetector asiftDetector;
    asiftDetector.create();

    clock_t begin = clock();
    vector<cv::KeyPoint> asiftKeypoints_query;
    cv::Mat asiftDescriptors_query;
    asiftDetector.detectAndCompute(grayImg1, asiftKeypoints_query, asiftDescriptors_query);
    vector<cv::KeyPoint> asiftKeypoints_object;
    cv::Mat asiftDescriptors_object;
    asiftDetector.detectAndCompute(grayImg2, asiftKeypoints_object, asiftDescriptors_object);
    clock_t end = clock();
    std::cout<<"Running time: "<<(double)(end-begin)/CLOCKS_PER_SEC*1000<<"ms"<<std::endl;

    std::cout<<"keypoints num: "<<asiftKeypoints_query.size()<<std::endl;
    std::cout<<"keypoints num: "<<asiftKeypoints_object.size()<<std::endl;


//    begin = clock();
//    cv::Ptr<cv::cuda::DescriptorMatcher> matcher = cv::cuda::DescriptorMatcher::createBFMatcher(cv::NORM_L2);
//    vector<cv::DMatch> matches_all;
//    GpuMat gd1(asiftDescriptors_query), gd2(asiftDescriptors_object);
//    end = clock();
//    std::cout<<"up time: "<<(double)(end-begin)/CLOCKS_PER_SEC*1000<<"ms"<<std::endl;
//
//    begin = clock();
//	matcher->match(gd1, gd2, matches_all);
//    end = clock();
//    std::cout<<"match time: "<<(double)(end-begin)/CLOCKS_PER_SEC*1000<<"ms"<<std::endl;

    begin = clock();
    cv::Ptr<cv::DescriptorMatcher> matcher = cv::DescriptorMatcher::create("FlannBased");
    vector<vector<cv::DMatch> > matches;
    matcher->knnMatch(asiftDescriptors_query,asiftDescriptors_object, matches, 2);

    std::vector<cv::Point2f> queryPoints;
    std::vector<cv::Point2f> trainPoints;
    vector<cv::DMatch> goodMatches;
    //https://blog.csdn.net/linxihe123/article/details/70173476
    vector<std::pair<cv::KeyPoint,cv::KeyPoint> >kp_pairs_temp;
    for (size_t i = 0; i < matches.size(); i++)
    {
        if ((matches[i].size() == 2) && (matches[i][0].distance < 0.75 * matches[i][1].distance))
        {
            cv::KeyPoint temp1 = asiftKeypoints_query[matches[i][0].queryIdx];
            cv::KeyPoint temp2 = asiftKeypoints_object[matches[i][0].trainIdx];
            goodMatches.push_back(matches[i][0]);
            queryPoints.push_back(temp1.pt);
            trainPoints.push_back(temp2.pt);
            kp_pairs_temp.push_back(std::make_pair(temp1,temp2));
        }
    }
    end = clock();
    std::cout<<"match time: "<<(double)(end-begin)/CLOCKS_PER_SEC*1000<<"ms"<<std::endl;

    cv::Mat img_RR_matches;
    std::string filename1 = "result_1.png";
    LeftUpRightDownDrawInlier(OriginalGrayImage, targetGrayImage, queryPoints, trainPoints, filename1);

    // Need at least 4 point pairs to estimate a homography
    const int minNumberMatchesAllowed = 4;
    if ((int)queryPoints.size() < minNumberMatchesAllowed)
        return -1;

    double reprojectionThreshold=20.0;
    std::vector<unsigned char> inliersMask(goodMatches.size());
    cv::Mat homography = cv::findHomography(queryPoints,
                                    trainPoints,
                                    cv::RANSAC,
                                    reprojectionThreshold,
                                    inliersMask);

    vector<std::pair<cv::KeyPoint,cv::KeyPoint> >kp_pairs;
    std::vector<cv::Point2f> newp1;
    std::vector<cv::Point2f> newp2;
    vector<cv::DMatch> goodgoodMatches;
    for (size_t i=0; i<inliersMask.size(); i++)
    {
        if (inliersMask[i])
        {
            goodgoodMatches.push_back(goodMatches[i]);
            kp_pairs.push_back(kp_pairs_temp[i]);
            newp1.push_back(kp_pairs_temp[i].first.pt);
            newp2.push_back(kp_pairs_temp[i].second.pt);
        }

    }
    std::string filename2 = "result_2.png";
    LeftUpRightDownDrawInlier(OriginalGrayImage, targetGrayImage, newp1, newp2, filename2);
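    // Second geometric check below: essential-matrix RANSAC over the homography inliers
    // (intrinsics fx, fy, cx, cy are hard-coded for this camera; threshold 20 px, confidence 0.999).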

    std::vector<unsigned char> EssentialMask(newp1.size());
    cv::Mat intrinsics = (cv::Mat_<float>(3, 3) << 2269.16, 0, 1065.54, 0, 2268.4, 799.032, 0, 0, 1);
    cv::Mat Essential = cv::findEssentialMat(newp1, newp2, intrinsics, cv::RANSAC, 0.999, 20, EssentialMask);

    cv::Mat OriginalDepthImage = cv::imread("/home/spple/CLionProjects/SIFT-GPU/data/OtherSampleFrame_IMG_DepthMap_48.tif", -1);
    cv::Mat targetDepthImage = cv::imread("/home/spple/CLionProjects/SIFT-GPU/data/OtherSampleFrame_IMG_DepthMap_52.tif", -1);

    std::vector<cv::Point3f> trp1;
    std::vector<cv::Point3f> trp2;
    std::vector<cv::Point2f> trp1_temp;
    std::vector<cv::Point2f> trp2_temp;
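    // Back-project each surviving 2D match to 3D with the pinhole model:
    //   Z = depth(v, u),  X = (u - cx) * Z / fx,  Y = (v - cy) * Z / fy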
    for (size_t key=0; key<newp1.size(); key++)
    {
        float x1 = newp1[key].x;
        float y1 = newp1[key].y;
        float x2 = newp2[key].x;
        float y2 = newp2[key].y;
        float d1 = OriginalDepthImage.at<float>(int(y1),int(x1));
        cv::Point3f p1;
        p1.z = float(d1) / intrinsics.ptr<float>(2)[2];
        p1.x = (x1 - intrinsics.ptr<float>(0)[2]) * p1.z / intrinsics.ptr<float>(0)[0];
        p1.y = (y1 - intrinsics.ptr<float>(1)[2]) * p1.z / intrinsics.ptr<float>(1)[1];

        float d2 = targetDepthImage.at<float>(int(y2),int(x2));
        cv::Point3f p2;
        p2.z = float(d2) / intrinsics.ptr<float>(2)[2];
        p2.x = (x2 - intrinsics.ptr<float>(0)[2]) * p2.z / intrinsics.ptr<float>(0)[0];
        p2.y = (y2 - intrinsics.ptr<float>(1)[2]) * p2.z / intrinsics.ptr<float>(1)[1];

        // The > 0.001 test discards feature points whose depth is zero
        if (EssentialMask[key] && ((std::abs(p1.x) + std::abs(p1.y) + std::abs(p1.z)) > 0.001) && ((std::abs(p2.x) + std::abs(p2.y) + std::abs(p2.z)) > 0.001))
        {
            trp1.push_back(p1);
            trp2.push_back(p2);

            trp1_temp.push_back(newp1[key]);
            trp2_temp.push_back(newp2[key]);
        }
    }

    std::string filename3 = "result_3.png";
    LeftUpRightDownDrawInlier(OriginalGrayImage, targetGrayImage, trp1_temp, trp2_temp, filename3);

    cv::Mat T, R, t;
    best_fit_transform(trp1, trp2, T, R, t);


    // The point clouds we will be using
    PointCloudT::Ptr cloud_in (new PointCloudT);  // Original point cloud
    PointCloudT::Ptr cloud_in_48 (new PointCloudT);  // Original point cloud
    PointCloudT::Ptr cloud_tr (new PointCloudT);  // Transformed point cloud
    PointCloudT::Ptr cloud_icp (new PointCloudT);  // ICP output point cloud

    float leafsize = 2;
    int iterations = 10;  // Default number of ICP iterations

    pcl::console::TicToc time;
    time.tic ();
    if (pcl::io::loadPLYFile ("/home/spple/CLionProjects/python_asift_gpu/save_ply/SampleFrame_52.ply", *cloud_in) < 0)
    {
        PCL_ERROR ("Error loading cloud %s.\n", "SampleFrame_52.ply");
        return (-1);
    }
    std::cout << "\nLoaded file " << "SampleFrame_52.ply" << " (" << cloud_in->size () << " points) in " << time.toc () << " ms\n" << std::endl;

    time.tic ();
    if (pcl::io::loadPLYFile ("/home/spple/CLionProjects/python_asift_gpu/save_ply/SampleFrame_48.ply", *cloud_in_48) < 0)
    {
        PCL_ERROR ("Error loading cloud %s.\n", "SampleFrame_48.ply");
        return (-1);
    }
    std::cout << "\nLoaded file " << "SampleFrame_48.ply" << " (" << cloud_in_48->size () << " points) in " << time.toc () << " ms\n" << std::endl;

    if (leafsize > 0)
    {
        voxel_grid_filter(cloud_in, leafsize);
        voxel_grid_filter(cloud_in_48, leafsize);
    }

    std::cout << "\nAfter voxel-grid filtering: " << cloud_in->size () << " points (SampleFrame_52), " << cloud_in_48->size () << " points (SampleFrame_48)\n" << std::endl;

    // Defining a rotation matrix and translation vector
    Eigen::Matrix4d transformation_matrix = Eigen::Matrix4d::Identity ();

    // A rotation matrix (see https://en.wikipedia.org/wiki/Rotation_matrix)
    transformation_matrix (0, 0) = R.at<float>(0,0);
    transformation_matrix (0, 1) = R.at<float>(0,1);
    transformation_matrix (0, 2) = R.at<float>(0,2);

    transformation_matrix (1, 0) = R.at<float>(1,0);
    transformation_matrix (1, 1) = R.at<float>(1,1);
    transformation_matrix (1, 2) = R.at<float>(1,2);

    transformation_matrix (2, 0) = R.at<float>(2,0);
    transformation_matrix (2, 1) = R.at<float>(2,1);
    transformation_matrix (2, 2) = R.at<float>(2,2);

    transformation_matrix (0, 3) = t.at<float>(0,0);
    transformation_matrix (1, 3) = t.at<float>(1,0);
    transformation_matrix (2, 3) = t.at<float>(2,0);

    // Display in terminal the transformation matrix
    std::cout << "Applying this rigid transformation to: cloud_in -> cloud_icp" << std::endl;
    print4x4Matrix (transformation_matrix);

    // Transform frame 48 (cloud_in_48) by the initial R,t estimate; the transformed
    // cloud 48' (cloud_icp) is the ICP source, registered against the target cloud 52 (cloud_in)
    // Executing the transformation
    pcl::transformPointCloudWithNormals (*cloud_in_48, *cloud_icp, transformation_matrix);
    //pcl::transformPointCloud (*cloud_in_48, *cloud_icp, transformation_matrix);
    *cloud_tr = *cloud_icp;  // We backup cloud_icp into cloud_tr for later use

    // The Iterative Closest Point algorithm
    time.tic ();

//    pcl::IterativeClosestPointWithNormals<pcl::PointXYZRGBNormal, pcl::PointXYZRGBNormal>::Ptr icp ( new pcl::IterativeClosestPointWithNormals<pcl::PointXYZRGBNormal, pcl::PointXYZRGBNormal> () );
//    icp->setMaximumIterations ( iterations );
//    icp->setInputSource ( cloud_icp ); // not cloud_source, but cloud_source_trans!
//    icp->setInputTarget ( cloud_in );
//    icp->setMaxCorrespondenceDistance ( 5 );
//    icp->align ( *cloud_icp );
//    icp->setMaximumIterations (1);

    pcl::IterativeClosestPoint<PointT, PointT> icp;
    icp.setMaximumIterations (iterations);
    icp.setMaxCorrespondenceDistance(5);
    icp.setInputSource (cloud_icp);
    icp.setInputTarget (cloud_in);
    icp.align (*cloud_icp);
    icp.setMaximumIterations (1);  // We set this variable to 1 for the next time we will call .align () function

    std::cout << "Applied " << iterations << " ICP iteration(s) in " << time.toc () << " ms" << std::endl;

    if (icp.hasConverged ())
    {
        std::cout << "\nICP has converged, score is " << icp.getFitnessScore () << std::endl;
        std::cout << "\nICP transformation " << iterations << " : cloud_icp -> cloud_in" << std::endl;
        transformation_matrix = icp.getFinalTransformation ().cast<double>();
        print4x4Matrix (transformation_matrix);
    }
    else
    {
        PCL_ERROR ("\nICP has not converged.\n");
        return (-1);
    }

    // Visualization
    boost::shared_ptr< pcl::visualization::PCLVisualizer > viewer(new pcl::visualization::PCLVisualizer("ICP demo"));
    // Initialize camera parameters
    viewer->initCameraParameters ();

    // Create two vertically separated viewports
    int v1 (0);
    int v2 (1);
    viewer->createViewPort (0.0, 0.0, 0.5, 1.0, v1);
    viewer->createViewPort (0.5, 0.0, 1.0, 1.0, v2);

    // The color we will be using
    float bckgr_gray_level = 0.0;  // Black
    float txt_gray_lvl = 1.0 - bckgr_gray_level;

    // Original point cloud is white
    pcl::visualization::PointCloudColorHandlerCustom<PointT> cloud_in_color_h (cloud_in, (int) 255 * txt_gray_lvl, (int) 255 * txt_gray_lvl,
                                                                               (int) 255 * txt_gray_lvl);
    viewer->addPointCloud (cloud_in, cloud_in_color_h, "cloud_in_v1", v1);
    viewer->addPointCloud (cloud_in, cloud_in_color_h, "cloud_in_v2", v2);

    // Transformed point cloud is green
    pcl::visualization::PointCloudColorHandlerCustom<PointT> cloud_tr_color_h (cloud_tr, 20, 180, 20);
    viewer->addPointCloud (cloud_tr, cloud_tr_color_h, "cloud_tr_v1", v1);

    // Transformed point cloud is blue
    pcl::visualization::PointCloudColorHandlerCustom<PointT> cloud_in48_color_h (cloud_in_48, 20, 20, 180);
    viewer->addPointCloud (cloud_in_48, cloud_in48_color_h, "cloud_in48_v1", v1);


    // ICP aligned point cloud is red
    pcl::visualization::PointCloudColorHandlerCustom<PointT> cloud_icp_color_h (cloud_icp, 180, 20, 20);
    viewer->addPointCloud (cloud_icp, cloud_icp_color_h, "cloud_icp_v2", v2);

    // Adding text descriptions in each viewport
    viewer->addText ("White: Original point cloud\nGreen: Matrix transformed point cloud", 10, 15, 16, txt_gray_lvl, txt_gray_lvl, txt_gray_lvl, "icp_info_1", v1);
    viewer->addText ("White: Original point cloud\nRed: ICP aligned point cloud", 10, 15, 16, txt_gray_lvl, txt_gray_lvl, txt_gray_lvl, "icp_info_2", v2);

    std::stringstream ss;
    ss << iterations;
    std::string iterations_cnt = "ICP iterations = " + ss.str ();
    viewer->addText (iterations_cnt, 10, 60, 16, txt_gray_lvl, txt_gray_lvl, txt_gray_lvl, "iterations_cnt", v2);

    // Set background color
    viewer->setBackgroundColor (bckgr_gray_level, bckgr_gray_level, bckgr_gray_level, v1);
    viewer->setBackgroundColor (bckgr_gray_level, bckgr_gray_level, bckgr_gray_level, v2);

    // Add a reference coordinate system
    viewer->addCoordinateSystem(1.0, "reference");


    // Set camera position and orientation (camera at the origin)
    //https://www.cnblogs.com/ghjnwk/p/10305796.html
    // Viewer mouse controls: left button, right button, scroll wheel, hold the wheel down
    viewer->setCameraPosition (0, 0, 0, 0, 0, 0, 0);
    viewer->setSize (2048, 2048);  // Visualiser window size

    // Register keyboard callback :
    viewer->registerKeyboardCallback (&keyboardEventOccurred, (void*) NULL);

    // Display the visualiser
    while (!viewer->wasStopped ())
    {
        viewer->spinOnce ();

        // The user pressed "space" :
        if (next_iteration)
        {
            // The Iterative Closest Point algorithm
            time.tic ();
            icp.align (*cloud_icp);
            std::cout << "Applied 1 ICP iteration in " << time.toc () << " ms" << std::endl;

            if (icp.hasConverged ())
            {
                printf ("\033[11A");  // Go up 11 lines in terminal output.
                printf ("\nICP has converged, score is %+.0e\n", icp.getFitnessScore ());
                std::cout << "\nICP transformation " << ++iterations << " : cloud_icp -> cloud_in" << std::endl;
                transformation_matrix *= icp.getFinalTransformation ().cast<double>();  // WARNING /!\ This is not accurate! For "educational" purpose only!
                print4x4Matrix (transformation_matrix);  // Print the transformation between original pose and current pose

                ss.str ("");
                ss << iterations;
                std::string iterations_cnt = "ICP iterations = " + ss.str ();
                viewer->updateText (iterations_cnt, 10, 60, 16, txt_gray_lvl, txt_gray_lvl, txt_gray_lvl, "iterations_cnt");
                viewer->updatePointCloud (cloud_icp, cloud_icp_color_h, "cloud_icp_v2");
            }
            else
            {
                PCL_ERROR ("\nICP has not converged.\n");
                return (-1);
            }
        }
        next_iteration = false;
    }


    return 0;
}

Optimizing the algorithm:

W:2064

H:1544

GTX1060-notebook

Out-of-bounds feature points -- affineSkew num:::129
Feature extraction time: 3423.67ms
keypoints1 num: 85729
keypoints2 num: 103120
match time: 1808.42ms

main_1.cpp

#include <vector>
#include <ctime>
#include <string>
#include <iostream>
using std::vector;
using std::iostream;

#include "SiftGPU.h"
#include "GL/gl.h"

#include "opencv2/opencv.hpp"
#include "opencv2/core/core.hpp"
#include "opencv2/imgcodecs/imgcodecs.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/cudawarping.hpp"
#include "opencv2/cudafilters.hpp"
#include <opencv2/cudafeatures2d.hpp>
using cv::cuda::GpuMat;

#include <pcl/io/ply_io.h>
#include <pcl/point_types.h>
#include <pcl/registration/icp.h>
#include <pcl/visualization/pcl_visualizer.h>
#include <pcl/console/time.h>   // TicToc
#include <pcl/filters/voxel_grid.h>

//PointXYZRGBNormal PointNormal
typedef pcl::PointXYZRGBNormal PointT;
typedef pcl::PointCloud<PointT> PointCloudT;

bool next_iteration = false;


void
print4x4Matrix (const Eigen::Matrix4d & matrix)
{
    printf ("Rotation matrix :\n");
    printf ("    | %6.3f %6.3f %6.3f | \n", matrix (0, 0), matrix (0, 1), matrix (0, 2));
    printf ("R = | %6.3f %6.3f %6.3f | \n", matrix (1, 0), matrix (1, 1), matrix (1, 2));
    printf ("    | %6.3f %6.3f %6.3f | \n", matrix (2, 0), matrix (2, 1), matrix (2, 2));
    printf ("Translation vector :\n");
    printf ("t = < %6.3f, %6.3f, %6.3f >\n\n", matrix (0, 3), matrix (1, 3), matrix (2, 3));
}

void
keyboardEventOccurred (const pcl::visualization::KeyboardEvent& event,
                       void* nothing)
{
    if (event.getKeySym () == "space" && event.keyDown ())
        next_iteration = true;
}

// Downsample
void voxel_grid_filter(PointCloudT::Ptr cloud_in, float leafsize)
{
    pcl::VoxelGrid<PointT> sor;
    sor.setInputCloud(cloud_in);
    sor.setLeafSize(leafsize, leafsize, leafsize);
    sor.filter(*cloud_in);
    // std::cout << "after filter has " << cloud_in->points.size () << " points\n";
}


class ASiftDetector{

    enum InitStatus{
        INIT_OK,
        INIT_IS_NOT_SUPPORT,
        INIT_VERIFY_FAILED
    };

public:
    ASiftDetector() = default;
    ~ASiftDetector() {
        if(m_siftGpuDetector)
        {
            m_siftGpuDetector->DestroyContextGL();
            delete m_siftGpuDetector;
        }
        if(m_siftGpuMatcher)
        {
            m_siftGpuMatcher->DestroyContextGL();
            delete m_siftGpuMatcher;
        }
    }
    InitStatus create(){
        m_siftGpuDetector = new SiftGPU();

//        char* myargv[4] = {(char*)"-fo",(char*)"-1",(char*)"-v",(char*)"1"};
//        m_siftGpuDetector->ParseParam(4,myargv);
        char* myargv[6] = {(char*)"-fo",(char*)"-1",(char*)"-v",(char*)"1",(char*)"-cuda",(char*)"0"};
        m_siftGpuDetector->ParseParam(6,myargv);

        // Set edge threshold, dog threshold

        if(m_siftGpuDetector->CreateContextGL() != SiftGPU::SIFTGPU_FULL_SUPPORTED){
            std::cerr << "SiftGPU is not supported!" << std::endl;
            return InitStatus::INIT_IS_NOT_SUPPORT;
        }

        m_maxMatch = 50000;

        m_siftGpuMatcher = new SiftMatchGPU(m_maxMatch);
        m_siftGpuMatcher->VerifyContextGL();

        return INIT_OK;
    }

    void affineSkew(double tilt, double phi, cv::Mat& img, cv::Mat& mask, cv::Mat& Ai, cv::cuda::GpuMat &gI)
    {
#define USE_OPENCV_GPU
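        // With USE_OPENCV_GPU defined, warpAffine / GaussianBlur / resize below run
        // through cv::cuda on the GpuMat gI; comment it out to fall back to the CPU path.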

        int h = img.rows;
        int w = img.cols;

        mask = cv::Mat(h, w, CV_8UC1, cv::Scalar(255));

        cv::Mat A = cv::Mat::eye(2,3, CV_32F);

        if(phi != 0.0)
        {
            phi *= M_PI/180.;
            double s = sin(phi);
            double c = cos(phi);

            A = (cv::Mat_<float>(2,2) << c, -s, s, c);

            cv::Mat corners = (cv::Mat_<float>(4,2) << 0, 0, w, 0, w, h, 0, h);
            cv::Mat tcorners = corners*A.t();
            cv::Mat tcorners_x, tcorners_y;
            tcorners.col(0).copyTo(tcorners_x);
            tcorners.col(1).copyTo(tcorners_y);
            std::vector<cv::Mat> channels;
            channels.push_back(tcorners_x);
            channels.push_back(tcorners_y);
            merge(channels, tcorners);
            // cv::Rect(int x, int y, int width, int height)
            cv::Rect rect = boundingRect(tcorners);
            A =  (cv::Mat_<float>(2,3) << c, -s, -rect.x, s, c, -rect.y);

#ifdef USE_OPENCV_GPU
            gI.upload(img);
            cv::cuda::warpAffine(gI, gI, A, cv::Size(rect.width, rect.height), cv::INTER_LINEAR, cv::BORDER_REPLICATE);
            gI.download(img);
#else
            cv::warpAffine(img, img, A, cv::Size(rect.width, rect.height), cv::INTER_LINEAR, cv::BORDER_REPLICATE);
#endif

        }
        if(tilt != 1.0)
        {
            double s = 0.8*sqrt(tilt*tilt-1);

#ifdef USE_OPENCV_GPU
            gI.upload(img);
            cv::Ptr<cv::cuda::Filter> filter = cv::cuda::createGaussianFilter(gI.type(), gI.type(), cv::Size(0,0), s, 0.01);
            filter->apply(gI, gI);
            cv::cuda::resize(gI, gI, cv::Size(0,0), 1.0/tilt, 1.0, cv::INTER_NEAREST);
            gI.download(img);
#else
            cv::GaussianBlur(img, img, cv::Size(0,0), s, 0.01);
            cv::resize(img, img, cv::Size(0,0), 1.0/tilt, 1.0, cv::INTER_NEAREST);
#endif

            A.row(0) = A.row(0)/tilt;
        }
        if(tilt != 1.0 || phi != 0.0)
        {
            h = img.rows;
            w = img.cols;

#ifdef USE_OPENCV_GPU
            gI.upload(mask);
            cv::cuda::warpAffine(gI, gI, A, cv::Size(w, h), cv::INTER_NEAREST);
            gI.download(mask);
#else
            cv::warpAffine(mask, mask, A, cv::Size(w, h), cv::INTER_NEAREST);
#endif

        }
        invertAffineTransform(A, Ai);
    }

    void detectAndCompute(const cv::Mat &img, vector<cv::KeyPoint> &kptsAll, cv::Mat &descAll){
        // ASIFT affine sampling parameters (tilt t, rotation phi), as in the first listing
        vector<vector<double>> params;
        vector<double> temp;
        temp.push_back(1.0);
        temp.push_back(0.0);
        params.push_back(temp);
        for(int tl = 1; tl < 6; tl++) {
            double t = pow(2, 0.5 * tl);
            for (double phi = 0; phi < 180; phi += 72.0/t) {
                temp.clear();
                temp.push_back(t);
                temp.push_back(phi);
                params.push_back(temp);
            }
        }

        kptsAll.clear();
        descAll = cv::Mat(0, 128, CV_32F);
        int num=0;
        cv::cuda::GpuMat cvGpuMat;

        for(int aff = 0; aff < params.size(); aff++)
        {
            double tilt = params[aff][0];
            double phi = params[aff][1];

            cv::Mat descriptors;
            vector<cv::KeyPoint> kpts;

            cv::Mat timg, mask, Ai;
            timg = img.clone();

            affineSkew(tilt, phi, timg, mask, Ai, cvGpuMat);
            cv::Mat img_disp;
            cv::bitwise_and(timg, timg, img_disp, mask);

            m_siftGpuDetector->RunSIFT(img_disp.cols,img_disp.rows,img_disp.data, GL_LUMINANCE, GL_UNSIGNED_BYTE);
            //m_siftGpuDetector->RunSIFT(img_disp.cols,img_disp.rows,img_disp.data,GL_RGB,GL_UNSIGNED_BYTE);

            auto num1 = m_siftGpuDetector->GetFeatureNum();
            vector<float> des(128 * num1);
            vector<SiftGPU::SiftKeypoint> SiftGPUkeypoints(num1);
            m_siftGpuDetector->GetFeatureVector(&SiftGPUkeypoints[0],&des[0]);
            // Trans to Mat
            cv::Mat m(des);
            descriptors = m.reshape(1,num1).clone();


            for(const SiftGPU::SiftKeypoint &kp : SiftGPUkeypoints){
                cv::Point3f kpt(kp.x, kp.y, 1);
                cv::Mat kpt_t = Ai*cv::Mat(kpt);
                float x = kpt_t.at<float>(0,0);
                float y = kpt_t.at<float>(1,0);
                // Clamp keypoints that map outside the original image bounds
                if ((x < 0) || (y < 0) || (x > img.cols-1) || (y > img.rows-1))
                {
                    num++;
                    if (x < 0)
                        x = 0;
                    if (y < 0)
                        y = 0;
                    if (x > img.cols-1)
                        x = img.cols-1;
                    if (y > img.rows-1)
                        y = img.rows-1;
                }
                cv::KeyPoint temp(x,y,kp.s,kp.o);
                kpts.push_back(temp);
            }

            kptsAll.insert(kptsAll.end(), kpts.begin(), kpts.end());
            descAll.push_back(descriptors);
        }
        std::cout <<"affineSkew num:::" << num << std::endl;

    }

    void transToRootSift(const cv::Mat &siftFeature,cv::Mat &rootSiftFeature){
        for(int i = 0; i < siftFeature.rows; i ++){
            // Convert to float type
            cv::Mat f;
            siftFeature.row(i).convertTo(f,CV_32FC1);

            cv::normalize(f,f,1,0,cv::NORM_L1); // l1 normalize
            sqrt(f,f); // sqrt-root  root-sift
            rootSiftFeature.push_back(f);
        }
    }

    int gpuMatch(const cv::Mat &des1,const cv::Mat &des2){

        //m_siftGpuMatcher->SetMaxSift();

        m_siftGpuMatcher->SetDescriptors(0,des1.rows,des1.data);
        m_siftGpuMatcher->SetDescriptors(1,des2.rows,des2.data);

        int (*match_buf)[2] = new int[m_maxMatch][2];

        auto matchNum = m_siftGpuMatcher->GetSiftMatch(m_maxMatch,match_buf);

        delete[] match_buf;

        return matchNum;
    }

    void setMaxSift(int max_sift){
        m_siftGpuMatcher->SetMaxSift(max_sift);
        m_maxMatch = max_sift;
    }

    int gpuMatch(const cv::Mat &des1,const cv::Mat &des2,vector<cv::DMatch>& matches){

        m_siftGpuMatcher->SetDescriptors(0,des1.rows,(float*)des1.data);
        m_siftGpuMatcher->SetDescriptors(1,des2.rows,(float*)des2.data);

        int (*match_buf)[2] = new int[m_maxMatch][2];

        std::cout<<"m_maxMatch "<< m_maxMatch << std::endl;

        auto matchNum = m_siftGpuMatcher->GetSiftMatch(m_maxMatch,match_buf);

        for(int i = 0 ;i  < matchNum; i ++) {
            cv::DMatch dm(match_buf[i][0],match_buf[i][1],0);
            matches.push_back(dm);
        }

        delete[] match_buf;
        return matchNum;
    }

private:
    SiftGPU *m_siftGpuDetector = nullptr;
    SiftMatchGPU *m_siftGpuMatcher = nullptr;

    int m_maxMatch = 0;
};


cv::Mat LeftUpRightDownDrawInlier(const cv::Mat &queryImage, const cv::Mat &objectImage,
                                  const vector<cv::Point2f> &queryCoord, const vector<cv::Point2f> &objectCoord, std::string filename)
{
    cv::Size sz = cv::Size(queryImage.size().width + objectImage.size().width,
                   queryImage.size().height + objectImage.size().height);
    cv::Mat matchingImage = cv::Mat::zeros(sz, CV_8UC3);

    // Set up ROIs inside matchingImage and copy the source images into them
    cv::Mat roi1 = cv::Mat(matchingImage, cv::Rect(0, 0, queryImage.size().width, queryImage.size().height));
    queryImage.copyTo(roi1);
    cv::Mat roi2 = cv::Mat(matchingImage, cv::Rect(queryImage.size().width, queryImage.size().height, objectImage.size().width, objectImage.size().height));
    objectImage.copyTo(roi2);

    // Draw a line between each matched pair
    for (int i = 0; i < (int)queryCoord.size(); ++i) {
        cv::Point2f pt1 = queryCoord[i];
        cv::Point2f pt2 = objectCoord[i];
        cv::Point2f from = pt1;
        cv::Point2f to = cv::Point(pt2.x + queryImage.size().width, pt2.y + queryImage.size().height);
        line(matchingImage, from, to, cv::Scalar(0, 255, 255));
    }
    cv::imwrite(filename, matchingImage);
    return matchingImage;
}

/*
 * Calculates the least-squares best-fit transform that maps corresponding points A to B in m spatial dimensions
 * Input:
 *  A: Nxm numpy array of corresponding points
 *  B: Nxm numpy array of corresponding points
 * Returns:
 *  T: (m+1)x(m+1) homogeneous transformation matrix that maps A on to B
 *  R: mxm rotation matrix
 *  t: mx1 translation vector
 *  //https://blog.csdn.net/kewei9/article/details/74157236
 */
void best_fit_transform(std::vector<cv::Point3f> A, std::vector<cv::Point3f> B, cv::Mat & T, cv::Mat & R, cv::Mat & t)
{
    if (A.size()!=B.size())
    {
        std::cout <<"error:::" << "A.size()!=B.size()" << std::endl;
    }
    int pointsNum = A.size();

    //# translate points to their centroids
    double srcSumX = 0.0f;
    double srcSumY = 0.0f;
    double srcSumZ = 0.0f;

    double dstSumX = 0.0f;
    double dstSumY = 0.0f;
    double dstSumZ = 0.0f;

    for (int i = 0; i < pointsNum; ++ i)
    {
        srcSumX += A[i].x;
        srcSumY += A[i].y;
        srcSumZ += A[i].z;

        dstSumX += B[i].x;
        dstSumY += B[i].y;
        dstSumZ += B[i].z;
    }

    cv::Point3f centerSrc, centerDst;

    centerSrc.x = float(srcSumX / pointsNum);
    centerSrc.y = float(srcSumY / pointsNum);
    centerSrc.z = float(srcSumZ / pointsNum);

    centerDst.x = float(dstSumX / pointsNum);
    centerDst.y = float(dstSumY / pointsNum);
    centerDst.z = float(dstSumZ / pointsNum);

    int m  = 3;
    cv::Mat srcMat(m, pointsNum, CV_32FC1);
    cv::Mat dstMat(m, pointsNum, CV_32FC1);

    float* srcDat = (float*)(srcMat.data);
    float* dstDat = (float*)(dstMat.data);
    for (int i = 0; i < pointsNum; ++ i)
    {
        srcDat[i] = A[i].x - centerSrc.x;
        srcDat[pointsNum + i] = A[i].y - centerSrc.y;
        srcDat[pointsNum * 2 + i] = A[i].z - centerSrc.z;

        dstDat[i] = B[i].x - centerDst.x;
        dstDat[pointsNum + i] = B[i].y - centerDst.y;
        dstDat[pointsNum * 2 + i] = B[i].z - centerDst.z;
    }

    //# rotation matrix
    cv::Mat matS = srcMat * dstMat.t();

    cv::Mat matU, matW, matV;
    cv::SVDecomp(matS, matW, matU, matV);
    R = matV.t() * matU.t();

    //# special reflection case
    double det = cv::determinant(R);
    if (det < 0)
    {
        //https://blog.csdn.net/xiaowei_cqu/article/details/19839019
        // Flip the sign of Vt's last row to remove the reflection
        // (indexed explicitly; *data++ = (*data * -1) is unsequenced, i.e. UB)
        float* data = matV.ptr<float>(m-1);
        for (int i = 0; i < matV.cols; i++) {
            data[i] = -data[i];
        }
        R = matV.t() * matU.t();
    }

    //t = centroid_B.T - np.dot(R, centroid_A.T)
    //# translation
    //Mat(centerDst) = 3x1
    t = cv::Mat(centerDst) - (R * cv::Mat(centerSrc));

    //T = np.identity(m + 1)
    //T[:m, :m] = R
    //T[:m, m] = t
    //# homogeneous transformation
    T = cv::Mat::eye(4, 4, CV_32FC1);
    R.copyTo(T(cv::Rect(0, 0, 3, 3)));
    t.copyTo(T(cv::Rect(3, 0, 1, 3)));

}


int main()
{
    //http://www.kind-of-works.com/WANGFAN_site_data/GPU-ASIFT.html
    // Initialize the OpenCV CUDA device
    int flag = cv::cuda::getCudaEnabledDeviceCount();
    if (flag != 0) { cv::cuda::setDevice(0); }


    cv::Mat OriginalGrayImage = cv::imread("/home/spple/CLionProjects/SIFT-GPU/data/OtherSampleFrame_IMG_Texture_8Bit_48.png", -1);
    cv::Mat grayImg1;
    cv::cvtColor(OriginalGrayImage, grayImg1, CV_BGR2GRAY);
    assert(grayImg1.channels() == 1); // grayscale

    cv::Mat targetGrayImage = cv::imread("/home/spple/CLionProjects/SIFT-GPU/data/OtherSampleFrame_IMG_Texture_8Bit_52.png", -1);
    cv::Mat grayImg2;
    cv::cvtColor(targetGrayImage, grayImg2, CV_BGR2GRAY);
    assert(grayImg2.channels() == 1); // grayscale

    ASiftDetector asiftDetector;
    asiftDetector.create();

    clock_t begin = clock();
    vector<cv::KeyPoint> asiftKeypoints_query;
    cv::Mat asiftDescriptors_query;
    asiftDetector.detectAndCompute(grayImg1, asiftKeypoints_query, asiftDescriptors_query);
    vector<cv::KeyPoint> asiftKeypoints_object;
    cv::Mat asiftDescriptors_object;
    asiftDetector.detectAndCompute(grayImg2, asiftKeypoints_object, asiftDescriptors_object);
    clock_t end = clock();
    std::cout<<"Running time: "<<(double)(end-begin)/CLOCKS_PER_SEC*1000<<"ms"<<std::endl;

    std::cout<<"keypoints num: "<<asiftKeypoints_query.size()<<std::endl;
    std::cout<<"keypoints num: "<<asiftKeypoints_object.size()<<std::endl;


//    begin = clock();
//    cv::Ptr<cv::cuda::DescriptorMatcher> matcher = cv::cuda::DescriptorMatcher::createBFMatcher(cv::NORM_L2);
//    vector<cv::DMatch> matches_all;
//    GpuMat gd1(asiftDescriptors_query), gd2(asiftDescriptors_object);
//    end = clock();
//    std::cout<<"up time: "<<(double)(end-begin)/CLOCKS_PER_SEC*1000<<"ms"<<std::endl;
//
//    begin = clock();
//	matcher->match(gd1, gd2, matches_all);
//    end = clock();
//    std::cout<<"match time: "<<(double)(end-begin)/CLOCKS_PER_SEC*1000<<"ms"<<std::endl;

    begin = clock();
    cv::Ptr<cv::DescriptorMatcher> matcher = cv::DescriptorMatcher::create("FlannBased");
    vector<vector<cv::DMatch> > matches;
    matcher->knnMatch(asiftDescriptors_query,asiftDescriptors_object, matches, 2);

    std::vector<cv::Point2f> queryPoints;
    std::vector<cv::Point2f> trainPoints;
    vector<cv::DMatch> goodMatches;
    //https://blog.csdn.net/linxihe123/article/details/70173476
    vector<std::pair<cv::KeyPoint,cv::KeyPoint> >kp_pairs_temp;
    for (size_t i = 0; i < matches.size(); i++)
    {
        if ((matches[i].size() == 2) && (matches[i][0].distance < 0.75 * matches[i][1].distance))
        {
            cv::KeyPoint temp1 = asiftKeypoints_query[matches[i][0].queryIdx];
            cv::KeyPoint temp2 = asiftKeypoints_object[matches[i][0].trainIdx];
            goodMatches.push_back(matches[i][0]);
            queryPoints.push_back(temp1.pt);
            trainPoints.push_back(temp2.pt);
            kp_pairs_temp.push_back(std::make_pair(temp1,temp2));
        }
    }
    end = clock();
    std::cout<<"match time: "<<(double)(end-begin)/CLOCKS_PER_SEC*1000<<"ms"<<std::endl;

    cv::Mat img_RR_matches;
    std::string filename1 = "result_1.png";
    LeftUpRightDownDrawInlier(OriginalGrayImage, targetGrayImage, queryPoints, trainPoints, filename1);

    // Need at least 4 point pairs to estimate a homography
    const int minNumberMatchesAllowed = 4;
    if ((int)queryPoints.size() < minNumberMatchesAllowed)
        return -1;

    double reprojectionThreshold=20.0;
    std::vector<unsigned char> inliersMask(goodMatches.size());
    cv::Mat homography = cv::findHomography(queryPoints,
                                    trainPoints,
                                    cv::RANSAC,
                                    reprojectionThreshold,
                                    inliersMask);

    vector<std::pair<cv::KeyPoint,cv::KeyPoint> >kp_pairs;
    std::vector<cv::Point2f> newp1;
    std::vector<cv::Point2f> newp2;
    vector<cv::DMatch> goodgoodMatches;
    for (size_t i=0; i<inliersMask.size(); i++)
    {
        if (inliersMask[i])
        {
            goodgoodMatches.push_back(goodMatches[i]);
            kp_pairs.push_back(kp_pairs_temp[i]);
            newp1.push_back(kp_pairs_temp[i].first.pt);
            newp2.push_back(kp_pairs_temp[i].second.pt);
        }

    }
    std::string filename2 = "result_2.png";
    LeftUpRightDownDrawInlier(OriginalGrayImage, targetGrayImage, newp1, newp2, filename2);

    std::vector<unsigned char> EssentialMask(newp1.size());
    cv::Mat intrinsics = (cv::Mat_<float>(3, 3) << 2269.16, 0, 1065.54, 0, 2268.4, 799.032, 0, 0, 1);
    cv::Mat Essential = cv::findEssentialMat(newp1, newp2, intrinsics, cv::RANSAC, 0.999, 20, EssentialMask);

    cv::Mat OriginalDepthImage = cv::imread("/home/spple/CLionProjects/SIFT-GPU/data/OtherSampleFrame_IMG_DepthMap_48.tif", -1);
    cv::Mat targetDepthImage = cv::imread("/home/spple/CLionProjects/SIFT-GPU/data/OtherSampleFrame_IMG_DepthMap_52.tif", -1);

    std::vector<cv::Point3f> trp1;
    std::vector<cv::Point3f> trp2;
    std::vector<cv::Point2f> trp1_temp;
    std::vector<cv::Point2f> trp2_temp;
    for (size_t key=0; key<newp1.size(); key++)
    {
        float x1 = newp1[key].x;
        float y1 = newp1[key].y;
        float x2 = newp2[key].x;
        float y2 = newp2[key].y;
        float d1 = OriginalDepthImage.at<float>(int(y1),int(x1));
        cv::Point3f p1;
        p1.z = float(d1) / intrinsics.ptr<float>(2)[2];
        p1.x = (x1 - intrinsics.ptr<float>(0)[2]) * p1.z / intrinsics.ptr<float>(0)[0];
        p1.y = (y1 - intrinsics.ptr<float>(1)[2]) * p1.z / intrinsics.ptr<float>(1)[1];

        float d2 = targetDepthImage.at<float>(int(y2),int(x2));
        cv::Point3f p2;
        p2.z = float(d2) / intrinsics.ptr<float>(2)[2];
        p2.x = (x2 - intrinsics.ptr<float>(0)[2]) * p2.z / intrinsics.ptr<float>(0)[0];
        p2.y = (y2 - intrinsics.ptr<float>(1)[2]) * p2.z / intrinsics.ptr<float>(1)[1];

        // The > 0.001 test discards feature points whose depth is zero
        if (EssentialMask[key] && ((std::abs(p1.x) + std::abs(p1.y) + std::abs(p1.z)) > 0.001) && ((std::abs(p2.x) + std::abs(p2.y) + std::abs(p2.z)) > 0.001))
        {
            trp1.push_back(p1);
            trp2.push_back(p2);

            trp1_temp.push_back(newp1[key]);
            trp2_temp.push_back(newp2[key]);
        }
    }

    std::string filename3 = "result_3.png";
    LeftUpRightDownDrawInlier(OriginalGrayImage, targetGrayImage, trp1_temp, trp2_temp, filename3);

    cv::Mat T, R, t;
    best_fit_transform(trp1, trp2, T, R, t);


    // The point clouds we will be using
    PointCloudT::Ptr cloud_in (new PointCloudT);  // Original point cloud
    PointCloudT::Ptr cloud_in_48 (new PointCloudT);  // Original point cloud
    PointCloudT::Ptr cloud_tr (new PointCloudT);  // Transformed point cloud
    PointCloudT::Ptr cloud_icp (new PointCloudT);  // ICP output point cloud

    float leafsize = 2;
    int iterations = 10;  // Default number of ICP iterations

    pcl::console::TicToc time;
    time.tic ();
    if (pcl::io::loadPLYFile ("/home/spple/CLionProjects/python_asift_gpu/save_ply/SampleFrame_52.ply", *cloud_in) < 0)
    {
        PCL_ERROR ("Error loading cloud %s.\n", "SampleFrame_52.ply");
        return (-1);
    }
    std::cout << "\nLoaded file " << "SampleFrame_52.ply" << " (" << cloud_in->size () << " points) in " << time.toc () << " ms\n" << std::endl;

    time.tic ();
    if (pcl::io::loadPLYFile ("/home/spple/CLionProjects/python_asift_gpu/save_ply/SampleFrame_48.ply", *cloud_in_48) < 0)
    {
        PCL_ERROR ("Error loading cloud %s.\n", "SampleFrame_48.ply");
        return (-1);
    }
    std::cout << "\nLoaded file " << "SampleFrame_48.ply" << " (" << cloud_in_48->size () << " points) in " << time.toc () << " ms\n" << std::endl;

    if (leafsize > 0)
    {
        voxel_grid_filter(cloud_in, leafsize);
        voxel_grid_filter(cloud_in_48, leafsize);
    }

    std::cout << "\nAfter voxel-grid filtering: " << cloud_in->size () << " points (SampleFrame_52), " << cloud_in_48->size () << " points (SampleFrame_48)\n" << std::endl;

    // Defining a rotation matrix and translation vector
    Eigen::Matrix4d transformation_matrix = Eigen::Matrix4d::Identity ();

    // A rotation matrix (see https://en.wikipedia.org/wiki/Rotation_matrix)
    transformation_matrix (0, 0) = R.at<float>(0,0);
    transformation_matrix (0, 1) = R.at<float>(0,1);
    transformation_matrix (0, 2) = R.at<float>(0,2);

    transformation_matrix (1, 0) = R.at<float>(1,0);
    transformation_matrix (1, 1) = R.at<float>(1,1);
    transformation_matrix (1, 2) = R.at<float>(1,2);

    transformation_matrix (2, 0) = R.at<float>(2,0);
    transformation_matrix (2, 1) = R.at<float>(2,1);
    transformation_matrix (2, 2) = R.at<float>(2,2);

    transformation_matrix (0, 3) = t.at<float>(0,0);
    transformation_matrix (1, 3) = t.at<float>(1,0);
    transformation_matrix (2, 3) = t.at<float>(2,0);

    // Display in terminal the transformation matrix
    std::cout << "Applying this rigid transformation to: cloud_in -> cloud_icp" << std::endl;
    print4x4Matrix (transformation_matrix);

    // Transform frame 48 (cloud_in_48) by the initial R,t estimate; the transformed
    // cloud 48' (cloud_icp) is the ICP source, registered against the target cloud 52 (cloud_in)
    // Executing the transformation
    pcl::transformPointCloudWithNormals (*cloud_in_48, *cloud_icp, transformation_matrix);
    //pcl::transformPointCloud (*cloud_in_48, *cloud_icp, transformation_matrix);
    *cloud_tr = *cloud_icp;  // We backup cloud_icp into cloud_tr for later use

    // The Iterative Closest Point algorithm
    time.tic ();

//    pcl::IterativeClosestPointWithNormals<pcl::PointXYZRGBNormal, pcl::PointXYZRGBNormal>::Ptr icp ( new pcl::IterativeClosestPointWithNormals<pcl::PointXYZRGBNormal, pcl::PointXYZRGBNormal> () );
//    icp->setMaximumIterations ( iterations );
//    icp->setInputSource ( cloud_icp ); // not cloud_source, but cloud_source_trans!
//    icp->setInputTarget ( cloud_in );
//    icp->setMaxCorrespondenceDistance ( 5 );
//    icp->align ( *cloud_icp );
//    icp->setMaximumIterations (1);

    pcl::IterativeClosestPoint<PointT, PointT> icp;
    icp.setMaximumIterations (iterations);
    icp.setMaxCorrespondenceDistance(5);
    icp.setInputSource (cloud_icp);
    icp.setInputTarget (cloud_in);
    icp.align (*cloud_icp);
    icp.setMaximumIterations (1);  // We set this variable to 1 for the next time we will call .align () function

    std::cout << "Applied " << iterations << " ICP iteration(s) in " << time.toc () << " ms" << std::endl;

    if (icp.hasConverged ())
    {
        std::cout << "\nICP has converged, score is " << icp.getFitnessScore () << std::endl;
        std::cout << "\nICP transformation " << iterations << " : cloud_icp -> cloud_in" << std::endl;
        transformation_matrix = icp.getFinalTransformation ().cast<double>();
        print4x4Matrix (transformation_matrix);
    }
    else
    {
        PCL_ERROR ("\nICP has not converged.\n");
        return (-1);
    }

    // Visualization
    boost::shared_ptr< pcl::visualization::PCLVisualizer > viewer(new pcl::visualization::PCLVisualizer("ICP demo"));
    // Initialize camera parameters
    viewer->initCameraParameters ();

    // Create two vertically separated viewports
    int v1 (0);
    int v2 (1);
    viewer->createViewPort (0.0, 0.0, 0.5, 1.0, v1);
    viewer->createViewPort (0.5, 0.0, 1.0, 1.0, v2);

    // The color we will be using
    float bckgr_gray_level = 0.0;  // Black
    float txt_gray_lvl = 1.0 - bckgr_gray_level;

    // Original point cloud is white
    pcl::visualization::PointCloudColorHandlerCustom<PointT> cloud_in_color_h (cloud_in, (int) 255 * txt_gray_lvl, (int) 255 * txt_gray_lvl,
                                                                               (int) 255 * txt_gray_lvl);
    viewer->addPointCloud (cloud_in, cloud_in_color_h, "cloud_in_v1", v1);
    viewer->addPointCloud (cloud_in, cloud_in_color_h, "cloud_in_v2", v2);

    // Transformed point cloud is green
    pcl::visualization::PointCloudColorHandlerCustom<PointT> cloud_tr_color_h (cloud_tr, 20, 180, 20);
    viewer->addPointCloud (cloud_tr, cloud_tr_color_h, "cloud_tr_v1", v1);

    // Transformed point cloud is blue
    pcl::visualization::PointCloudColorHandlerCustom<PointT> cloud_in48_color_h (cloud_in_48, 20, 20, 180);
    viewer->addPointCloud (cloud_in_48, cloud_in48_color_h, "cloud_in48_v1", v1);


    // ICP aligned point cloud is red
    pcl::visualization::PointCloudColorHandlerCustom<PointT> cloud_icp_color_h (cloud_icp, 180, 20, 20);
    viewer->addPointCloud (cloud_icp, cloud_icp_color_h, "cloud_icp_v2", v2);

    // Adding text descriptions in each viewport
    viewer->addText ("White: Original point cloud\nGreen: Matrix transformed point cloud", 10, 15, 16, txt_gray_lvl, txt_gray_lvl, txt_gray_lvl, "icp_info_1", v1);
    viewer->addText ("White: Original point cloud\nRed: ICP aligned point cloud", 10, 15, 16, txt_gray_lvl, txt_gray_lvl, txt_gray_lvl, "icp_info_2", v2);

    std::stringstream ss;
    ss << iterations;
    std::string iterations_cnt = "ICP iterations = " + ss.str ();
    viewer->addText (iterations_cnt, 10, 60, 16, txt_gray_lvl, txt_gray_lvl, txt_gray_lvl, "iterations_cnt", v2);

    // Set background color
    viewer->setBackgroundColor (bckgr_gray_level, bckgr_gray_level, bckgr_gray_level, v1);
    viewer->setBackgroundColor (bckgr_gray_level, bckgr_gray_level, bckgr_gray_level, v2);

    // Add a coordinate system to the viewer
    viewer->addCoordinateSystem(1.0, "reference");


    // Set camera position and orientation (view origin)
    //https://www.cnblogs.com/ghjnwk/p/10305796.html
    // Mouse controls: left button, right button, scroll wheel, and dragging with the wheel held down (see the link above)
    viewer->setCameraPosition (0, 0, 0, 0, 0, 0, 0);
    viewer->setSize (2048, 2048);  // Visualiser window size

    // Register keyboard callback :
    viewer->registerKeyboardCallback (&keyboardEventOccurred, (void*) NULL);

    // Display the visualiser
    while (!viewer->wasStopped ())
    {
        viewer->spinOnce ();

        // The user pressed "space" :
        if (next_iteration)
        {
            // The Iterative Closest Point algorithm
            time.tic ();
            icp.align (*cloud_icp);
            std::cout << "Applied 1 ICP iteration in " << time.toc () << " ms" << std::endl;

            if (icp.hasConverged ())
            {
                printf ("\033[11A");  // Go up 11 lines in terminal output.
                printf ("\nICP has converged, score is %+.0e\n", icp.getFitnessScore ());
                std::cout << "\nICP transformation " << ++iterations << " : cloud_icp -> cloud_in" << std::endl;
                transformation_matrix *= icp.getFinalTransformation ().cast<double>();  // WARNING /!\ This is not accurate! For "educational" purpose only!
                print4x4Matrix (transformation_matrix);  // Print the transformation between original pose and current pose

                ss.str ("");
                ss << iterations;
                std::string iterations_cnt = "ICP iterations = " + ss.str ();
                viewer->updateText (iterations_cnt, 10, 60, 16, txt_gray_lvl, txt_gray_lvl, txt_gray_lvl, "iterations_cnt");
                viewer->updatePointCloud (cloud_icp, cloud_icp_color_h, "cloud_icp_v2");
            }
            else
            {
                PCL_ERROR ("\nICP has not converged.\n");
                return (-1);
            }
        }
        next_iteration = false;
    }


    return 0;
}

Further optimization: affineSkew now runs its warp, Gaussian blur, and resize on the GPU via cv::cuda (the USE_OPENCV_GPU branches in the listing below):

W: 2064
H: 1544
GTX 1060 (notebook)

affineSkew num:::129
Running time: 3100.68ms
keypoints num: 85729
keypoints num: 103120
match time: 1884.04ms

#include <vector>
#include <ctime>
#include <string>
#include <iostream>
using std::vector;
using std::iostream;

#include "SiftGPU.h"
#include "GL/gl.h"

#include "opencv2/opencv.hpp"
#include "opencv2/core/core.hpp"
#include "opencv2/imgcodecs/imgcodecs.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/cudawarping.hpp"
#include "opencv2/cudafilters.hpp"
#include "opencv2/cudaarithm.hpp"
#include <opencv2/cudafeatures2d.hpp>
using cv::cuda::GpuMat;

#include <pcl/io/ply_io.h>
#include <pcl/point_types.h>
#include <pcl/registration/icp.h>
#include <pcl/visualization/pcl_visualizer.h>
#include <pcl/console/time.h>   // TicToc
#include <pcl/filters/voxel_grid.h>

//PointXYZRGBNormal PointNormal
typedef pcl::PointXYZRGBNormal PointT;
typedef pcl::PointCloud<PointT> PointCloudT;

bool next_iteration = false;


void
print4x4Matrix (const Eigen::Matrix4d & matrix)
{
    printf ("Rotation matrix :\n");
    printf ("    | %6.3f %6.3f %6.3f | \n", matrix (0, 0), matrix (0, 1), matrix (0, 2));
    printf ("R = | %6.3f %6.3f %6.3f | \n", matrix (1, 0), matrix (1, 1), matrix (1, 2));
    printf ("    | %6.3f %6.3f %6.3f | \n", matrix (2, 0), matrix (2, 1), matrix (2, 2));
    printf ("Translation vector :\n");
    printf ("t = < %6.3f, %6.3f, %6.3f >\n\n", matrix (0, 3), matrix (1, 3), matrix (2, 3));
}

void
keyboardEventOccurred (const pcl::visualization::KeyboardEvent& event,
                       void* nothing)
{
    if (event.getKeySym () == "space" && event.keyDown ())
        next_iteration = true;
}

// Downsample
void voxel_grid_filter(PointCloudT::Ptr cloud_in, float leafsize)
{
    pcl::VoxelGrid<PointT> sor;
    sor.setInputCloud(cloud_in);
    sor.setLeafSize(leafsize, leafsize, leafsize);
    sor.filter(*cloud_in);
    // std::cout << "after filter has " << cloud_in->points.size () << " points\n";
}


class ASiftDetector{

    enum InitStatus{
        INIT_OK,
        INIT_IS_NOT_SUPPORT,
        INIT_VERIFY_FAILED
    };

public:
    ASiftDetector() = default;
    ~ASiftDetector() {
        if(m_siftGpuDetector)
        {
            m_siftGpuDetector->DestroyContextGL();
            delete m_siftGpuDetector;
        }
        if(m_siftGpuMatcher)
        {
            m_siftGpuMatcher->DestroyContextGL();
            delete m_siftGpuMatcher;
        }
    }
    InitStatus create(){
        m_siftGpuDetector = new SiftGPU();

//        char* myargv[4] = {"-fo","-1","-v","1"};
//        m_siftGpuDetector->ParseParam(4,myargv);
        char* myargv[6] = {"-fo","-1","-v","1","-cuda","0"};
        m_siftGpuDetector->ParseParam(6,myargv);

        // Set edge threshold, dog threshold

        if(m_siftGpuDetector->CreateContextGL() != SiftGPU::SIFTGPU_FULL_SUPPORTED){
            std::cerr << "SiftGPU is not supported!" << std::endl;
            return InitStatus::INIT_IS_NOT_SUPPORT;
        }

        m_maxMatch = 50000;

        m_siftGpuMatcher = new SiftMatchGPU(m_maxMatch);
        m_siftGpuMatcher->VerifyContextGL();

        return INIT_OK;
    }

    void affineSkew(double tilt, double phi, cv::Mat& img, cv::Mat& mask, cv::Mat& Ai,
                    const int hh, const int ww,
                    const cv::cuda::GpuMat &GpuImgUp, const cv::cuda::GpuMat &GpuMaskUp,
                    cv::cuda::GpuMat &GpuImgDown, cv::cuda::GpuMat &GpuMaskDown)
    {
        GpuImgDown = GpuImgUp.clone();
        GpuMaskDown = GpuMaskUp.clone();

        int h = hh;
        int w = ww;

#define USE_OPENCV_GPU
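// USE_OPENCV_GPU selects the cv::cuda path in the #ifdef branches below;
// remove the define to fall back to the plain CPU calls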

        cv::Mat A = cv::Mat::eye(2,3, CV_32F);

        if(phi != 0.0)
        {
            phi *= M_PI/180.;
            double s = sin(phi);
            double c = cos(phi);

            A = (cv::Mat_<float>(2,2) << c, -s, s, c);

            cv::Mat corners = (cv::Mat_<float>(4,2) << 0, 0, w, 0, w, h, 0, h);
            cv::Mat tcorners = corners*A.t();
            cv::Mat tcorners_x, tcorners_y;
            tcorners.col(0).copyTo(tcorners_x);
            tcorners.col(1).copyTo(tcorners_y);
            std::vector<cv::Mat> channels;
            channels.push_back(tcorners_x);
            channels.push_back(tcorners_y);
            merge(channels, tcorners);
            // cv::Rect(int x, int y, int width, int height)
            cv::Rect rect = boundingRect(tcorners);
            A =  (cv::Mat_<float>(2,3) << c, -s, -rect.x, s, c, -rect.y);

#ifdef USE_OPENCV_GPU
            //GpuImgDown.upload(img);
            cv::cuda::warpAffine(GpuImgDown, GpuImgDown, A, cv::Size(rect.width, rect.height), cv::INTER_LINEAR, cv::BORDER_REPLICATE);
            GpuImgDown.download(img);
#else
            cv::warpAffine(img, img, A, cv::Size(rect.width, rect.height), cv::INTER_LINEAR, cv::BORDER_REPLICATE);
#endif

        }
        if(tilt != 1.0)
        {
            double s = 0.8*sqrt(tilt*tilt-1);

#ifdef USE_OPENCV_GPU
            //GpuImgDown.upload(img);
            cv::Ptr<cv::cuda::Filter> filter = cv::cuda::createGaussianFilter(GpuImgDown.type(), GpuImgDown.type(), cv::Size(0,0), s, 0.01);
            filter->apply(GpuImgDown, GpuImgDown);
            cv::cuda::resize(GpuImgDown, GpuImgDown, cv::Size(0,0), 1.0/tilt, 1.0, cv::INTER_NEAREST);
            GpuImgDown.download(img);
#else
            cv::GaussianBlur(img, img, cv::Size(0,0), s, 0.01);
            cv::resize(img, img, cv::Size(0,0), 1.0/tilt, 1.0, cv::INTER_NEAREST);
#endif

            A.row(0) = A.row(0)/tilt;
        }
        if(tilt != 1.0 || phi != 0.0)
        {
            h = img.rows;
            w = img.cols;

#ifdef USE_OPENCV_GPU

            cv::cuda::warpAffine(GpuMaskDown, GpuMaskDown, A, cv::Size(w, h), cv::INTER_NEAREST);
            GpuMaskDown.download(mask);
#else
            cv::warpAffine(mask, mask, A, cv::Size(w, h), cv::INTER_NEAREST);
#endif

        }
        invertAffineTransform(A, Ai);
    }

    void detectAndCompute(const cv::Mat &img, vector<cv::KeyPoint> &kptsAll, cv::Mat &descAll){
        // Affine simulation (tilt, phi) parameters
        vector<vector<double>> params;
        vector<double> temp;
        temp.push_back(1.0);
        temp.push_back(0.0);
        params.push_back(temp);
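        // ASIFT-style sampling: tilts t = 2^(tl/2) for tl = 1..5; for each tilt,
        // the in-plane rotation phi advances in steps of 72/t degrees over [0, 180)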
        for(int tl = 1; tl < 6; tl++) {
            double t = pow(2, 0.5 * tl);
            for (double phi = 0; phi < 180; phi += 72.0/t) {
                temp.clear();
                temp.push_back(t);
                temp.push_back(phi);
                params.push_back(temp);
            }
        }

        kptsAll.clear();
        descAll = cv::Mat(0, 128, CV_32F);
        int num=0;
        cv::cuda::GpuMat GpuMaskUp;
        cv::cuda::GpuMat GpuImgUp;
        cv::cuda::GpuMat GpuMaskDown;
        cv::cuda::GpuMat GpuImgDown;
        cv::cuda::GpuMat GpuImgTemp;

        cv::Mat timgTemp = img.clone();
        cv::Mat maskTemp = cv::Mat(img.rows, img.cols, CV_8UC1, cv::Scalar(255));
        GpuImgUp.upload(timgTemp);
        GpuMaskUp.upload(maskTemp);

        int h = img.rows;
        int w = img.cols;

        for(int aff = 0; aff < params.size(); aff++)
        {
            double tilt = params[aff][0];
            double phi = params[aff][1];

            cv::Mat descriptors;
            vector<cv::KeyPoint> kpts;

            cv::Mat Ai;
            cv::Mat timg = timgTemp.clone();
            cv::Mat mask = maskTemp.clone();

            affineSkew(tilt, phi, timg, mask, Ai, h, w, GpuImgUp, GpuMaskUp, GpuImgDown, GpuMaskDown);

            cv::Mat img_disp;
            //cv::cuda::bitwise_and(timg, timg, img_disp, mask);
            cv::bitwise_and(timg, timg, img_disp, mask);

            m_siftGpuDetector->RunSIFT(img_disp.cols,img_disp.rows,img_disp.data, GL_LUMINANCE, GL_UNSIGNED_BYTE);
            //m_siftGpuDetector->RunSIFT(img_disp.cols,img_disp.rows,img_disp.data,GL_RGB,GL_UNSIGNED_BYTE);

            auto num1 = m_siftGpuDetector->GetFeatureNum();
            if (num1 == 0) continue;  // no features detected on this simulated view
            vector<float> des(128 * num1);
            vector<SiftGPU::SiftKeypoint> SiftGPUkeypoints(num1);
            m_siftGpuDetector->GetFeatureVector(&SiftGPUkeypoints[0],&des[0]);
            // Trans to Mat
            cv::Mat m(des);
            descriptors = m.reshape(1,num1).clone();


            for(const SiftGPU::SiftKeypoint &kp : SiftGPUkeypoints){
                cv::Point3f kpt(kp.x, kp.y, 1);
                cv::Mat kpt_t = Ai*cv::Mat(kpt);
                float x = kpt_t.at<float>(0,0);
                float y = kpt_t.at<float>(1,0);
                // Bounds check: clamp keypoints mapped outside the image
                if (x < 0 || y < 0 || x > img.cols-1 || y > img.rows-1)
                {
                    num++;
                    if (x < 0)
                        x = 0;
                    if (y < 0)
                        y = 0;
                    if (x > img.cols-1)
                        x = img.cols-1;
                    if (y > img.rows-1)
                        y = img.rows-1;
                }
                cv::KeyPoint temp(x,y,kp.s,kp.o);
                kpts.push_back(temp);
            }

            kptsAll.insert(kptsAll.end(), kpts.begin(), kpts.end());
            descAll.push_back(descriptors);
        }
        std::cout <<"affineSkew num:::" << num << std::endl;

    }

    void transToRootSift(const cv::Mat &siftFeature,cv::Mat &rootSiftFeature){
        for(int i = 0; i < siftFeature.rows; i ++){
            // Convert to float type
            cv::Mat f;
            siftFeature.row(i).convertTo(f,CV_32FC1);

            cv::normalize(f,f,1,0,cv::NORM_L1); // l1 normalize
            sqrt(f,f); // sqrt-root  root-sift
            rootSiftFeature.push_back(f);
        }
    }

    int gpuMatch(const cv::Mat &des1,const cv::Mat &des2){

        //m_siftGpuMatcher->SetMaxSift();

        m_siftGpuMatcher->SetDescriptors(0,des1.rows,des1.data);
        m_siftGpuMatcher->SetDescriptors(1,des2.rows,des2.data);

        int (*match_buf)[2] = new int[m_maxMatch][2];

        auto matchNum = m_siftGpuMatcher->GetSiftMatch(m_maxMatch,match_buf);

        delete[] match_buf;

        return matchNum;
    }

    void setMaxSift(int max_sift){
        m_siftGpuMatcher->SetMaxSift(max_sift);
        m_maxMatch = max_sift;
    }

    int gpuMatch(const cv::Mat &des1,const cv::Mat &des2,vector<cv::DMatch>& matches){

        m_siftGpuMatcher->SetDescriptors(0,des1.rows,(float*)des1.data);
        m_siftGpuMatcher->SetDescriptors(1,des2.rows,(float*)des2.data);

        int (*match_buf)[2] = new int[m_maxMatch][2];

        std::cout<<"m_maxMatch "<< m_maxMatch << std::endl;

        auto matchNum = m_siftGpuMatcher->GetSiftMatch(5000,match_buf);

        for(int i = 0 ;i  < matchNum; i ++) {
            cv::DMatch dm(match_buf[i][0],match_buf[i][1],0);
            matches.push_back(dm);
        }

        delete[] match_buf;
        return matchNum;
    }

private:
    SiftGPU *m_siftGpuDetector;
    SiftMatchGPU *m_siftGpuMatcher;

    int m_maxMatch;
};


cv::Mat LeftUpRightDownDrawInlier(const cv::Mat &queryImage, const cv::Mat &objectImage,
                                  const vector<cv::Point2f> &queryCoord, const vector<cv::Point2f> &objectCoord, std::string filename)
{
    cv::Size sz = cv::Size(queryImage.size().width + objectImage.size().width,
                           queryImage.size().height + objectImage.size().height);
    cv::Mat matchingImage = cv::Mat::zeros(sz, CV_8UC3);

    // Set the ROIs inside matchingImage and copy the two source images into them
    cv::Mat roi1 = cv::Mat(matchingImage, cv::Rect(0, 0, queryImage.size().width, queryImage.size().height));
    queryImage.copyTo(roi1);
    cv::Mat roi2 = cv::Mat(matchingImage, cv::Rect(queryImage.size().width, queryImage.size().height, objectImage.size().width, objectImage.size().height));
    objectImage.copyTo(roi2);

    // Draw a line for each matched point pair
    for (int i = 0; i < (int)queryCoord.size(); ++i) {
        cv::Point2f pt1 = queryCoord[i];
        cv::Point2f pt2 = objectCoord[i];
        cv::Point2f from = pt1;
        cv::Point2f to = cv::Point(pt2.x + queryImage.size().width, pt2.y + queryImage.size().height);
        line(matchingImage, from, to, cv::Scalar(0, 255, 255));
    }
    cv::imwrite(filename, matchingImage);
    return matchingImage;
}

/*
 * Calculates the least-squares best-fit rigid transform that maps corresponding points A onto B
 * (ported from a Python ICP reference; here m = 3 spatial dimensions)
 * Input:
 *  A: N corresponding 3-D points
 *  B: N corresponding 3-D points
 * Returns:
 *  T: 4x4 homogeneous transformation matrix that maps A onto B
 *  R: 3x3 rotation matrix
 *  t: 3x1 translation vector
 *  https://blog.csdn.net/kewei9/article/details/74157236
 */
void best_fit_transform(std::vector<cv::Point3f> A, std::vector<cv::Point3f> B, cv::Mat & T, cv::Mat & R, cv::Mat & t)
{
    if (A.size()!=B.size())
    {
        std::cout <<"error:::" << "A.size()!=B.size()" << std::endl;
    }
    int pointsNum = A.size();

    //# translate points to their centroids
    double srcSumX = 0.0f;
    double srcSumY = 0.0f;
    double srcSumZ = 0.0f;

    double dstSumX = 0.0f;
    double dstSumY = 0.0f;
    double dstSumZ = 0.0f;

    for (int i = 0; i < pointsNum; ++ i)
    {
        srcSumX += A[i].x;
        srcSumY += A[i].y;
        srcSumZ += A[i].z;

        dstSumX += B[i].x;
        dstSumY += B[i].y;
        dstSumZ += B[i].z;
    }

    cv::Point3f centerSrc, centerDst;

    centerSrc.x = float(srcSumX / pointsNum);
    centerSrc.y = float(srcSumY / pointsNum);
    centerSrc.z = float(srcSumZ / pointsNum);

    centerDst.x = float(dstSumX / pointsNum);
    centerDst.y = float(dstSumY / pointsNum);
    centerDst.z = float(dstSumZ / pointsNum);

    int m  = 3;
    cv::Mat srcMat(m, pointsNum, CV_32FC1);
    cv::Mat dstMat(m, pointsNum, CV_32FC1);

    float* srcDat = (float*)(srcMat.data);
    float* dstDat = (float*)(dstMat.data);
    for (int i = 0; i < pointsNum; ++ i)
    {
        srcDat[i] = A[i].x - centerSrc.x;
        srcDat[pointsNum + i] = A[i].y - centerSrc.y;
        srcDat[pointsNum * 2 + i] = A[i].z - centerSrc.z;

        dstDat[i] = B[i].x - centerDst.x;
        dstDat[pointsNum + i] = B[i].y - centerDst.y;
        dstDat[pointsNum * 2 + i] = B[i].z - centerDst.z;
    }

    //# rotation matrix
    cv::Mat matS = srcMat * dstMat.t();

    cv::Mat matU, matW, matV;
    cv::SVDecomp(matS, matW, matU, matV);
    R = matV.t() * matU.t();

    //# special reflection case
    double det = cv::determinant(R);
    if (det < 0)
    {
        //https://blog.csdn.net/xiaowei_cqu/article/details/19839019
        float* data = matV.ptr<float>(m-1);
        for (int i = 0; i < matV.cols; i++) {
            data[i] = -data[i];  // negate the last row of V^T (the original *data++ = (*data * -1) was undefined behavior)
        }
        R = matV.t() * matU.t();
    }

    //t = centroid_B.T - np.dot(R, centroid_A.T)
    //# translation
    //Mat(centerDst)=3*1
    t = cv::Mat(centerDst) - (R * cv::Mat(centerSrc));

    //T = np.identity(m + 1)
    //T[:m, :m] = R
    //T[:m, m] = t
    //# homogeneous transformation
    T = cv::Mat::eye(4, 4, CV_32FC1);
    R.copyTo(T(cv::Rect(0, 0, 3, 3)));
    t.copyTo(T(cv::Rect(3, 0, 1, 3)));

}
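
// Minimal self-check sketch (illustrative, with made-up points): build B by
// applying a known rotation about Z plus a translation to A; best_fit_transform
// should then recover R close to R0 and t close to t0. Call it from main if desired.
static void best_fit_transform_self_test()
{
    std::vector<cv::Point3f> A = { {0,0,0}, {1,0,0}, {0,1,0}, {0,0,1}, {1,2,3} };
    float c = std::cos(0.3f), s = std::sin(0.3f);
    cv::Mat R0 = (cv::Mat_<float>(3,3) << c,-s,0,  s,c,0,  0,0,1);
    cv::Mat t0 = (cv::Mat_<float>(3,1) << 0.5f, -1.0f, 2.0f);
    std::vector<cv::Point3f> B;
    for (const cv::Point3f &p : A)
    {
        cv::Mat q = R0 * cv::Mat(p) + t0;
        B.push_back(cv::Point3f(q.at<float>(0), q.at<float>(1), q.at<float>(2)));
    }
    cv::Mat T, R, t;
    best_fit_transform(A, B, T, R, t);
    std::cout << "rotation error: " << cv::norm(R - R0)
              << ", translation error: " << cv::norm(t - t0) << std::endl;
}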


int main()
{
    //http://www.kind-of-works.com/WANGFAN_site_data/GPU-ASIFT.html
    // Initialize the OpenCV CUDA device
    int flag = cv::cuda::getCudaEnabledDeviceCount();
    if (flag != 0) { cv::cuda::setDevice(0); }


    cv::Mat OriginalGrayImage = cv::imread("/home/spple/CLionProjects/SIFT-GPU/data/OtherSampleFrame_IMG_Texture_8Bit_48.png", -1);
    cv::Mat grayImg1;
    cv::cvtColor(OriginalGrayImage, grayImg1, cv::COLOR_BGR2GRAY);
    assert(grayImg1.channels() == 1); // single-channel grayscale

    cv::Mat targetGrayImage = cv::imread("/home/spple/CLionProjects/SIFT-GPU/data/OtherSampleFrame_IMG_Texture_8Bit_52.png", -1);
    cv::Mat grayImg2;
    cv::cvtColor(targetGrayImage, grayImg2, cv::COLOR_BGR2GRAY);
    assert(grayImg2.channels() == 1); // single-channel grayscale

    ASiftDetector asiftDetector;
    asiftDetector.create();

    clock_t begin = clock();
    vector<cv::KeyPoint> asiftKeypoints_query;
    cv::Mat asiftDescriptors_query;
    asiftDetector.detectAndCompute(grayImg1, asiftKeypoints_query, asiftDescriptors_query);
    vector<cv::KeyPoint> asiftKeypoints_object;
    cv::Mat asiftDescriptors_object;
    asiftDetector.detectAndCompute(grayImg2, asiftKeypoints_object, asiftDescriptors_object);
    clock_t end = clock();
    std::cout<<"Running time: "<<(double)(end-begin)/CLOCKS_PER_SEC*1000<<"ms"<<std::endl;

    std::cout<<"keypoints num: "<<asiftKeypoints_query.size()<<std::endl;
    std::cout<<"keypoints num: "<<asiftKeypoints_object.size()<<std::endl;


//    begin = clock();
//    cv::Ptr<cv::cuda::DescriptorMatcher> matcher = cv::cuda::DescriptorMatcher::createBFMatcher(cv::NORM_L2);
//    vector<cv::DMatch> matches_all;
//    GpuMat gd1(asiftDescriptors_query), gd2(asiftDescriptors_object);
//    end = clock();
//    std::cout<<"up time: "<<(double)(end-begin)/CLOCKS_PER_SEC*1000<<"ms"<<std::endl;
//
//    begin = clock();
//	matcher->match(gd1, gd2, matches_all);
//    end = clock();
//    std::cout<<"match time: "<<(double)(end-begin)/CLOCKS_PER_SEC*1000<<"ms"<<std::endl;

    begin = clock();
    cv::Ptr<cv::DescriptorMatcher> matcher = cv::DescriptorMatcher::create("FlannBased");
    vector<vector<cv::DMatch> > matches;
    matcher->knnMatch(asiftDescriptors_query,asiftDescriptors_object, matches, 2);

    std::vector<cv::Point2f> queryPoints;
    std::vector<cv::Point2f> trainPoints;
    vector<cv::DMatch> goodMatches;
    //https://blog.csdn.net/linxihe123/article/details/70173476
    vector<std::pair<cv::KeyPoint,cv::KeyPoint> >kp_pairs_temp;
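    // Lowe's ratio test: keep a match only when the best distance is below
    // 0.75x the second-best distance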
    for (size_t i = 0; i < matches.size(); i++)
    {
        if ((matches[i].size() == 2) && (matches[i][0].distance < 0.75 * matches[i][1].distance))
        {
            cv::KeyPoint temp1 = asiftKeypoints_query[matches[i][0].queryIdx];
            cv::KeyPoint temp2 = asiftKeypoints_object[matches[i][0].trainIdx];
            goodMatches.push_back(matches[i][0]);
            queryPoints.push_back(temp1.pt);
            trainPoints.push_back(temp2.pt);
            kp_pairs_temp.push_back(std::make_pair(temp1,temp2));
        }
    }
    end = clock();
    std::cout<<"match time: "<<(double)(end-begin)/CLOCKS_PER_SEC*1000<<"ms"<<std::endl;

    cv::Mat img_RR_matches;
    std::string filename1 = "result_1.png";
    LeftUpRightDownDrawInlier(OriginalGrayImage, targetGrayImage, queryPoints, trainPoints, filename1);

    // findHomography needs at least 4 point correspondences
    const int minNumberMatchesAllowed = 4;
    if ((int)queryPoints.size() < minNumberMatchesAllowed)
        return -1;

    double reprojectionThreshold=20.0;
    std::vector<unsigned char> inliersMask(goodMatches.size());
    cv::Mat homography = cv::findHomography(queryPoints,
                                            trainPoints,
                                            cv::RANSAC,
                                            reprojectionThreshold,
                                            inliersMask);

    vector<std::pair<cv::KeyPoint,cv::KeyPoint> >kp_pairs;
    std::vector<cv::Point2f> newp1;
    std::vector<cv::Point2f> newp2;
    vector<cv::DMatch> goodgoodMatches;
    for (size_t i=0; i<inliersMask.size(); i++)
    {
        if (inliersMask[i])
        {
            goodgoodMatches.push_back(goodMatches[i]);
            kp_pairs.push_back(kp_pairs_temp[i]);
            newp1.push_back(kp_pairs_temp[i].first.pt);
            newp2.push_back(kp_pairs_temp[i].second.pt);
        }

    }
    std::string filename2 = "result_2.png";
    LeftUpRightDownDrawInlier(OriginalGrayImage, targetGrayImage, newp1, newp2, filename2);

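    // findEssentialMat with RANSAC (confidence 0.999, threshold 20 px) yields a
    // second inlier mask, combined below with a depth-validity test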
    std::vector<unsigned char> EssentialMask(newp1.size());
    cv::Mat intrinsics = (cv::Mat_<float>(3, 3) << 2269.16, 0, 1065.54, 0, 2268.4, 799.032, 0, 0, 1);
    cv::Mat Essential = cv::findEssentialMat(newp1, newp2, intrinsics, cv::RANSAC, 0.999, 20, EssentialMask);

    cv::Mat OriginalDepthImage = cv::imread("/home/spple/CLionProjects/SIFT-GPU/data/OtherSampleFrame_IMG_DepthMap_48.tif", -1);
    cv::Mat targetDepthImage = cv::imread("/home/spple/CLionProjects/SIFT-GPU/data/OtherSampleFrame_IMG_DepthMap_52.tif", -1);

    std::vector<cv::Point3f> trp1;
    std::vector<cv::Point3f> trp2;
    std::vector<cv::Point2f> trp1_temp;
    std::vector<cv::Point2f> trp2_temp;
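    // Back-project each matched pixel (u, v) with depth d through the pinhole model:
    // Z = d / K(2,2) (= d here since K(2,2) = 1), X = (u - cx) * Z / fx, Y = (v - cy) * Z / fy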
    for (size_t key=0; key<newp1.size(); key++)
    {
        float x1 = newp1[key].x;
        float y1 = newp1[key].y;
        float x2 = newp2[key].x;
        float y2 = newp2[key].y;
        float d1 = OriginalDepthImage.at<float>(int(y1),int(x1));
        cv::Point3f p1;
        p1.z = float(d1) / intrinsics.ptr<float>(2)[2];
        p1.x = (x1 - intrinsics.ptr<float>(0)[2]) * p1.z / intrinsics.ptr<float>(0)[0];
        p1.y = (y1 - intrinsics.ptr<float>(1)[2]) * p1.z / intrinsics.ptr<float>(1)[1];

        float d2 = targetDepthImage.at<float>(int(y2),int(x2));
        cv::Point3f p2;
        p2.z = float(d2) / intrinsics.ptr<float>(2)[2];
        p2.x = (x2 - intrinsics.ptr<float>(0)[2]) * p2.z / intrinsics.ptr<float>(0)[0];
        p2.y = (y2 - intrinsics.ptr<float>(1)[2]) * p2.z / intrinsics.ptr<float>(1)[1];

        // The > 0.001 test rejects correspondences whose depth is (near) zero
        if(EssentialMask[key] && ((std::abs(p1.x) + std::abs(p1.y) + std::abs(p1.z)) > 0.001) && ((std::abs(p2.x) + std::abs(p2.y) + std::abs(p2.z)) > 0.001))
        {
            trp1.push_back(p1);
            trp2.push_back(p2);

            trp1_temp.push_back(newp1[key]);
            trp2_temp.push_back(newp2[key]);
        }
    }

    std::string filename3 = "result_3.png";
    LeftUpRightDownDrawInlier(OriginalGrayImage, targetGrayImage, trp1_temp, trp2_temp, filename3);

    cv::Mat T, R, t;
    best_fit_transform(trp1, trp2, T, R, t);


    // The point clouds we will be using
    PointCloudT::Ptr cloud_in (new PointCloudT);  // Original point cloud
    PointCloudT::Ptr cloud_in_48 (new PointCloudT);  // Original point cloud
    PointCloudT::Ptr cloud_tr (new PointCloudT);  // Transformed point cloud
    PointCloudT::Ptr cloud_icp (new PointCloudT);  // ICP output point cloud

    float leafsize = 2;
    int iterations = 10;  // Default number of ICP iterations

    pcl::console::TicToc time;
    time.tic ();
    if (pcl::io::loadPLYFile ("/home/spple/CLionProjects/python_asift_gpu/save_ply/SampleFrame_52.ply", *cloud_in) < 0)
    {
        PCL_ERROR ("Error loading cloud %s.\n", "SampleFrame_52.ply");
        return (-1);
    }
    std::cout << "\nLoaded file " << "SampleFrame_52.ply" << " (" << cloud_in->size () << " points) in " << time.toc () << " ms\n" << std::endl;

    time.tic ();
    if (pcl::io::loadPLYFile ("/home/spple/CLionProjects/python_asift_gpu/save_ply/SampleFrame_48.ply", *cloud_in_48) < 0)
    {
        PCL_ERROR ("Error loading cloud %s.\n", "SampleFrame_48.ply");
        return (-1);
    }
    std::cout << "\nLoaded file " << "SampleFrame_48.ply" << " (" << cloud_in_48->size () << " points) in " << time.toc () << " ms\n" << std::endl;

    if (leafsize > 0)
    {
        voxel_grid_filter(cloud_in, leafsize);
        voxel_grid_filter(cloud_in_48, leafsize);
    }

    std::cout << "\nAfter voxel-grid filtering: " << cloud_in->size () << " points in cloud_in, " << cloud_in_48->size () << " points in cloud_in_48\n" << std::endl;

    // Defining a rotation matrix and translation vector
    Eigen::Matrix4d transformation_matrix = Eigen::Matrix4d::Identity ();

    // A rotation matrix (see https://en.wikipedia.org/wiki/Rotation_matrix)
    transformation_matrix (0, 0) = R.at<float>(0,0);
    transformation_matrix (0, 1) = R.at<float>(0,1);
    transformation_matrix (0, 2) = R.at<float>(0,2);

    transformation_matrix (1, 0) = R.at<float>(1,0);
    transformation_matrix (1, 1) = R.at<float>(1,1);
    transformation_matrix (1, 2) = R.at<float>(1,2);

    transformation_matrix (2, 0) = R.at<float>(2,0);
    transformation_matrix (2, 1) = R.at<float>(2,1);
    transformation_matrix (2, 2) = R.at<float>(2,2);

    transformation_matrix (0, 3) = t.at<float>(0,0);
    transformation_matrix (1, 3) = t.at<float>(1,0);
    transformation_matrix (2, 3) = t.at<float>(2,0);

    // Display in terminal the transformation matrix
    std::cout << "Applying this rigid transformation to: cloud_in -> cloud_icp" << std::endl;
    print4x4Matrix (transformation_matrix);

    // Transform cloud 48 (cloud_in_48) by the initial R|t estimate; the transformed
    // cloud 48' (cloud_icp) is the source that ICP registers against the target cloud 52 (cloud_in)
    // Executing the transformation
    pcl::transformPointCloudWithNormals (*cloud_in_48, *cloud_icp, transformation_matrix);
    //pcl::transformPointCloud (*cloud_in_48, *cloud_icp, transformation_matrix);
    *cloud_tr = *cloud_icp;  // We backup cloud_icp into cloud_tr for later use

    // The Iterative Closest Point algorithm
    time.tic ();

//    pcl::IterativeClosestPointWithNormals<pcl::PointXYZRGBNormal, pcl::PointXYZRGBNormal>::Ptr icp ( new pcl::IterativeClosestPointWithNormals<pcl::PointXYZRGBNormal, pcl::PointXYZRGBNormal> () );
//    icp->setMaximumIterations ( iterations );
//    icp->setInputSource ( cloud_icp ); // not cloud_source, but cloud_source_trans!
//    icp->setInputTarget ( cloud_in );
//    icp->setMaxCorrespondenceDistance ( 5 );
//    icp->align ( *cloud_icp );
//    icp->setMaximumIterations (1);

    pcl::IterativeClosestPoint<PointT, PointT> icp;
    icp.setMaximumIterations (iterations);
    icp.setMaxCorrespondenceDistance(5);
    icp.setInputSource (cloud_icp);
    icp.setInputTarget (cloud_in);
    icp.align (*cloud_icp);
    icp.setMaximumIterations (1);  // We set this variable to 1 for the next time we will call .align () function

    std::cout << "Applied " << iterations << " ICP iteration(s) in " << time.toc () << " ms" << std::endl;

    if (icp.hasConverged ())
    {
        std::cout << "\nICP has converged, score is " << icp.getFitnessScore () << std::endl;
        std::cout << "\nICP transformation " << iterations << " : cloud_icp -> cloud_in" << std::endl;
        transformation_matrix = icp.getFinalTransformation ().cast<double>();
        print4x4Matrix (transformation_matrix);
    }
    else
    {
        PCL_ERROR ("\nICP has not converged.\n");
        return (-1);
    }

    // Visualization
    boost::shared_ptr< pcl::visualization::PCLVisualizer > viewer(new pcl::visualization::PCLVisualizer("ICP demo"));
    // Initialize camera parameters
    viewer->initCameraParameters ();

    // Create two vertically separated viewports
    int v1 (0);
    int v2 (1);
    viewer->createViewPort (0.0, 0.0, 0.5, 1.0, v1);
    viewer->createViewPort (0.5, 0.0, 1.0, 1.0, v2);

    // The color we will be using
    float bckgr_gray_level = 0.0;  // Black
    float txt_gray_lvl = 1.0 - bckgr_gray_level;

    // Original point cloud is white
    pcl::visualization::PointCloudColorHandlerCustom<PointT> cloud_in_color_h (cloud_in, (int) 255 * txt_gray_lvl, (int) 255 * txt_gray_lvl,
                                                                               (int) 255 * txt_gray_lvl);
    viewer->addPointCloud (cloud_in, cloud_in_color_h, "cloud_in_v1", v1);
    viewer->addPointCloud (cloud_in, cloud_in_color_h, "cloud_in_v2", v2);

    // Transformed point cloud is green
    pcl::visualization::PointCloudColorHandlerCustom<PointT> cloud_tr_color_h (cloud_tr, 20, 180, 20);
    viewer->addPointCloud (cloud_tr, cloud_tr_color_h, "cloud_tr_v1", v1);

    // The untransformed original point cloud 48 is blue
    pcl::visualization::PointCloudColorHandlerCustom<PointT> cloud_in48_color_h (cloud_in_48, 20, 20, 180);
    viewer->addPointCloud (cloud_in_48, cloud_in48_color_h, "cloud_in48_v1", v1);


    // ICP aligned point cloud is red
    pcl::visualization::PointCloudColorHandlerCustom<PointT> cloud_icp_color_h (cloud_icp, 180, 20, 20);
    viewer->addPointCloud (cloud_icp, cloud_icp_color_h, "cloud_icp_v2", v2);

    // Adding text descriptions in each viewport
    viewer->addText ("White: Original point cloud\nGreen: Matrix transformed point cloud", 10, 15, 16, txt_gray_lvl, txt_gray_lvl, txt_gray_lvl, "icp_info_1", v1);
    viewer->addText ("White: Original point cloud\nRed: ICP aligned point cloud", 10, 15, 16, txt_gray_lvl, txt_gray_lvl, txt_gray_lvl, "icp_info_2", v2);

    std::stringstream ss;
    ss << iterations;
    std::string iterations_cnt = "ICP iterations = " + ss.str ();
    viewer->addText (iterations_cnt, 10, 60, 16, txt_gray_lvl, txt_gray_lvl, txt_gray_lvl, "iterations_cnt", v2);

    // Set background color
    viewer->setBackgroundColor (bckgr_gray_level, bckgr_gray_level, bckgr_gray_level, v1);
    viewer->setBackgroundColor (bckgr_gray_level, bckgr_gray_level, bckgr_gray_level, v2);

    // Add a coordinate system to the viewer
    viewer->addCoordinateSystem(1.0, "reference");


    // Set camera position and orientation (view origin)
    //https://www.cnblogs.com/ghjnwk/p/10305796.html
    // Mouse controls: left button, right button, scroll wheel, and dragging with the wheel held down (see the link above)
    viewer->setCameraPosition (0, 0, 0, 0, 0, 0, 0);
    viewer->setSize (2048, 2048);  // Visualiser window size

    // Register keyboard callback :
    viewer->registerKeyboardCallback (&keyboardEventOccurred, (void*) NULL);

    // Display the visualiser
    while (!viewer->wasStopped ())
    {
        viewer->spinOnce ();

        // The user pressed "space" :
        if (next_iteration)
        {
            // The Iterative Closest Point algorithm
            time.tic ();
            icp.align (*cloud_icp);
            std::cout << "Applied 1 ICP iteration in " << time.toc () << " ms" << std::endl;

            if (icp.hasConverged ())
            {
                printf ("\033[11A");  // Go up 11 lines in terminal output.
                printf ("\nICP has converged, score is %+.0e\n", icp.getFitnessScore ());
                std::cout << "\nICP transformation " << ++iterations << " : cloud_icp -> cloud_in" << std::endl;
                transformation_matrix *= icp.getFinalTransformation ().cast<double>();  // WARNING /!\ This is not accurate! For "educational" purpose only!
                print4x4Matrix (transformation_matrix);  // Print the transformation between original pose and current pose

                ss.str ("");
                ss << iterations;
                std::string iterations_cnt = "ICP iterations = " + ss.str ();
                viewer->updateText (iterations_cnt, 10, 60, 16, txt_gray_lvl, txt_gray_lvl, txt_gray_lvl, "iterations_cnt");
                viewer->updatePointCloud (cloud_icp, cloud_icp_color_h, "cloud_icp_v2");
            }
            else
            {
                PCL_ERROR ("\nICP has not converged.\n");
                return (-1);
            }
        }
        next_iteration = false;
    }


    return 0;
}

 

 

Python-C extension integration:

sudo vim /etc/ld.so.conf.d/opencv.conf

/media/spple/新加卷1/opencv-3.4.1/install_4/lib

/home/spple/CLionProjects/SIFT-GPU/bin

sudo ldconfig -v

python setup.py build

python setup.py install --record log.txt

cat log.txt | xargs rm -rf

setup.py

from setuptools import setup
from torch.utils.cpp_extension import CppExtension, BuildExtension
import os

siftgpu_inc_dir = '/home/spple/CLionProjects/SIFT-GPU/src/SiftGPU'
siftgpu_lib_dir = '/home/spple/CLionProjects/SIFT-GPU/bin'

opencv_inc_dir = '/media/spple/新加卷1/opencv-3.4.1/install_4/include'
opencv_lib_dir = '/media/spple/新加卷1/opencv-3.4.1/install_4/lib'

# opencv_inc_dir = '/usr/include'
# opencv_lib_dir = '/usr/lib/x86_64-linux-gnu'

if len(siftgpu_inc_dir) == 0:
	print("Error: You have to provide a SiftGPU include directory. Edit this file.")
	exit()
if len(siftgpu_lib_dir) == 0:
	print("Error: You have to provide a SiftGPU library directory. Edit this file.")
	exit()

if len(opencv_inc_dir) == 0:
	print("Error: You have to provide an OpenCV include directory. Edit this file.")
	exit()
if len(opencv_lib_dir) == 0:
	print("Error: You have to provide an OpenCV library directory. Edit this file.")
	exit()
 
setup(
	name='siftgpu',
	ext_modules=[CppExtension(
		name='siftgpu',
		sources=['siftgpu.cpp'],
		include_dirs=[siftgpu_inc_dir, opencv_inc_dir],
		library_dirs=[siftgpu_lib_dir, opencv_lib_dir],
		libraries=['siftgpu', 'opencv_core', 'opencv_calib3d', 'opencv_imgproc', 'opencv_imgcodecs', 'opencv_highgui', 'opencv_features2d', 'opencv_cudawarping','opencv_cudafeatures2d'],
		extra_compile_args=['-fopenmp']
		)],		
	cmdclass={'build_ext': BuildExtension})

siftgpu.cpp

#include <Python.h>
#include <numpy/arrayobject.h>

#include <vector>
#include <ctime>
#include <string>
#include <iostream>
using std::vector;
using std::iostream;

#include "SiftGPU.h"
#include "GL/gl.h"

#include "opencv2/core/core.hpp"
#include "opencv2/imgcodecs/imgcodecs.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/cudawarping.hpp"
#include "opencv2/cudafilters.hpp"
using cv::cuda::GpuMat;

class ASiftDetector{

    enum InitStatus{
        INIT_OK,
        INIT_IS_NOT_SUPPORT,
        INIT_VERIFY_FAILED
    };

public:
    ASiftDetector() = default;
    ~ASiftDetector() {
        if(m_siftGpuDetector)
        {
            m_siftGpuDetector->DestroyContextGL();
            delete m_siftGpuDetector;
        }
        if(m_siftGpuMatcher)
        {
            m_siftGpuMatcher->DestroyContextGL();
            delete m_siftGpuMatcher;
        }
    }
    InitStatus create(){
        m_siftGpuDetector = new SiftGPU();

//        char* myargv[4] = {"-fo","-1","-v","1"};
//        m_siftGpuDetector->ParseParam(4,myargv);
        char* myargv[6] = {"-fo","-1","-v","1","-cuda","0"};
        m_siftGpuDetector->ParseParam(6, myargv);

        // Set edge threshold, dog threshold

        if(m_siftGpuDetector->CreateContextGL() != SiftGPU::SIFTGPU_FULL_SUPPORTED){
            std::cerr << "SiftGPU is not supported!" << std::endl;
            return InitStatus::INIT_IS_NOT_SUPPORT;
        }

        m_maxMatch = 50000;

        m_siftGpuMatcher = new SiftMatchGPU(m_maxMatch);
        m_siftGpuMatcher->VerifyContextGL();

        return INIT_OK;
    }

    void affineSkew(double tilt, double phi, cv::Mat& img, cv::Mat& mask, cv::Mat& Ai)
    {
        int h = img.rows;
        int w = img.cols;

        mask = cv::Mat(h, w, CV_8UC1, cv::Scalar(255));

        cv::Mat A = cv::Mat::eye(2,3, CV_32F);

        if(phi != 0.0)
        {
            phi *= M_PI/180.;
            double s = sin(phi);
            double c = cos(phi);

            A = (cv::Mat_<float>(2,2) << c, -s, s, c);

            cv::Mat corners = (cv::Mat_<float>(4,2) << 0, 0, w, 0, w, h, 0, h);
            cv::Mat tcorners = corners*A.t();
            cv::Mat tcorners_x, tcorners_y;
            tcorners.col(0).copyTo(tcorners_x);
            tcorners.col(1).copyTo(tcorners_y);
            std::vector<cv::Mat> channels;
            channels.push_back(tcorners_x);
            channels.push_back(tcorners_y);
            merge(channels, tcorners);

            cv::Rect rect = boundingRect(tcorners);
            A =  (cv::Mat_<float>(2,3) << c, -s, -rect.x, s, c, -rect.y);

            cv::warpAffine(img, img, A, cv::Size(rect.width, rect.height), cv::INTER_LINEAR, cv::BORDER_REPLICATE);
        }
        if(tilt != 1.0)
        {
            double s = 0.8*sqrt(tilt*tilt-1);
            cv::GaussianBlur(img, img, cv::Size(0,0), s, 0.01);
            cv::resize(img, img, cv::Size(0,0), 1.0/tilt, 1.0, cv::INTER_NEAREST);
            A.row(0) = A.row(0)/tilt;
        }
        if(tilt != 1.0 || phi != 0.0)
        {
            h = img.rows;
            w = img.cols;
            cv::warpAffine(mask, mask, A, cv::Size(w, h), cv::INTER_NEAREST);
        }
        invertAffineTransform(A, Ai);
    }

    void detectAndCompute(const cv::Mat &img, vector<cv::KeyPoint> &kptsAll, cv::Mat &descAll){

        assert(img.channels() == 1);
        // Affine simulation (tilt, phi) parameters
        vector<vector<double>> params;
        vector<double> temp;
        temp.push_back(1.0);
        temp.push_back(0.0);
        params.push_back(temp);
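        // ASIFT-style sampling: tilts t = 2^(tl/2) for tl = 1..5; for each tilt,
        // the in-plane rotation phi advances in steps of 72/t degrees over [0, 180)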
        for(int tl = 1; tl < 6; tl++) {
            double t = pow(2, 0.5 * tl);
            for (double phi = 0; phi < 180; phi += 72.0/t) {
                temp.clear();
                temp.push_back(t);
                temp.push_back(phi);
                params.push_back(temp);
            }
        }

        kptsAll.clear();
        descAll = cv::Mat(0, 128, CV_32F);
        int num = 0;

        for(int aff = 0; aff < params.size(); aff++)
        {
            double tilt = params[aff][0];
            double phi = params[aff][1];

            cv::Mat descriptors;
            vector<cv::KeyPoint> kpts;

            cv::Mat timg, mask, Ai;
            img.copyTo(timg);

            affineSkew(tilt, phi, timg, mask, Ai);
            cv::Mat img_disp;
            cv::bitwise_and(timg, timg, img_disp, mask);

            m_siftGpuDetector->RunSIFT(img_disp.cols,img_disp.rows,img_disp.data, GL_LUMINANCE, GL_UNSIGNED_BYTE);
            //m_siftGpuDetector->RunSIFT(img_disp.cols,img_disp.rows,img_disp.data,GL_RGB,GL_UNSIGNED_BYTE);

            auto num1 = m_siftGpuDetector->GetFeatureNum();
            if (num1 == 0) continue;  // no features detected on this simulated view
            vector<float> des(128 * num1);
            vector<SiftGPU::SiftKeypoint> SiftGPUkeypoints(num1);
            m_siftGpuDetector->GetFeatureVector(&SiftGPUkeypoints[0],&des[0]);
            // Trans to Mat
            cv::Mat m(des);
            descriptors = m.reshape(1,num1).clone();


            for(const SiftGPU::SiftKeypoint &kp : SiftGPUkeypoints){
                cv::Point3f kpt(kp.x, kp.y, 1);
                cv::Mat kpt_t = Ai*cv::Mat(kpt);
                float x = kpt_t.at<float>(0,0);
                float y = kpt_t.at<float>(1,0);
                // Bounds check: clamp keypoints mapped outside the image
                if (x < 0 || y < 0 || x > img.cols-1 || y > img.rows-1)
                {
                    num++;
                    if (x < 0)
                        x = 0;
                    if (y < 0)
                        y = 0;
                    if (x > img.cols-1)
                        x = img.cols-1;
                    if (y > img.rows-1)
                        y = img.rows-1;
                }
                cv::KeyPoint temp(x,y,kp.s,kp.o);
                kpts.push_back(temp);
            }

            kptsAll.insert(kptsAll.end(), kpts.begin(), kpts.end());
            descAll.push_back(descriptors);
        }
        std::cout <<"affineSkew num:::" << num << std::endl;

    }

    void transToRootSift(const cv::Mat &siftFeature,cv::Mat &rootSiftFeature){
        for(int i = 0; i < siftFeature.rows; i ++){
            // Convert to float type
            cv::Mat f;
            siftFeature.row(i).convertTo(f,CV_32FC1);

            cv::normalize(f,f,1,0,cv::NORM_L1); // l1 normalize
            sqrt(f,f); // sqrt-root  root-sift
            rootSiftFeature.push_back(f);
        }
    }

    int gpuMatch(const cv::Mat &des1,const cv::Mat &des2){

        //m_siftGpuMatcher->SetMaxSift();

        m_siftGpuMatcher->SetDescriptors(0,des1.rows,des1.data);
        m_siftGpuMatcher->SetDescriptors(1,des2.rows,des2.data);

        int (*match_buf)[2] = new int[m_maxMatch][2];

        auto matchNum = m_siftGpuMatcher->GetSiftMatch(m_maxMatch,match_buf);

        delete[] match_buf;

        return matchNum;
    }

    void setMaxSift(int max_sift){
        m_siftGpuMatcher->SetMaxSift(max_sift);
        m_maxMatch = max_sift;
    }

    int gpuMatch(const cv::Mat &des1,const cv::Mat &des2,vector<cv::DMatch>& matches){

        m_siftGpuMatcher->SetDescriptors(0,des1.rows,(float*)des1.data);
        m_siftGpuMatcher->SetDescriptors(1,des2.rows,(float*)des2.data);

        int (*match_buf)[2] = new int[m_maxMatch][2];

        std::cout<<"m_maxMatch "<< m_maxMatch << std::endl;

        auto matchNum = m_siftGpuMatcher->GetSiftMatch(5000,match_buf);

        for(int i = 0 ;i  < matchNum; i ++) {
            cv::DMatch dm(match_buf[i][0],match_buf[i][1],0);
            matches.push_back(dm);
        }

        delete[] match_buf;
        return matchNum;
    }

private:
    SiftGPU *m_siftGpuDetector;
    SiftMatchGPU *m_siftGpuMatcher;

    int m_maxMatch;
};

static PyObject * siftgpu_match(PyObject *self, PyObject *args) {

    PyObject *size1, *size2;
    PyArrayObject *image1, *image2;

//    if (!PyArg_ParseTuple(args, "O!O!", &PyTuple_Type, &size, &PyArray_Type, &image)) {
//        std::cout << "111111111" << std::endl;
//        // Returning NULL from the wrapper raises a TypeError on the Python side
//        return NULL;
//    }
    if (!PyArg_ParseTuple(args, "OOOO", &size1, &image1, &size2, &image2)) {
        return NULL;
    }
    int rows1 = PyLong_AsLong(PyTuple_GetItem(size1 ,0));
    int cols1 = PyLong_AsLong(PyTuple_GetItem(size1 ,1));
    // Use heap buffers instead of variable-length stack arrays: a 2064x1544
    // image is ~3 MB per buffer, which risks overflowing the stack
    std::vector<unsigned char> my_arr1(rows1 * cols1);
    for(size_t length = 0; length < my_arr1.size(); length++){
        my_arr1[length] = *(unsigned char *)PyArray_GETPTR1(image1, length);
    }
    int rows2 = PyLong_AsLong(PyTuple_GetItem(size2 ,0));
    int cols2 = PyLong_AsLong(PyTuple_GetItem(size2 ,1));
    std::vector<unsigned char> my_arr2(rows2 * cols2);
    for(size_t length = 0; length < my_arr2.size(); length++){
        my_arr2[length] = *(unsigned char *)PyArray_GETPTR1(image2, length);
    }


    cv::Mat grayImg1 = cv::Mat(cv::Size(cols1, rows1), CV_8UC1, my_arr1.data());
    cv::Mat grayImg2 = cv::Mat(cv::Size(cols2, rows2), CV_8UC1, my_arr2.data());

    ASiftDetector asiftDetector;
    asiftDetector.create();
    clock_t begin = clock();
    vector<cv::KeyPoint> asiftKeypoints_query;
    cv::Mat asiftDescriptors_query;
    asiftDetector.detectAndCompute(grayImg1, asiftKeypoints_query, asiftDescriptors_query);
    vector<cv::KeyPoint> asiftKeypoints_object;
    cv::Mat asiftDescriptors_object;
    asiftDetector.detectAndCompute(grayImg2, asiftKeypoints_object, asiftDescriptors_object);
    clock_t end = clock();
    std::cout<<"Running time: "<<(double)(end-begin)/CLOCKS_PER_SEC*1000<<"ms"<<std::endl;


    std::cout<<"keypoints num: "<<asiftKeypoints_query.size()<<std::endl;
    std::cout<<"keypoints num: "<<asiftKeypoints_object.size()<<std::endl;

    begin = clock();
    cv::Ptr<cv::DescriptorMatcher> matcher = cv::DescriptorMatcher::create("FlannBased");
    vector<vector<cv::DMatch> > matches;
    matcher->knnMatch(asiftDescriptors_query,asiftDescriptors_object, matches, 2);

    std::vector<cv::Point2f> queryPoints;
    std::vector<cv::Point2f> trainPoints;
    vector<cv::DMatch> goodMatches;
    //https://blog.csdn.net/linxihe123/article/details/70173476
    vector<std::pair<cv::KeyPoint,cv::KeyPoint> >kp_pairs_temp;
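    // Lowe's ratio test: keep a match only when the best distance is below
    // 0.75x the second-best distance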
    for (size_t i = 0; i < matches.size(); i++)
    {
        if ((matches[i].size() == 2) && (matches[i][0].distance < 0.75 * matches[i][1].distance))
        {
            cv::KeyPoint temp1 = asiftKeypoints_query[matches[i][0].queryIdx];
            cv::KeyPoint temp2 = asiftKeypoints_object[matches[i][0].trainIdx];
            goodMatches.push_back(matches[i][0]);
            queryPoints.push_back(temp1.pt);
            trainPoints.push_back(temp2.pt);
            kp_pairs_temp.push_back(std::make_pair(temp1,temp2));
        }
    }
    end = clock();
    std::cout<<"match time: "<<(double)(end-begin)/CLOCKS_PER_SEC*1000<<"ms"<<std::endl;

    PyObject* retval;
    retval = (PyObject *)Py_BuildValue("i", 1000);
    return retval;
}


static PyMethodDef
siftgpuMethods[] = {
    {"match", siftgpu_match, METH_VARARGS},
    {NULL, NULL},
};

static struct PyModuleDef siftgpuModule = {
    PyModuleDef_HEAD_INIT,
    "siftgpu",
    NULL,
    -1,
    siftgpuMethods
};

PyMODINIT_FUNC PyInit_siftgpu(void)
{
    import_array();  // initialize the NumPy C API before any PyArray_* call
    return PyModule_Create(&siftgpuModule);
}
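
A minimal usage sketch (stand-in image paths; assumes the module built above is installed): match expects, for each image, its (rows, cols) shape tuple plus the flattened 8-bit pixel buffer, and currently returns only the placeholder integer built by Py_BuildValue above.

import cv2
import siftgpu

# Stand-in paths; any pair of 8-bit grayscale images works
img1 = cv2.imread("frame_48.png", cv2.IMREAD_GRAYSCALE)
img2 = cv2.imread("frame_52.png", cv2.IMREAD_GRAYSCALE)

# One (rows, cols) tuple and one flattened uint8 buffer per image
ret = siftgpu.match(img1.shape, img1.ravel(), img2.shape, img2.ravel())
print(ret)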

 
