PCL1.9.1和1.9.0版本会报错,记得添加这两个头文件
#include <pcl/visualization/keyboard_event.h>
#include <pcl/visualization/pcl_visualizer.h>

void keyboardEventOccurred(const pcl::visualization::KeyboardEvent& event, void* nothing)
{
if (event.getKeySym () == "space" && event.keyDown ())
next_iteration = true;
}
viewer->registerKeyboardCallback (&keyboardEventOccurred, (void*) NULL);

或者有的人这么写:
void keyboardEventOccurred(const pcl::visualization::KeyboardEvent &event, void* nothing)
{
boost::shared_ptr<pcl::visualization::PCLVisualizer> viewer = *static_cast<boost::shared_ptr<pcl::visualization::PCLVisualizer> *>(nothing);
if (event.getKeySym () == "space" && event.keyDown ())
next_iteration = true;
}
viewer->registerKeyboardCallback(&keyboardEventOccurred, (void*)&viewer);
R、t、T 的求解代码(最小二乘 best-fit 刚体变换):
/*
 * Least-squares best-fit rigid transform (Kabsch / SVD method) mapping
 * point set A onto point set B in 3-D.
 * Input:
 *   A, B : corresponding 3-D points; must have the same, non-zero size
 * Output (all CV_32F):
 *   T : 4x4 homogeneous transform combining R and t
 *   R : 3x3 rotation matrix
 *   t : 3x1 translation vector
 * Outputs are left untouched when the input is invalid.
 */
void best_fit_transform(std::vector<cv::Point3f> A, std::vector<cv::Point3f> B, cv::Mat & T, cv::Mat & R, cv::Mat & t)
{
    if (A.size() != B.size())
    {
        std::cout << "error:::" << "A.size()!=B.size()" << std::endl;
        return; // FIX: original fell through and indexed B out of range
    }
    int pointsNum = A.size();
    if (pointsNum == 0)
        return; // FIX: avoid division by zero on empty input
    // Centroids of both point sets.
    double srcSumX = 0.0, srcSumY = 0.0, srcSumZ = 0.0;
    double dstSumX = 0.0, dstSumY = 0.0, dstSumZ = 0.0;
    for (int i = 0; i < pointsNum; ++i)
    {
        srcSumX += A[i].x; srcSumY += A[i].y; srcSumZ += A[i].z;
        dstSumX += B[i].x; dstSumY += B[i].y; dstSumZ += B[i].z;
    }
    cv::Point3f centerSrc, centerDst;
    centerSrc.x = float(srcSumX / pointsNum);
    centerSrc.y = float(srcSumY / pointsNum);
    centerSrc.z = float(srcSumZ / pointsNum);
    centerDst.x = float(dstSumX / pointsNum);
    centerDst.y = float(dstSumY / pointsNum);
    centerDst.z = float(dstSumZ / pointsNum);
    // Centered coordinates, one column per point (3 x N, row-major buffer).
    const int m = 3;
    cv::Mat srcMat(m, pointsNum, CV_32FC1);
    cv::Mat dstMat(m, pointsNum, CV_32FC1);
    float* srcDat = (float*)(srcMat.data);
    float* dstDat = (float*)(dstMat.data);
    for (int i = 0; i < pointsNum; ++i)
    {
        srcDat[i] = A[i].x - centerSrc.x;
        srcDat[pointsNum + i] = A[i].y - centerSrc.y;
        srcDat[pointsNum * 2 + i] = A[i].z - centerSrc.z;
        dstDat[i] = B[i].x - centerDst.x;
        dstDat[pointsNum + i] = B[i].y - centerDst.y;
        dstDat[pointsNum * 2 + i] = B[i].z - centerDst.z;
    }
    // Rotation from the SVD of the cross-covariance matrix.
    // Note: cv::SVDecomp returns V transposed in matV.
    cv::Mat matS = srcMat * dstMat.t();
    cv::Mat matU, matW, matV;
    cv::SVDecomp(matS, matW, matU, matV);
    R = matV.t() * matU.t();
    // Special reflection case: negate the last row of V^T so det(R) = +1.
    double det = cv::determinant(R);
    if (det < 0)
    {
        float* lastRow = matV.ptr<float>(m - 1);
        for (int i = 0; i < matV.cols; i++)
        {
            lastRow[i] = -lastRow[i]; // FIX: was `*data++ = (*data * (-1));` (eval-order hazard)
        }
        R = matV.t() * matU.t();
    }
    // Translation: t = centroid_B - R * centroid_A  (Mat(Point3f) is 3x1).
    t = Mat(centerDst) - (R * Mat(centerSrc));
    // Assemble the homogeneous 4x4 transform [R | t; 0 0 0 1].
    float datM[] = {R.at<float>(0,0), R.at<float>(0,1), R.at<float>(0,2), t.at<float>(0,0),
                    R.at<float>(1,0), R.at<float>(1,1), R.at<float>(1,2), t.at<float>(1,0),
                    R.at<float>(2,0), R.at<float>(2,1), R.at<float>(2,2), t.at<float>(2,0),
                    0, 0, 0, 1};
    cv::Mat matM(4, 4, CV_32FC1, datM);
    T = matM.clone();
}
https://medium.com/@deepanshut041/introduction-to-orb-oriented-fast-and-rotated-brief-4220e8ec40cf
https://github.com/deepanshut041/feature-detection
https://ww2.mathworks.cn/help/vision/ref/extractfeatures.html
https://github.com/jl626/ORB-MATLAB
https://www.epfl.ch/labs/cvlab/software/descriptors-and-keypoints/
Image Descriptors and Keypoint Detection
Learned Local Features
Keypoint Detection
Floating-point Descriptors
Binary Descriptors
- BinBoost: Boosting Binary Keypoint Descriptors
- LDAHash: Binary Descriptor for Large-scale Applications
- BRIEF: Binary Descriptor for Low-cost Real-time Applications
- D-Brief: Efficient Discriminative Projections for Compact Binary Descriptors
Others
关于二进制描述符的教程
描述符简介
https://gilscvblog.com/2013/08/18/a-short-introduction-to-descriptors/#more-3
https://gilscvblog.com/2013/08/26/tutorial-on-binary-descriptors-part-1/
https://gilscvblog.com/2013/09/19/a-tutorial-on-binary-descriptors-part-2-the-brief-descriptor/
https://gilscvblog.com/2013/10/04/a-tutorial-on-binary-descriptors-part-3-the-orb-descriptor/
https://gilscvblog.com/2013/11/08/a-tutorial-on-binary-descriptors-part-4-the-brisk-descriptor/
https://gilscvblog.com/2013/12/09/a-tutorial-on-binary-descriptors-part-5-the-freak-descriptor/
向BRIEF描述符添加旋转不变性
https://gilscvblog.com/2015/01/02/adding-rotation-invariance-to-the-brief-descriptor/
二进制描述符的性能评估–引入LATCH描述符
https://docs.nvidia.com/isaac/isaac/packages/orb/doc/orb.html
论文地址: GMS: Grid-based Motion Statistics for Fast, Ultra-robust Feature Correspondence
工程地址:github链接
CMakeLists.txt
demo.cpp
gms_matcher.h
makeynh.sh
readme.txt
readme.txt
opencv3.4.1 CUDA9.0
makeynh.sh
#!/bin/bash
# Configure OpenCV 3.4.1 with CUDA 9.0 support; most non-essential modules
# are disabled to speed up the build.
# FIX: "BUILD_opencv_vis" -> "BUILD_opencv_viz" and
#      "java_bindings_genetator" -> "java_bindings_generator";
#      the misspelled options were silently ignored by CMake.
cmake -D CMAKE_BUILD_TYPE=RELEASE \
-D BUILD_PERF_TESTS=OFF \
-D BUILD_TESTS=OFF \
-D CMAKE_INSTALL_PREFIX="../install_3" \
-D WITH_MATLAB=OFF \
-D WITH_QT=OFF \
-D WITH_CUDA=ON \
-D ENABLE_FAST_MATH=ON \
-D CUDA_FAST_MATH=ON \
-D CUDA_GENERATION=Pascal \
-D WITH_FFMPEG=1 \
-D WITH_CUFFT=ON \
-D WITH_EIGEN=ON \
-D WITH_CUBLAS=1 \
-D WITH_OPENGL=ON \
-D WITH_TBB=ON \
-D WITH_LAPACK=OFF \
-D BUILD_opencv_viz=OFF \
-D BUILD_opencv_videostab=OFF \
-D BUILD_opencv_videoio=OFF \
-D BUILD_opencv_video=OFF \
-D BUILD_opencv_ts=OFF \
-D BUILD_opencv_superres=OFF \
-D BUILD_opencv_stitching=OFF \
-D BUILD_opencv_shape=OFF \
-D BUILD_opencv_python_bindings_generator=OFF \
-D BUILD_opencv_python=OFF \
-D BUILD_opencv_photo=OFF \
-D BUILD_opencv_objdetect=OFF \
-D BUILD_opencv_ml=OFF \
-D BUILD_opencv_js=OFF \
-D BUILD_opencv_java_bindings_generator=OFF \
-D BUILD_opencv_dnn=OFF \
-D BUILD_opencv_cudastereo=OFF \
-D BUILD_opencv_cudaoptflow=OFF \
-D BUILD_opencv_cudaobjdetect=OFF \
-D BUILD_opencv_cudalegacy=OFF \
-D BUILD_opencv_cudabgsegm=OFF \
-D CUDA_NVCC_FLAGS=--expt-relaxed-constexpr \
-D ENABLE_PRECOMPILED_HEADERS=OFF ..
CMakeLists.txt
# Build the GMS+ICP demo against a locally-built OpenCV 3.4.x.
cmake_minimum_required(VERSION 3.12)
project(GMS_ICP)
set(CMAKE_CXX_STANDARD 14)
# Point OpenCV_DIR at the install tree containing OpenCVConfig.cmake.
#set(OpenCV_DIR "/media/spple/新加卷/Dataset/opencv-3.4.8/install/share/OpenCV/")
set(OpenCV_DIR "/media/spple/新加卷2/opencv-3.4.1/install_2/share/OpenCV/")
find_package(OpenCV REQUIRED)
INCLUDE_DIRECTORIES(
${OpenCV_INCLUDE_DIRS}
)
# The header is listed only so IDEs index it; demo.cpp is the sole TU.
add_executable(GMS_ICP gms_matcher.h demo.cpp)
target_link_libraries(GMS_ICP ${OpenCV_LIBS})
gms_matcher.h
#pragma once
#include <opencv2/opencv.hpp>
#include <vector>
#include <iostream>
#include <ctime>
using namespace std;
using namespace cv;
// Threshold multiplier (alpha in the GMS paper) used in VerifyCellPairs.
#define THRESH_FACTOR 6
// 8 possible rotation and each one is 3 X 3.
// Row r lists, for rotation hypothesis r+1, which right-grid neighbor
// (1..9, row-major over the 3x3 neighborhood) corresponds to each of the
// 9 left-grid neighbors.
const int mRotationPatterns[8][9] = {
    1,2,3,
    4,5,6,
    7,8,9,
    4,1,2,
    7,5,3,
    8,9,6,
    7,4,1,
    8,5,2,
    9,6,3,
    8,7,4,
    9,5,1,
    6,3,2,
    9,8,7,
    6,5,4,
    3,2,1,
    6,9,8,
    3,5,7,
    2,1,4,
    3,6,9,
    2,5,8,
    1,4,7,
    2,3,6,
    1,5,9,
    4,7,8
};
// 5 level scales: candidate right-grid/left-grid size ratios tried when
// scale search is enabled.
const double mScaleRatios[5] = { 1.0, 1.0 / 2, 1.0 / sqrt(2.0), sqrt(2.0), 2.0 };
// Grid-based Motion Statistics (GMS) match filter.
// Construct with both keypoint sets, their image sizes, and the nearest-
// neighbor matches; then call GetInlierMask() to obtain the inlier mask.
class gms_matcher
{
public:
    // OpenCV Keypoints & Correspond Image Size & Nearest Neighbor Matches
    gms_matcher(const vector<KeyPoint> &vkp1, const Size size1, const vector<KeyPoint> &vkp2, const Size size2, const vector<DMatch> &vDMatches)
    {
        // Input initialize: normalize coordinates and flatten the matches.
        NormalizePoints(vkp1, size1, mvP1);
        NormalizePoints(vkp2, size2, mvP2);
        mNumberMatches = vDMatches.size();
        ConvertMatches(vDMatches, mvMatches);
        // Grid initialize: the left image always uses a fixed 20x20 grid.
        mGridSizeLeft = Size(20, 20);
        mGridNumberLeft = mGridSizeLeft.width * mGridSizeLeft.height;
        // Initialize the neighbor table of the left grid.
        mGridNeighborLeft = Mat::zeros(mGridNumberLeft, 9, CV_32SC1);
        InitalizeNiehbors(mGridNeighborLeft, mGridSizeLeft);
    };
    ~gms_matcher() {};
private:
    // Normalized Points (coordinates divided by image width/height)
    vector<Point2f> mvP1, mvP2;
    // Matches as (queryIdx, trainIdx) pairs
    vector<pair<int, int> > mvMatches;
    // Number of Matches
    size_t mNumberMatches;
    // Grid Size
    Size mGridSizeLeft, mGridSizeRight;
    int mGridNumberLeft;
    int mGridNumberRight;
    // x : left grid idx
    // y : right grid idx
    // value : how many matches from idx_left to idx_right
    Mat mMotionStatistics;
    // Number of matches whose left keypoint falls into each left cell
    vector<int> mNumberPointsInPerCellLeft;
    // Index : grid_idx_left
    // Value : grid_idx_right (-1 = no matches, -2 = rejected by threshold)
    vector<int> mCellPairs;
    // Every Matches has a cell-pair
    // first : grid_idx_left
    // second : grid_idx_right
    vector<pair<int, int> > mvMatchPairs;
    // Inlier Mask for output
    vector<bool> mvbInlierMask;
    // 9-column neighbor-index tables, one row per grid cell (-1 = off-grid)
    Mat mGridNeighborLeft;
    Mat mGridNeighborRight;
public:
    // Get Inlier Mask
    // Return number of inliers
    int GetInlierMask(vector<bool> &vbInliers, bool WithScale = false, bool WithRotation = false);
private:
    // Normalize Key Points to Range(0 - 1)
    void NormalizePoints(const vector<KeyPoint> &kp, const Size &size, vector<Point2f> &npts) {
        const size_t numP = kp.size();
        const int width = size.width;
        const int height = size.height;
        npts.resize(numP);
        for (size_t i = 0; i < numP; i++)
        {
            npts[i].x = kp[i].pt.x / width;
            npts[i].y = kp[i].pt.y / height;
        }
    }
    // Convert OpenCV DMatch to Match (pair<int, int>)
    void ConvertMatches(const vector<DMatch> &vDMatches, vector<pair<int, int> > &vMatches) {
        vMatches.resize(mNumberMatches);
        for (size_t i = 0; i < mNumberMatches; i++)
        {
            vMatches[i] = pair<int, int>(vDMatches[i].queryIdx, vDMatches[i].trainIdx);
        }
    }
    // Map a normalized left-image point to its cell index for one of four
    // half-cell-shifted grids (type 1: no shift, 2: x-shift, 3: y-shift,
    // 4: both).  Returns -1 when the point falls outside the shifted grid.
    int GetGridIndexLeft(const Point2f &pt, int type) {
        int x = 0, y = 0;
        if (type == 1) {
            x = floor(pt.x * mGridSizeLeft.width);
            y = floor(pt.y * mGridSizeLeft.height);
            if (y >= mGridSizeLeft.height || x >= mGridSizeLeft.width){
                return -1;
            }
        }
        if (type == 2) {
            x = floor(pt.x * mGridSizeLeft.width + 0.5);
            y = floor(pt.y * mGridSizeLeft.height);
            if (x >= mGridSizeLeft.width || x < 1) {
                return -1;
            }
        }
        if (type == 3) {
            x = floor(pt.x * mGridSizeLeft.width);
            y = floor(pt.y * mGridSizeLeft.height + 0.5);
            if (y >= mGridSizeLeft.height || y < 1) {
                return -1;
            }
        }
        if (type == 4) {
            x = floor(pt.x * mGridSizeLeft.width + 0.5);
            y = floor(pt.y * mGridSizeLeft.height + 0.5);
            if (y >= mGridSizeLeft.height || y < 1 || x >= mGridSizeLeft.width || x < 1) {
                return -1;
            }
        }
        return x + y * mGridSizeLeft.width;
    }
    // Map a normalized right-image point to its (unshifted) cell index.
    int GetGridIndexRight(const Point2f &pt) {
        int x = floor(pt.x * mGridSizeRight.width);
        int y = floor(pt.y * mGridSizeRight.height);
        return x + y * mGridSizeRight.width;
    }
    // Assign Matches to Cell Pairs
    void AssignMatchPairs(int GridType);
    // Verify Cell Pairs
    void VerifyCellPairs(int RotationType);
    // Get Neighbor 9: the 3x3 neighborhood cell indices of `idx` in row-major
    // order; -1 marks neighbors that fall outside the grid.
    vector<int> GetNB9(const int idx, const Size& GridSize) {
        vector<int> NB9(9, -1);
        int idx_x = idx % GridSize.width;
        int idx_y = idx / GridSize.width;
        for (int yi = -1; yi <= 1; yi++)
        {
            for (int xi = -1; xi <= 1; xi++)
            {
                int idx_xx = idx_x + xi;
                int idx_yy = idx_y + yi;
                if (idx_xx < 0 || idx_xx >= GridSize.width || idx_yy < 0 || idx_yy >= GridSize.height)
                    continue;
                NB9[xi + 4 + yi * 3] = idx_xx + idx_yy * GridSize.width;
            }
        }
        return NB9;
    }
    // Precompute the 9-neighbor table for every cell of a grid.
    void InitalizeNiehbors(Mat &neighbor, const Size& GridSize) {
        for (int i = 0; i < neighbor.rows; i++)
        {
            vector<int> NB9 = GetNB9(i, GridSize);
            int *data = neighbor.ptr<int>(i);
            memcpy(data, &NB9[0], sizeof(int) * 9);
        }
    }
    // Resize the right grid according to mScaleRatios[Scale] and rebuild its
    // neighbor table.
    void SetScale(int Scale) {
        // Set Scale
        mGridSizeRight.width = mGridSizeLeft.width * mScaleRatios[Scale];
        mGridSizeRight.height = mGridSizeLeft.height * mScaleRatios[Scale];
        mGridNumberRight = mGridSizeRight.width * mGridSizeRight.height;
        // Initialize the neighbor table of the right grid
        mGridNeighborRight = Mat::zeros(mGridNumberRight, 9, CV_32SC1);
        InitalizeNiehbors(mGridNeighborRight, mGridSizeRight);
    }
    // Run one full GMS pass for the given rotation hypothesis; returns the
    // number of inliers found.
    int run(int RotationType);
};
// Run GMS over the requested scale/rotation hypotheses and keep the mask of
// the best-scoring configuration.  Returns the number of inliers.
int gms_matcher::GetInlierMask(vector<bool> &vbInliers, bool WithScale, bool WithRotation) {
    // Fast path: a single scale and a single rotation hypothesis.
    if (!WithScale && !WithRotation)
    {
        SetScale(0);
        int best = run(1);
        vbInliers = mvbInlierMask;
        return best;
    }
    int best = 0;
    // Try rotation hypotheses 1..maxRotation at the current scale, keeping
    // the mask whenever the inlier count strictly improves.
    auto tryRotations = [&](int maxRotation) {
        for (int rot = 1; rot <= maxRotation; ++rot)
        {
            int inliers = run(rot);
            if (inliers > best)
            {
                vbInliers = mvbInlierMask;
                best = inliers;
            }
        }
    };
    if (WithScale)
    {
        for (int scale = 0; scale < 5; ++scale)
        {
            SetScale(scale);
            tryRotations(WithRotation ? 8 : 1);
        }
    }
    else
    {
        SetScale(0);
        tryRotations(8);
    }
    return best;
}
// Bin every match into a (left-cell, right-cell) pair for the current grid
// offset and accumulate the motion-statistics matrix plus per-cell counts.
void gms_matcher::AssignMatchPairs(int GridType) {
    for (size_t i = 0; i < mNumberMatches; i++)
    {
        Point2f &lp = mvP1[mvMatches[i].first];
        Point2f &rp = mvP2[mvMatches[i].second];
        int lgidx = mvMatchPairs[i].first = GetGridIndexLeft(lp, GridType);
        int rgidx = -1;
        if (GridType == 1)
        {
            rgidx = mvMatchPairs[i].second = GetGridIndexRight(rp);
        }
        else
        {
            // Reuse the right-cell index computed during the GridType==1 pass
            // (run() iterates GridType 1..4 without clearing mvMatchPairs).
            rgidx = mvMatchPairs[i].second;
        }
        // Skip matches outside the (shifted) grid.
        if (lgidx < 0 || rgidx < 0) continue;
        mMotionStatistics.at<int>(lgidx, rgidx)++;
        mNumberPointsInPerCellLeft[lgidx]++;
    }
}
// For each left cell pick the right cell receiving the most matches, then
// validate the pair by scoring its 3x3 neighborhood (rotated on the right
// side according to RotationType) against an adaptive threshold.
void gms_matcher::VerifyCellPairs(int RotationType) {
    const int *CurrentRP = mRotationPatterns[RotationType - 1];
    for (int i = 0; i < mGridNumberLeft; i++)
    {
        if (sum(mMotionStatistics.row(i))[0] == 0)
        {
            mCellPairs[i] = -1;  // no matches originate from this cell
            continue;
        }
        // Best-matching right cell = argmax over this row of the statistics.
        int max_number = 0;
        for (int j = 0; j < mGridNumberRight; j++)
        {
            int *value = mMotionStatistics.ptr<int>(i);
            if (value[j] > max_number)
            {
                mCellPairs[i] = j;
                max_number = value[j];
            }
        }
        int idx_grid_rt = mCellPairs[i];
        const int *NB9_lt = mGridNeighborLeft.ptr<int>(i);
        const int *NB9_rt = mGridNeighborRight.ptr<int>(idx_grid_rt);
        // Aggregate support and point counts over the 9-cell neighborhoods.
        int score = 0;
        double thresh = 0;
        int numpair = 0;
        for (size_t j = 0; j < 9; j++)
        {
            int ll = NB9_lt[j];
            int rr = NB9_rt[CurrentRP[j] - 1];
            if (ll == -1 || rr == -1) continue;
            score += mMotionStatistics.at<int>(ll, rr);
            thresh += mNumberPointsInPerCellLeft[ll];
            numpair++;
        }
        // Adaptive threshold: THRESH_FACTOR * sqrt(mean points per cell),
        // as in the GMS paper.
        thresh = THRESH_FACTOR * sqrt(thresh / numpair);
        if (score < thresh)
            mCellPairs[i] = -2;  // rejected: insufficient neighborhood support
    }
}
int gms_matcher::run(int RotationType) {
mvbInlierMask.assign(mNumberMatches, false);
// Initialize Motion Statisctics
mMotionStatistics = Mat::zeros(mGridNumberLeft, mGridNumberRight, CV_32SC1);
mvMatchPairs.assign(mNumberMatches, pair<int, int>(0, 0));
for (int GridType = 1; GridType <= 4; GridType++)
{
// initialize
mMotionStatistics.setTo(0);
mCellPairs.assign(mGridNumberLeft, -1);
mNumberPointsInPerCellLeft.assign(mGridNumberLeft, 0);
AssignMatchPairs(GridType);
VerifyCellPairs(RotationType);
// Mark inliers
for (size_t i = 0; i < mNumberMatches; i++)
{
if (mvMatchPairs[i].first >= 0) {
if (mCellPairs[mvMatchPairs[i].first] == mvMatchPairs[i].second)
{
mvbInlierMask[i] = true;
}
}
}
}
int num_inlier = sum(mvbInlierMask)[0];
return num_inlier;
}
demo.cpp
#include "gms_matcher.h"
#include <ctime>
#define USE_GPU
#ifdef USE_GPU
#include <opencv2/cudafeatures2d.hpp>
using cuda::GpuMat;
#endif
void GmsMatch(Mat &img1, Mat &img2, Mat &img1_ori, Mat &img2_ori);
Mat DrawInlier(Mat &src1, Mat &src2, vector<KeyPoint> &kpt1, vector<KeyPoint> &kpt2, vector<DMatch> &inlier, int type);
// Compose a canvas with queryImage at the top-left and objectImage at the
// bottom-right (diagonal layout), then draw a yellow line for every
// correspondence between the two coordinate lists.
cv::Mat LeftUpRightDownDrawInlier(const cv::Mat &queryImage, const cv::Mat &objectImage,
                                  const vector<cv::Point2f> &queryCoord, const vector<cv::Point2f> &objectCoord)
{
    const int qw = queryImage.size().width;
    const int qh = queryImage.size().height;
    Mat canvas = Mat::zeros(Size(qw + objectImage.size().width,
                                 qh + objectImage.size().height), CV_8UC3);
    // Blit both images into their regions of interest on the canvas.
    queryImage.copyTo(canvas(Rect(0, 0, qw, qh)));
    objectImage.copyTo(canvas(Rect(qw, qh, objectImage.size().width, objectImage.size().height)));
    // Connect each correspondence pair.
    for (size_t i = 0; i < queryCoord.size(); ++i) {
        Point2f from = queryCoord[i];
        Point2f to = Point(objectCoord[i].x + qw, objectCoord[i].y + qh);
        line(canvas, from, to, Scalar(0, 255, 255));
    }
    return canvas;
}
/*
 * Calculates the least-squares best-fit transform that maps corresponding
 * points A to B in 3 spatial dimensions (Kabsch / SVD method).
 * Input:
 *   A: N corresponding 3-D points (source)
 *   B: N corresponding 3-D points (target); must match A in size
 * Returns (via output params, all CV_32F):
 *   T: 4x4 homogeneous transformation matrix that maps A on to B
 *   R: 3x3 rotation matrix
 *   t: 3x1 translation vector
 * //https://blog.csdn.net/kewei9/article/details/74157236
 */
void best_fit_transform(std::vector<cv::Point3f> A, std::vector<cv::Point3f> B, cv::Mat & T, cv::Mat & R, cv::Mat & t)
{
    if (A.size() != B.size())
    {
        std::cout << "error:::" << "A.size()!=B.size()" << std::endl;
        return; // FIX: original fell through and indexed B out of range
    }
    int pointsNum = A.size();
    if (pointsNum == 0)
        return; // FIX: avoid division by zero on empty input
    // Centroids of both point sets.
    double srcSumX = 0.0, srcSumY = 0.0, srcSumZ = 0.0;
    double dstSumX = 0.0, dstSumY = 0.0, dstSumZ = 0.0;
    for (int i = 0; i < pointsNum; ++i)
    {
        srcSumX += A[i].x; srcSumY += A[i].y; srcSumZ += A[i].z;
        dstSumX += B[i].x; dstSumY += B[i].y; dstSumZ += B[i].z;
    }
    cv::Point3f centerSrc, centerDst;
    centerSrc.x = float(srcSumX / pointsNum);
    centerSrc.y = float(srcSumY / pointsNum);
    centerSrc.z = float(srcSumZ / pointsNum);
    centerDst.x = float(dstSumX / pointsNum);
    centerDst.y = float(dstSumY / pointsNum);
    centerDst.z = float(dstSumZ / pointsNum);
    // Centered coordinates, one column per point (3 x N, row-major buffer).
    const int m = 3;
    cv::Mat srcMat(m, pointsNum, CV_32FC1);
    cv::Mat dstMat(m, pointsNum, CV_32FC1);
    float* srcDat = (float*)(srcMat.data);
    float* dstDat = (float*)(dstMat.data);
    for (int i = 0; i < pointsNum; ++i)
    {
        srcDat[i] = A[i].x - centerSrc.x;
        srcDat[pointsNum + i] = A[i].y - centerSrc.y;
        srcDat[pointsNum * 2 + i] = A[i].z - centerSrc.z;
        dstDat[i] = B[i].x - centerDst.x;
        dstDat[pointsNum + i] = B[i].y - centerDst.y;
        dstDat[pointsNum * 2 + i] = B[i].z - centerDst.z;
    }
    // Rotation from the SVD of the cross-covariance matrix.
    // Note: cv::SVDecomp returns V transposed in matV.
    cv::Mat matS = srcMat * dstMat.t();
    cv::Mat matU, matW, matV;
    cv::SVDecomp(matS, matW, matU, matV);
    R = matV.t() * matU.t();
    // Special reflection case: negate the last row of V^T so det(R) = +1.
    double det = cv::determinant(R);
    if (det < 0)
    {
        float* lastRow = matV.ptr<float>(m - 1);
        for (int i = 0; i < matV.cols; i++)
        {
            lastRow[i] = -lastRow[i]; // FIX: was `*data++ = (*data * (-1));` (eval-order hazard)
        }
        R = matV.t() * matU.t();
    }
    // Translation: t = centroid_B - R * centroid_A  (Mat(Point3f) is 3x1).
    t = Mat(centerDst) - (R * Mat(centerSrc));
    // Homogeneous transform [R | t; 0 0 0 1].
    // FIX: original filled a double[] identity but declared the Mat as
    // CV_32FC1, reinterpreting the bytes, and never stored R/t into T.
    float datM[] = {R.at<float>(0,0), R.at<float>(0,1), R.at<float>(0,2), t.at<float>(0,0),
                    R.at<float>(1,0), R.at<float>(1,1), R.at<float>(1,2), t.at<float>(1,0),
                    R.at<float>(2,0), R.at<float>(2,1), R.at<float>(2,2), t.at<float>(2,0),
                    0, 0, 0, 1};
    cv::Mat matM(4, 4, CV_32FC1, datM);
    T = matM.clone();
}
// Load the demo image pair and repeatedly run the matcher, timing each pass.
void runImagePair() {
    Mat img1_ori = imread("../data/1_IMG_Texture_8Bit.png");
    Mat img2_ori = imread("../data/2_IMG_Texture_8Bit.png");
    for (int iter = 0; iter < 1000; ++iter) {
        const clock_t tic = clock();
        // Matching runs on 800x600 downscaled copies; the originals are kept
        // for full-resolution visualization inside GmsMatch.
        Mat img1, img2;
        cv::resize(img1_ori, img1, Size(800, 600));
        cv::resize(img2_ori, img2, Size(800, 600));
        GmsMatch(img1, img2, img1_ori, img2_ori);
        const clock_t toc = clock();
        cout << "yangninghua Running time: " << (double)(toc - tic) / CLOCKS_PER_SEC * 1000 << "ms" << endl;
    }
}
int main()
{
#ifdef USE_GPU
    // Select the first CUDA device when at least one is available.
    if (cuda::getCudaEnabledDeviceCount() != 0) {
        cuda::setDevice(0);
    }
#endif // USE_GPU
    runImagePair();
    return 0;
}
// Full matching pipeline: ORB features -> brute-force Hamming matching ->
// GMS filtering -> homography RANSAC -> essential-matrix RANSAC ->
// back-projection to 3-D via depth maps -> rigid-transform (ICP seed).
// img1/img2 are the downscaled working images; img1_ori/img2_ori the
// full-resolution originals used for visualization and depth lookup.
void GmsMatch(Mat &img1, Mat &img2, Mat &img1_ori, Mat &img2_ori) {
    // Scale factors mapping keypoints on the resized images back to the
    // original resolution.
    float ratio_x = img1_ori.cols / (1.0 * img1.cols);
    float ratio_y = img1_ori.rows / (1.0 * img1.rows);
    vector<KeyPoint> kp1, kp2;
    Mat d1, d2;
    vector<DMatch> matches_all, matches_gms;
    // Tunable hyper-parameter; keep it at 50000 or more (GMS relies on very
    // dense features).
    Ptr<ORB> orb = ORB::create(100000);
    orb->setFastThreshold(0);
    orb->detectAndCompute(img1, Mat(), kp1, d1);
    orb->detectAndCompute(img2, Mat(), kp2, d2);
    cout << "Get total " << kp1.size() << " kp1." << endl;
    cout << "Get total " << kp2.size() << " kp2." << endl;
#ifdef USE_GPU
    GpuMat gd1(d1), gd2(d2);
    Ptr<cuda::DescriptorMatcher> matcher = cv::cuda::DescriptorMatcher::createBFMatcher(NORM_HAMMING);
    matcher->match(gd1, gd2, matches_all);
#else
    BFMatcher matcher(NORM_HAMMING);
    matcher.match(d1, d2, matches_all);
#endif
    // GMS filter
    std::vector<bool> vbInliers;
    gms_matcher gms(kp1, img1.size(), kp2, img2.size(), matches_all);
    int num_inliers = gms.GetInlierMask(vbInliers, false, false);
    cout << "Get total " << num_inliers << " matches." << endl;
    // collect matches that survived the GMS filter
    for (size_t i = 0; i < vbInliers.size(); ++i)
    {
        if (vbInliers[i] == true)
        {
            matches_gms.push_back(matches_all[i]);
        }
    }
    Mat show = DrawInlier(img1, img2, kp1, kp2, matches_gms, 1);
    std::vector<cv::Point2f> queryPoints;
    std::vector<cv::Point2f> trainPoints;
    vector<DMatch> goodMatches;
    //https://blog.csdn.net/linxihe123/article/details/70173476
    vector<pair<KeyPoint,KeyPoint> >kp_pairs_temp;
    for (size_t i = 0; i < matches_gms.size(); i++)
    {
        KeyPoint temp1 = kp1[matches_gms[i].queryIdx];
        KeyPoint temp2 = kp2[matches_gms[i].trainIdx];
        goodMatches.push_back(matches_gms[i]);
        queryPoints.push_back(temp1.pt);
        trainPoints.push_back(temp2.pt);
        kp_pairs_temp.push_back(make_pair(temp1,temp2));
    }
    // findHomography needs at least 4 point correspondences.
    const int minNumberMatchesAllowed = 4;
    if (queryPoints.size() < minNumberMatchesAllowed)
        return;
    double reprojectionThreshold=20.0;
    std::vector<unsigned char> inliersMask(goodMatches.size());
    Mat homography = findHomography(queryPoints,
            trainPoints,
            FM_RANSAC,
            reprojectionThreshold,
            inliersMask,
            2000,
            0.995);
    // Alternative formulations kept for reference:
    // Mat homography = findHomography(queryPoints,
    //                                 trainPoints,
    //                                 inliersMask,
    //                                 FM_RANSAC);
    // std::vector<unsigned char> RansacStatus(goodMatches.size());
    // Mat Fundamental = findFundamentalMat(queryPoints,
    //                                      trainPoints,
    //                                      FM_RANSAC,
    //                                      20,
    //                                      0.99,
    //                                      RansacStatus);
    // Mat Fundamental = findFundamentalMat(queryPoints,
    //                                      trainPoints,
    //                                      RansacStatus,
    //                                      FM_RANSAC);
    vector<pair<KeyPoint,KeyPoint> >kp_pairs;
    std::vector<cv::Point2f> newp1;    // homography inliers at original scale
    std::vector<cv::Point2f> newp2;
    std::vector<cv::Point2f> newp1c;   // same inliers at working scale
    std::vector<cv::Point2f> newp2c;
    vector<DMatch> goodgoodMatches;
    int hh = img1_ori.rows;
    int ww = img1_ori.cols;
    for (size_t i=0; i<inliersMask.size(); i++)
    {
        if (inliersMask[i])
        {
            goodgoodMatches.push_back(goodMatches[i]);
            kp_pairs.push_back(kp_pairs_temp[i]);
            // Rescale to original resolution and clamp into the image bounds.
            float x1 = kp_pairs_temp[i].first.pt.x * ratio_x;
            float y1 = kp_pairs_temp[i].first.pt.y * ratio_y;
            float x2 = kp_pairs_temp[i].second.pt.x * ratio_x;
            float y2 = kp_pairs_temp[i].second.pt.y * ratio_y;
            x1 = x1 > ww ? ww : x1;
            x1 = x1 < 0 ? 0 : x1;
            x2 = x2 > ww ? ww : x2;
            x2 = x2 < 0 ? 0 : x2;
            y1 = y1 > hh ? hh : y1;
            y1 = y1 < 0 ? 0 : y1;
            y2 = y2 > hh ? hh : y2;
            y2 = y2 < 0 ? 0 : y2;
            newp1.push_back(cv::Point2f(x1,y1));
            newp2.push_back(cv::Point2f(x2,y2));
            newp1c.push_back(kp_pairs_temp[i].first.pt);
            newp2c.push_back(kp_pairs_temp[i].second.pt);
        }
    }
    Mat img_RR_matches = LeftUpRightDownDrawInlier(img1, img2, newp1c, newp2c);
    Mat img_CCC_matches = LeftUpRightDownDrawInlier(img1_ori, img2_ori, newp1, newp2);
    //https://docs.opencv.org/3.0-beta/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html#findessentialmat
    //https://docs.opencv.org/3.4.8/d9/d0c/group__calib3d.html#ga13f7e34de8fa516a686a56af1196247f
    std::vector<unsigned char> EssentialMask(newp1.size());
    // Hard-coded camera intrinsics (fx, fy, cx, cy) for the capture device.
    cv::Mat intrinsics = (cv::Mat_<float>(3, 3) << 2269.16, 0, 1065.54, 0, 2268.4, 799.032, 0, 0, 1);
    Mat Essential = findEssentialMat(newp1, newp2, intrinsics, cv::RANSAC, 0.999, 20, EssentialMask);
    cv::Mat OriginalDepthImage = cv::imread("/home/spple/CLionProjects/ASIFT/11.14/22_IMG_DepthMap.tif", -1);
    cv::Mat targetDepthImage = cv::imread("/home/spple/CLionProjects/ASIFT/11.14/33_IMG_DepthMap.tif", -1);
    std::vector<cv::Point3f> trp1;
    std::vector<cv::Point3f> trp2;
    std::vector<cv::Point2f> trp1_temp;
    std::vector<cv::Point2f> trp2_temp;
    for (size_t key=0; key<newp1.size(); key++)
    {
        float x1 = newp1[key].x;
        float y1 = newp1[key].y;
        float x2 = newp2[key].x;
        float y2 = newp2[key].y;
        // Back-project both pixels using the pinhole model and the depth maps.
        // NOTE(review): dividing z by intrinsics(2,2) (= 1) looks redundant —
        // presumably kept for symmetry; confirm the depth map units.
        float d1 = OriginalDepthImage.at<float>(int(y1),int(x1));
        cv::Point3f p1;
        p1.z = float(d1) / intrinsics.ptr<float>(2)[2];
        p1.x = (x1 - intrinsics.ptr<float>(0)[2]) * p1.z / intrinsics.ptr<float>(0)[0];
        p1.y = (y1 - intrinsics.ptr<float>(1)[2]) * p1.z / intrinsics.ptr<float>(1)[1];
        float d2 = targetDepthImage.at<float>(int(y2),int(x2));
        cv::Point3f p2;
        p2.z = float(d2) / intrinsics.ptr<float>(2)[2];
        p2.x = (x2 - intrinsics.ptr<float>(0)[2]) * p2.z / intrinsics.ptr<float>(0)[0];
        p2.y = (y2 - intrinsics.ptr<float>(1)[2]) * p2.z / intrinsics.ptr<float>(1)[1];
        // > 0.001 rejects keypoints whose depth is (near) zero.
        if(EssentialMask[key] and ((abs(p1.x) + abs(p1.y) + abs(p1.z)) > 0.001) and ((abs(p2.x) + abs(p2.y) + abs(p2.z)) > 0.001))
        {
            trp1.push_back(p1);
            trp2.push_back(p2);
            trp1_temp.push_back(newp1[key]);
            trp2_temp.push_back(newp2[key]);
        }
    }
    Mat img_LL_matches = LeftUpRightDownDrawInlier(img1_ori, img2_ori, trp1_temp, trp2_temp);
    // draw matching
    imwrite("../data/a.jpg",show);
    imwrite("../data/b.jpg",img_RR_matches);
    imwrite("../data/b1.jpg",img_CCC_matches);
    imwrite("../data/c.jpg",img_LL_matches);
    cout << "ICP start" << endl;
    Mat T, R, t;
    best_fit_transform(trp1, trp2, T, R, t);
    /*
     * Reference results from the python implementation
     * 11-22
    [[ 0.9893 0.04623 0.1395 ]
    [-0.04523 0.999 -0.0105 ]
    [-0.1399 0.004074 0.99 ]]
    [-42.38 92.5 -93.5 ]
     * 11-33
    [[ 0.979 -0.03976 -0.2006 ]
    [ 0.01021 0.9893 -0.1462 ]
    [ 0.2042 0.1411 0.9688 ]]
    [ -2.81 162. -51.78]
    */
    cout << R << endl;
    cout << t << endl;
    cout << "end code" << endl;
}
// Side-by-side montage of src1 (left) and src2 (right) with the given
// matches drawn on top.  type 1: yellow lines only; type 2: blue lines plus
// colored endpoint circles.
Mat DrawInlier(Mat &src1, Mat &src2, vector<KeyPoint> &kpt1, vector<KeyPoint> &kpt2, vector<DMatch> &inlier, int type) {
    const int rows = max(src1.rows, src2.rows);
    const int cols = src1.cols + src2.cols;
    Mat canvas(rows, cols, CV_8UC3, Scalar(0, 0, 0));
    src1.copyTo(canvas(Rect(0, 0, src1.cols, src1.rows)));
    src2.copyTo(canvas(Rect(src1.cols, 0, src2.cols, src2.rows)));
    // Right-image keypoints are shifted by the width of the left image.
    const Point2f shift((float)src1.cols, 0.f);
    if (type == 1)
    {
        for (const DMatch &m : inlier)
        {
            line(canvas, kpt1[m.queryIdx].pt, kpt2[m.trainIdx].pt + shift, Scalar(0, 255, 255));
        }
    }
    else if (type == 2)
    {
        // Lines first, then circles, so the endpoint markers stay on top.
        for (const DMatch &m : inlier)
        {
            line(canvas, kpt1[m.queryIdx].pt, kpt2[m.trainIdx].pt + shift, Scalar(255, 0, 0));
        }
        for (const DMatch &m : inlier)
        {
            circle(canvas, kpt1[m.queryIdx].pt, 1, Scalar(0, 255, 255), 2);
            circle(canvas, kpt2[m.trainIdx].pt + shift, 1, Scalar(0, 255, 0), 2);
        }
    }
    return canvas;
}
可视化显示 demo.cpp:
# Build the GMS+ICP visualization demo against local OpenCV and PCL builds.
cmake_minimum_required(VERSION 3.12)
project(GMS_ICP)
set(CMAKE_CXX_STANDARD 14)
#set(OpenCV_DIR "/media/spple/新加卷/Dataset/opencv-3.4.8/install/share/OpenCV/")
set(OpenCV_DIR "/media/spple/新加卷1/opencv-3.4.1/install_4/share/OpenCV/")
find_package(OpenCV REQUIRED)
set(PCL_DIR "/media/spple/新加卷/Dataset/pcl-master/install/share/pcl-1.9/")
find_package(PCL REQUIRED)
# FIX: include_directories/link_directories/add_definitions are directory
# properties that only apply to targets created AFTER the call; they were
# originally placed below add_executable and therefore had no effect.
include_directories(${OpenCV_INCLUDE_DIRS} ${PCL_INCLUDE_DIRS})
link_directories(${PCL_LIBRARY_DIRS})
add_definitions(${PCL_DEFINITIONS})
# vtkproj4 is not shipped by newer VTK builds; drop it before linking.
list(REMOVE_ITEM PCL_LIBRARIES "vtkproj4")
add_executable(GMS_ICP gms_matcher.h demo.cpp)
target_link_libraries(GMS_ICP ${OpenCV_LIBS} ${PCL_LIBRARIES})
#include <pcl/io/ply_io.h>
#include <pcl/point_types.h>
#include <pcl/registration/icp.h>
#include <pcl/visualization/keyboard_event.h>
#include <pcl/visualization/pcl_visualizer.h>
#include <pcl/console/time.h> // TicToc
#include <pcl/filters/voxel_grid.h>
// Point type used throughout: XYZ + RGB + normal (PointNormal also works).
typedef pcl::PointXYZRGBNormal PointT;
typedef pcl::PointCloud<PointT> PointCloudT;
bool next_iteration = false;  // set by the keyboard callback on <space>
#include "gms_matcher.h"
#include <ctime>
#define USE_GPU
#ifdef USE_GPU
#include <opencv2/cudafeatures2d.hpp>
using cuda::GpuMat;
#endif
#include <opencv2/calib3d.hpp>
// Pretty-print the 3x3 rotation block and the translation column of a 4x4
// homogeneous transform.
void
print4x4Matrix (const Eigen::Matrix4d & matrix)
{
  printf ("Rotation matrix :\n");
  const char *row_fmt[3] = {" | %6.3f %6.3f %6.3f | \n",
                            "R = | %6.3f %6.3f %6.3f | \n",
                            " | %6.3f %6.3f %6.3f | \n"};
  for (int r = 0; r < 3; ++r)
    printf (row_fmt[r], matrix (r, 0), matrix (r, 1), matrix (r, 2));
  printf ("Translation vector :\n");
  printf ("t = < %6.3f, %6.3f, %6.3f >\n\n", matrix (0, 3), matrix (1, 3), matrix (2, 3));
}
// PCL visualizer callback: advance one ICP iteration each time the user
// presses the space bar.
void
keyboardEventOccurred (const pcl::visualization::KeyboardEvent& event,
                       void* nothing)
{
  const bool isSpace = (event.getKeySym () == "space");
  if (isSpace && event.keyDown ())
  {
    next_iteration = true;
  }
}
// Downsample a cloud in place with a voxel grid of cubic leaves of edge
// length `leafsize`.
void voxel_grid_filter(PointCloudT::Ptr cloud_in, float leafsize)
{
    pcl::VoxelGrid<PointT> grid;
    grid.setInputCloud(cloud_in);
    grid.setLeafSize(leafsize, leafsize, leafsize);
    grid.filter(*cloud_in);
}
int GmsMatch(Mat &img1, Mat &img2, Mat &img1_ori, Mat &img2_ori);
Mat DrawInlier(Mat &src1, Mat &src2, vector<KeyPoint> &kpt1, vector<KeyPoint> &kpt2, vector<DMatch> &inlier, int type);
// Place queryImage in the top-left corner and objectImage in the
// bottom-right corner of a black canvas, then draw a yellow line between
// every corresponding coordinate pair.
cv::Mat LeftUpRightDownDrawInlier(const cv::Mat &queryImage, const cv::Mat &objectImage,
                                  const vector<cv::Point2f> &queryCoord, const vector<cv::Point2f> &objectCoord)
{
    const Size qsz = queryImage.size();
    const Size osz = objectImage.size();
    Mat canvas = Mat::zeros(Size(qsz.width + osz.width, qsz.height + osz.height), CV_8UC3);
    // Copy both images into their destination ROIs.
    Mat topLeft = Mat(canvas, Rect(0, 0, qsz.width, qsz.height));
    queryImage.copyTo(topLeft);
    Mat bottomRight = Mat(canvas, Rect(qsz.width, qsz.height, osz.width, osz.height));
    objectImage.copyTo(bottomRight);
    // One line per correspondence.
    for (size_t i = 0; i < queryCoord.size(); ++i) {
        Point2f from = queryCoord[i];
        Point2f to = Point(objectCoord[i].x + qsz.width, objectCoord[i].y + qsz.height);
        line(canvas, from, to, Scalar(0, 255, 255));
    }
    return canvas;
}
/*
 * Calculates the least-squares best-fit transform that maps corresponding
 * points A to B in 3 spatial dimensions (Kabsch / SVD method).
 * Input:
 *   A: N corresponding 3-D points (source)
 *   B: N corresponding 3-D points (target); must match A in size
 * Returns (via output params, all CV_32F):
 *   T: 4x4 homogeneous transformation matrix that maps A on to B
 *   R: 3x3 rotation matrix
 *   t: 3x1 translation vector
 * //https://blog.csdn.net/kewei9/article/details/74157236
 */
void best_fit_transform(std::vector<cv::Point3f> A, std::vector<cv::Point3f> B, cv::Mat & T, cv::Mat & R, cv::Mat & t)
{
    if (A.size() != B.size())
    {
        std::cout << "error:::" << "A.size()!=B.size()" << std::endl;
        return; // FIX: original fell through and indexed B out of range
    }
    int pointsNum = A.size();
    if (pointsNum == 0)
        return; // FIX: avoid division by zero on empty input
    // Centroids of both point sets.
    double srcSumX = 0.0, srcSumY = 0.0, srcSumZ = 0.0;
    double dstSumX = 0.0, dstSumY = 0.0, dstSumZ = 0.0;
    for (int i = 0; i < pointsNum; ++i)
    {
        srcSumX += A[i].x; srcSumY += A[i].y; srcSumZ += A[i].z;
        dstSumX += B[i].x; dstSumY += B[i].y; dstSumZ += B[i].z;
    }
    cv::Point3f centerSrc, centerDst;
    centerSrc.x = float(srcSumX / pointsNum);
    centerSrc.y = float(srcSumY / pointsNum);
    centerSrc.z = float(srcSumZ / pointsNum);
    centerDst.x = float(dstSumX / pointsNum);
    centerDst.y = float(dstSumY / pointsNum);
    centerDst.z = float(dstSumZ / pointsNum);
    // Centered coordinates, one column per point (3 x N, row-major buffer).
    const int m = 3;
    cv::Mat srcMat(m, pointsNum, CV_32FC1);
    cv::Mat dstMat(m, pointsNum, CV_32FC1);
    float* srcDat = (float*)(srcMat.data);
    float* dstDat = (float*)(dstMat.data);
    for (int i = 0; i < pointsNum; ++i)
    {
        srcDat[i] = A[i].x - centerSrc.x;
        srcDat[pointsNum + i] = A[i].y - centerSrc.y;
        srcDat[pointsNum * 2 + i] = A[i].z - centerSrc.z;
        dstDat[i] = B[i].x - centerDst.x;
        dstDat[pointsNum + i] = B[i].y - centerDst.y;
        dstDat[pointsNum * 2 + i] = B[i].z - centerDst.z;
    }
    // Rotation from the SVD of the cross-covariance matrix.
    // Note: cv::SVDecomp returns V transposed in matV.
    cv::Mat matS = srcMat * dstMat.t();
    cv::Mat matU, matW, matV;
    cv::SVDecomp(matS, matW, matU, matV);
    R = matV.t() * matU.t();
    // Special reflection case: negate the last row of V^T so det(R) = +1.
    double det = cv::determinant(R);
    if (det < 0)
    {
        float* lastRow = matV.ptr<float>(m - 1);
        for (int i = 0; i < matV.cols; i++)
        {
            lastRow[i] = -lastRow[i]; // FIX: was `*data++ = (*data * (-1));` (eval-order hazard)
        }
        R = matV.t() * matU.t();
    }
    // Translation: t = centroid_B - R * centroid_A  (Mat(Point3f) is 3x1).
    t = Mat(centerDst) - (R * Mat(centerSrc));
    // Homogeneous transform [R | t; 0 0 0 1].
    // FIX: original filled a double[] identity but declared the Mat as
    // CV_32FC1, reinterpreting the bytes, and never stored R/t into T.
    float datM[] = {R.at<float>(0,0), R.at<float>(0,1), R.at<float>(0,2), t.at<float>(0,0),
                    R.at<float>(1,0), R.at<float>(1,1), R.at<float>(1,2), t.at<float>(1,0),
                    R.at<float>(2,0), R.at<float>(2,1), R.at<float>(2,2), t.at<float>(2,0),
                    0, 0, 0, 1};
    cv::Mat matM(4, 4, CV_32FC1, datM);
    T = matM.clone();
}
// Load the demo image pair and run the matcher once, timing the pass.
void runImagePair() {
    Mat img1_ori = imread("../data/OtherSampleFrame_IMG_Texture_8Bit_48.png");
    Mat img2_ori = imread("../data/OtherSampleFrame_IMG_Texture_8Bit_52.png");
    const clock_t tic = clock();
    // Matching runs on 800x600 downscaled copies; the originals are kept for
    // full-resolution visualization inside GmsMatch.
    Mat img1, img2;
    cv::resize(img1_ori, img1, Size(800, 600));
    cv::resize(img2_ori, img2, Size(800, 600));
    GmsMatch(img1, img2, img1_ori, img2_ori);
    const clock_t toc = clock();
    cout << "yangninghua Running time: " << (double)(toc - tic) / CLOCKS_PER_SEC * 1000 << "ms" << endl;
}
// Entry point: optionally selects the first CUDA device, then runs the
// image-pair matching demo.
int main()
{
#ifdef USE_GPU
    // Bind to GPU 0 whenever the CUDA runtime reports any enabled device.
    int device_count = cuda::getCudaEnabledDeviceCount();
    if (device_count != 0)
    {
        cuda::setDevice(0);
    }
#endif // USE_GPU
    runImagePair();
    return 0;
}
// Full matching + registration pipeline:
//  1. ORB features + brute-force Hamming matching on the resized images.
//  2. GMS grid-based motion statistics filter.
//  3. Homography RANSAC re-filter; survivors mapped back to full resolution.
//  4. Essential-matrix RANSAC filter, then back-projection to 3D via the
//     depth maps and the pinhole intrinsics.
//  5. Closed-form rigid transform (best_fit_transform) seeds a PCL ICP
//     refinement, visualized interactively (space = one more ICP step).
//
// img1/img2 are the resized working images; img1_ori/img2_ori the
// full-resolution originals. Returns 0 on success, -1 on any failure.
int GmsMatch(Mat &img1, Mat &img2, Mat &img1_ori, Mat &img2_ori) {
    // Scale factors from the resized working images back to full resolution.
    float ratio_x = img1_ori.cols / (1.0 * img1.cols);
    float ratio_y = img1_ori.rows / (1.0 * img1.rows);
    vector<KeyPoint> kp1, kp2;
    Mat d1, d2;
    vector<DMatch> matches_all, matches_gms;
    // This hyperparameter can be tuned, but keep it at 50000 or more so the
    // GMS grid statistics have enough features to work with.
    Ptr<ORB> orb = ORB::create(100000);
    orb->setFastThreshold(0);
    orb->detectAndCompute(img1, Mat(), kp1, d1);
    orb->detectAndCompute(img2, Mat(), kp2, d2);
    cout << "Get total " << kp1.size() << " kp1." << endl;
    cout << "Get total " << kp2.size() << " kp2." << endl;
#ifdef USE_GPU
    GpuMat gd1(d1), gd2(d2);
    Ptr<cuda::DescriptorMatcher> matcher = cv::cuda::DescriptorMatcher::createBFMatcher(NORM_HAMMING);
    matcher->match(gd1, gd2, matches_all);
#else
    BFMatcher matcher(NORM_HAMMING);
    matcher.match(d1, d2, matches_all);
#endif
    // GMS filter: keep matches whose neighborhoods agree on the motion.
    std::vector<bool> vbInliers;
    gms_matcher gms(kp1, img1.size(), kp2, img2.size(), matches_all);
    int num_inliers = gms.GetInlierMask(vbInliers, false, false);
    cout << "Get total " << num_inliers << " matches." << endl;
    // Collect the GMS inlier matches.
    for (size_t i = 0; i < vbInliers.size(); ++i)
    {
        if (vbInliers[i] == true)
        {
            matches_gms.push_back(matches_all[i]);
        }
    }
    Mat show = DrawInlier(img1, img2, kp1, kp2, matches_gms, 1);
    std::vector<cv::Point2f> queryPoints;
    std::vector<cv::Point2f> trainPoints;
    vector<DMatch> goodMatches;
    vector<pair<KeyPoint, KeyPoint> > kp_pairs_temp;
    for (size_t i = 0; i < matches_gms.size(); i++)
    {
        KeyPoint temp1 = kp1[matches_gms[i].queryIdx];
        KeyPoint temp2 = kp2[matches_gms[i].trainIdx];
        goodMatches.push_back(matches_gms[i]);
        queryPoints.push_back(temp1.pt);
        trainPoints.push_back(temp2.pt);
        kp_pairs_temp.push_back(make_pair(temp1, temp2));
    }
    // A homography needs at least 4 point pairs.
    const size_t minNumberMatchesAllowed = 4;
    if (queryPoints.size() < minNumberMatchesAllowed)
        return -1;
    // NOTE(review): maxIters=20 is unusually low for RANSAC; kept as-is, but
    // consider the default (2000) if the homography looks unstable.
    double reprojectionThreshold = 20.0;
    std::vector<unsigned char> inliersMask(goodMatches.size());
    Mat homography = findHomography(queryPoints,
                                    trainPoints,
                                    cv::RANSAC, // was FM_RANSAC (fundamental-matrix enum); same value, wrong name
                                    reprojectionThreshold,
                                    inliersMask,
                                    20,
                                    0.995);
    if (homography.empty())
        return -1; // RANSAC failed; homography * col below would crash
    // Re-score every match against the homography with a tighter threshold
    // (diagnostic only; the mask used downstream is inliersMask).
    vector<cv::Point2f> queryInliers;
    vector<cv::Point2f> sceneInliers;
    int inliers_cnt = 0, outliers_cnt = queryPoints.size();
    const float inlier_threshold = 2.8f;
    for (size_t i = 0; i < queryPoints.size(); i++) {
        Mat col = Mat::ones(3, 1, CV_64F);
        col.at<double>(0) = queryPoints[i].x;
        col.at<double>(1) = queryPoints[i].y;
        col = homography * col;
        col /= col.at<double>(2); // dehomogenize: [x*, y*, w] -> [x*/w, y*/w, 1]
        double dist = sqrt(pow(col.at<double>(0) - trainPoints[i].x, 2) +
                           pow(col.at<double>(1) - trainPoints[i].y, 2)); // Euclidean reprojection error
        if (dist < inlier_threshold) {
            queryInliers.push_back(queryPoints[i]);
            sceneInliers.push_back(trainPoints[i]);
            inliers_cnt++;
        }
    }
    cout << inliers_cnt / (outliers_cnt + 0.000001) << endl; // inlier ratio
    // Map the homography inliers back to full-resolution coordinates.
    vector<pair<KeyPoint, KeyPoint> > kp_pairs;
    std::vector<cv::Point2f> newp1;  // full-resolution coordinates
    std::vector<cv::Point2f> newp2;
    std::vector<cv::Point2f> newp1c; // working-resolution coordinates
    std::vector<cv::Point2f> newp2c;
    vector<DMatch> goodgoodMatches;
    int hh = img1_ori.rows;
    int ww = img1_ori.cols;
    for (size_t i = 0; i < inliersMask.size(); i++)
    {
        if (inliersMask[i])
        {
            goodgoodMatches.push_back(goodMatches[i]);
            kp_pairs.push_back(kp_pairs_temp[i]);
            float x1 = kp_pairs_temp[i].first.pt.x * ratio_x;
            float y1 = kp_pairs_temp[i].first.pt.y * ratio_y;
            float x2 = kp_pairs_temp[i].second.pt.x * ratio_x;
            float y2 = kp_pairs_temp[i].second.pt.y * ratio_y;
            // Clamp to the last valid pixel index. Clamping to ww/hh (the
            // image size itself, as before) made int(x)/int(y) read one past
            // the edge of the depth maps below.
            x1 = x1 > ww - 1 ? float(ww - 1) : x1;
            x1 = x1 < 0 ? 0 : x1;
            x2 = x2 > ww - 1 ? float(ww - 1) : x2;
            x2 = x2 < 0 ? 0 : x2;
            y1 = y1 > hh - 1 ? float(hh - 1) : y1;
            y1 = y1 < 0 ? 0 : y1;
            y2 = y2 > hh - 1 ? float(hh - 1) : y2;
            y2 = y2 < 0 ? 0 : y2;
            newp1.push_back(cv::Point2f(x1, y1));
            newp2.push_back(cv::Point2f(x2, y2));
            newp1c.push_back(kp_pairs_temp[i].first.pt);
            newp2c.push_back(kp_pairs_temp[i].second.pt);
        }
    }
    /*
    CameraMatrix (full-resolution intrinsics):
    [2269.16, 0,      1065.54]
    [0,       2268.4, 799.032]
    [0,       0,      1      ]
    DistortionCoefficients (k1, k2, p1, p2, k3):
    (-0.121994, 0.154463, 0.000367495, -0.000926385, -0.0307676)
    Distortion is not applied anywhere below — TODO(review): confirm the
    depth maps are already undistorted.
    */
    Mat img_RR_matches = LeftUpRightDownDrawInlier(img1, img2, newp1c, newp2c);
    Mat img_CCC_matches = LeftUpRightDownDrawInlier(img1_ori, img2_ori, newp1, newp2);
    // Essential-matrix RANSAC on the full-resolution correspondences;
    // EssentialMask marks epipolar-consistent pairs.
    std::vector<unsigned char> EssentialMask(newp1.size());
    cv::Mat intrinsics = (cv::Mat_<float>(3, 3) << 2269.16, 0, 1065.54, 0, 2268.4, 799.032, 0, 0, 1);
    Mat Essential = findEssentialMat(newp1, newp2, intrinsics, cv::RANSAC, 0.999, 5, EssentialMask);
    // Depth maps loaded unchanged (flag -1). Assumed single-channel CV_32F
    // TIFFs — TODO(review): confirm; at<float>() silently misreads other types.
    cv::Mat OriginalDepthImage = cv::imread("../data/OtherSampleFrame_IMG_DepthMap_48.tif", -1);
    cv::Mat targetDepthImage = cv::imread("../data//OtherSampleFrame_IMG_DepthMap_52.tif", -1);
    if (OriginalDepthImage.empty() || targetDepthImage.empty())
    {
        std::cout << "Error: failed to load depth maps" << std::endl;
        return -1;
    }
    // Back-project each surviving correspondence to 3D:
    // p = ((u - cx) * z / fx, (v - cy) * z / fy, z), with z = depth / K(2,2).
    std::vector<cv::Point3f> trp1;
    std::vector<cv::Point3f> trp2;
    std::vector<cv::Point2f> trp1_temp;
    std::vector<cv::Point2f> trp2_temp;
    for (size_t key = 0; key < newp1.size(); key++)
    {
        float x1 = newp1[key].x;
        float y1 = newp1[key].y;
        float x2 = newp2[key].x;
        float y2 = newp2[key].y;
        float d1 = OriginalDepthImage.at<float>(int(y1), int(x1));
        cv::Point3f p1;
        p1.z = float(d1) / intrinsics.ptr<float>(2)[2]; // K(2,2) == 1
        p1.x = (x1 - intrinsics.ptr<float>(0)[2]) * p1.z / intrinsics.ptr<float>(0)[0];
        p1.y = (y1 - intrinsics.ptr<float>(1)[2]) * p1.z / intrinsics.ptr<float>(1)[1];
        float d2 = targetDepthImage.at<float>(int(y2), int(x2));
        cv::Point3f p2;
        p2.z = float(d2) / intrinsics.ptr<float>(2)[2];
        p2.x = (x2 - intrinsics.ptr<float>(0)[2]) * p2.z / intrinsics.ptr<float>(0)[0];
        p2.y = (y2 - intrinsics.ptr<float>(1)[2]) * p2.z / intrinsics.ptr<float>(1)[1];
        // The > 0.001 test drops correspondences with (near-)zero depth.
        // fabs replaces the previous unqualified abs(), which could bind to
        // the integer overload and truncate.
        if (EssentialMask[key] && ((fabs(p1.x) + fabs(p1.y) + fabs(p1.z)) > 0.001) && ((fabs(p2.x) + fabs(p2.y) + fabs(p2.z)) > 0.001))
        {
            trp1.push_back(p1);
            trp2.push_back(p2);
            trp1_temp.push_back(newp1[key]);
            trp2_temp.push_back(newp2[key]);
        }
    }
    Mat img_LL_matches = LeftUpRightDownDrawInlier(img1_ori, img2_ori, trp1_temp, trp2_temp);
    // Persist the intermediate match visualizations.
    imwrite("../data/a.jpg", show);
    imwrite("../data/b.jpg", img_RR_matches);
    imwrite("../data/b1.jpg", img_CCC_matches);
    imwrite("../data/c.jpg", img_LL_matches);
    cout << "ICP start" << endl;
    // Closed-form rigid transform (SVD) from the 3D pairs — the ICP seed.
    Mat T, R, t;
    best_fit_transform(trp1, trp2, T, R, t);
    // The point clouds we will be using.
    PointCloudT::Ptr cloud_in (new PointCloudT);    // target cloud (frame 52)
    PointCloudT::Ptr cloud_in_48 (new PointCloudT); // source cloud (frame 48)
    PointCloudT::Ptr cloud_tr (new PointCloudT);    // source after the seed transform (backup)
    PointCloudT::Ptr cloud_icp (new PointCloudT);   // ICP output cloud
    float leafsize = 2;
    int iterations = 10; // default number of ICP iterations
    pcl::console::TicToc time;
    time.tic ();
    if (pcl::io::loadPLYFile ("/home/spple/CLionProjects/python_asift_gpu/save_ply/SampleFrame_52.ply", *cloud_in) < 0)
    {
        PCL_ERROR ("Error loading cloud %s.\n", "SampleFrame_52.ply");
        return (-1);
    }
    std::cout << "\nLoaded file " << "SampleFrame_52.ply" << " (" << cloud_in->size () << " points) in " << time.toc () << " ms\n" << std::endl;
    time.tic ();
    if (pcl::io::loadPLYFile ("/home/spple/CLionProjects/python_asift_gpu/save_ply/SampleFrame_48.ply", *cloud_in_48) < 0)
    {
        PCL_ERROR ("Error loading cloud %s.\n", "SampleFrame_48.ply");
        return (-1);
    }
    std::cout << "\nLoaded file " << "SampleFrame_48.ply" << " (" << cloud_in_48->size () << " points) in " << time.toc () << " ms\n" << std::endl;
    // Voxel-grid downsample both clouds to speed up ICP.
    if (leafsize > 0)
    {
        time.tic ();
        voxel_grid_filter(cloud_in, leafsize);
        voxel_grid_filter(cloud_in_48, leafsize);
        std::cout << "\nDownsampled clouds to " << cloud_in->size () << " / " << cloud_in_48->size ()
                  << " points in " << time.toc () << " ms\n" << std::endl;
    }
    // Seed transformation from best_fit_transform (R is 3x3, t is 3x1).
    Eigen::Matrix4d transformation_matrix = Eigen::Matrix4d::Identity ();
    transformation_matrix (0, 0) = R.at<float>(0, 0);
    transformation_matrix (0, 1) = R.at<float>(0, 1);
    transformation_matrix (0, 2) = R.at<float>(0, 2);
    transformation_matrix (1, 0) = R.at<float>(1, 0);
    transformation_matrix (1, 1) = R.at<float>(1, 1);
    transformation_matrix (1, 2) = R.at<float>(1, 2);
    transformation_matrix (2, 0) = R.at<float>(2, 0);
    transformation_matrix (2, 1) = R.at<float>(2, 1);
    transformation_matrix (2, 2) = R.at<float>(2, 2);
    // t is a 3x1 column vector: index rows (1,0)/(2,0), not columns.
    // The previous (0,1)/(0,2) indexing was out of bounds for Mat::at.
    transformation_matrix (0, 3) = t.at<float>(0, 0);
    transformation_matrix (1, 3) = t.at<float>(1, 0);
    transformation_matrix (2, 3) = t.at<float>(2, 0);
    std::cout << "Applying this rigid transformation to: cloud_in -> cloud_icp" << std::endl;
    print4x4Matrix (transformation_matrix);
    // Transform frame 48 by the seed Rt; the result (cloud_icp) is then
    // aligned against the target frame 52 (cloud_in) with ICP.
    pcl::transformPointCloudWithNormals (*cloud_in_48, *cloud_icp, transformation_matrix);
    *cloud_tr = *cloud_icp; // backup cloud_icp into cloud_tr for later display
    // The Iterative Closest Point algorithm.
    time.tic ();
    pcl::IterativeClosestPoint<PointT, PointT> icp;
    icp.setMaximumIterations (iterations);
    icp.setMaxCorrespondenceDistance (5);
    icp.setInputSource (cloud_icp);
    icp.setInputTarget (cloud_in);
    icp.align (*cloud_icp);
    icp.setMaximumIterations (1); // one iteration per subsequent .align () call
    std::cout << "Applied " << iterations << " ICP iteration(s) in " << time.toc () << " ms" << std::endl;
    if (icp.hasConverged ())
    {
        std::cout << "\nICP has converged, score is " << icp.getFitnessScore () << std::endl;
        std::cout << "\nICP transformation " << iterations << " : cloud_icp -> cloud_in" << std::endl;
        transformation_matrix = icp.getFinalTransformation ().cast<double>();
        print4x4Matrix (transformation_matrix);
    }
    else
    {
        PCL_ERROR ("\nICP has not converged.\n");
        return (-1);
    }
    // Visualization: left viewport shows the inputs + seed transform,
    // right viewport shows the ICP result.
    boost::shared_ptr< pcl::visualization::PCLVisualizer > viewer(new pcl::visualization::PCLVisualizer("ICP demo"));
    viewer->initCameraParameters ();
    // Two vertically separated viewports.
    int v1 (0);
    int v2 (1);
    viewer->createViewPort (0.0, 0.0, 0.5, 1.0, v1);
    viewer->createViewPort (0.5, 0.0, 1.0, 1.0, v2);
    float bckgr_gray_level = 0.0; // black background
    float txt_gray_lvl = 1.0 - bckgr_gray_level;
    // Target point cloud is white.
    pcl::visualization::PointCloudColorHandlerCustom<PointT> cloud_in_color_h (cloud_in, (int) 255 * txt_gray_lvl, (int) 255 * txt_gray_lvl,
                                                                               (int) 255 * txt_gray_lvl);
    viewer->addPointCloud (cloud_in, cloud_in_color_h, "cloud_in_v1", v1);
    viewer->addPointCloud (cloud_in, cloud_in_color_h, "cloud_in_v2", v2);
    // Seed-transformed point cloud is green.
    pcl::visualization::PointCloudColorHandlerCustom<PointT> cloud_tr_color_h (cloud_tr, 20, 180, 20);
    viewer->addPointCloud (cloud_tr, cloud_tr_color_h, "cloud_tr_v1", v1);
    // Untransformed source cloud is blue.
    pcl::visualization::PointCloudColorHandlerCustom<PointT> cloud_in48_color_h (cloud_in_48, 20, 20, 180);
    viewer->addPointCloud (cloud_in_48, cloud_in48_color_h, "cloud_in48_v1", v1);
    // ICP-aligned point cloud is red.
    pcl::visualization::PointCloudColorHandlerCustom<PointT> cloud_icp_color_h (cloud_icp, 180, 20, 20);
    viewer->addPointCloud (cloud_icp, cloud_icp_color_h, "cloud_icp_v2", v2);
    // Text descriptions in each viewport.
    viewer->addText ("White: Original point cloud\nGreen: Matrix transformed point cloud", 10, 15, 16, txt_gray_lvl, txt_gray_lvl, txt_gray_lvl, "icp_info_1", v1);
    viewer->addText ("White: Original point cloud\nRed: ICP aligned point cloud", 10, 15, 16, txt_gray_lvl, txt_gray_lvl, txt_gray_lvl, "icp_info_2", v2);
    std::stringstream ss;
    ss << iterations;
    std::string iterations_cnt = "ICP iterations = " + ss.str ();
    viewer->addText (iterations_cnt, 10, 60, 16, txt_gray_lvl, txt_gray_lvl, txt_gray_lvl, "iterations_cnt", v2);
    viewer->setBackgroundColor (bckgr_gray_level, bckgr_gray_level, bckgr_gray_level, v1);
    viewer->setBackgroundColor (bckgr_gray_level, bckgr_gray_level, bckgr_gray_level, v2);
    // Add a reference coordinate system at the origin.
    viewer->addCoordinateSystem(1.0, "reference");
    // Camera pose (last argument is the viewport id in this overload).
    viewer->setCameraPosition (0, 0, 0, 0, 0, 0, 0);
    viewer->setSize (2048, 2048); // visualizer window size
    // Space bar (handled in keyboardEventOccurred) requests one more ICP step.
    viewer->registerKeyboardCallback (&keyboardEventOccurred, (void*) NULL);
    // Display loop.
    while (!viewer->wasStopped ())
    {
        viewer->spinOnce ();
        // The user pressed "space":
        if (next_iteration)
        {
            time.tic ();
            icp.align (*cloud_icp);
            std::cout << "Applied 1 ICP iteration in " << time.toc () << " ms" << std::endl;
            if (icp.hasConverged ())
            {
                printf ("\033[11A"); // go up 11 lines in terminal output
                printf ("\nICP has converged, score is %+.0e\n", icp.getFitnessScore ());
                std::cout << "\nICP transformation " << ++iterations << " : cloud_icp -> cloud_in" << std::endl;
                transformation_matrix *= icp.getFinalTransformation ().cast<double>(); // WARNING /!\ This is not accurate! For "educational" purpose only!
                print4x4Matrix (transformation_matrix); // transform between original pose and current pose
                ss.str ("");
                ss << iterations;
                std::string iterations_cnt = "ICP iterations = " + ss.str ();
                viewer->updateText (iterations_cnt, 10, 60, 16, txt_gray_lvl, txt_gray_lvl, txt_gray_lvl, "iterations_cnt");
                viewer->updatePointCloud (cloud_icp, cloud_icp_color_h, "cloud_icp_v2");
            }
            else
            {
                PCL_ERROR ("\nICP has not converged.\n");
                return (-1);
            }
        }
        next_iteration = false;
    }
    // Previously control fell off the end of this int function (UB).
    return 0;
}
// Renders two images side by side and draws the given matches on top.
// type == 1: yellow match lines only.
// type == 2: blue match lines, then yellow/green endpoint markers.
// Any other type returns the bare side-by-side canvas.
Mat DrawInlier(Mat &src1, Mat &src2, vector<KeyPoint> &kpt1, vector<KeyPoint> &kpt2, vector<DMatch> &inlier, int type) {
    // Black canvas large enough to hold both images side by side.
    const int canvasHeight = max(src1.rows, src2.rows);
    const int canvasWidth = src1.cols + src2.cols;
    Mat canvas(canvasHeight, canvasWidth, CV_8UC3, Scalar(0, 0, 0));
    src1.copyTo(canvas(Rect(0, 0, src1.cols, src1.rows)));
    src2.copyTo(canvas(Rect(src1.cols, 0, src2.cols, src2.rows)));

    // Offset that maps right-image keypoints into canvas coordinates.
    const Point2f shift((float)src1.cols, 0.f);

    if (type == 1)
    {
        for (const DMatch &m : inlier)
        {
            const Point2f from = kpt1[m.queryIdx].pt;
            const Point2f to = kpt2[m.trainIdx].pt + shift;
            line(canvas, from, to, Scalar(0, 255, 255));
        }
    }
    else if (type == 2)
    {
        // Two passes on purpose: all lines first, all markers on top,
        // so markers are never covered by later lines.
        for (const DMatch &m : inlier)
        {
            line(canvas, kpt1[m.queryIdx].pt, kpt2[m.trainIdx].pt + shift, Scalar(255, 0, 0));
        }
        for (const DMatch &m : inlier)
        {
            circle(canvas, kpt1[m.queryIdx].pt, 1, Scalar(0, 255, 255), 2);
            circle(canvas, kpt2[m.trainIdx].pt + shift, 1, Scalar(0, 255, 0), 2);
        }
    }
    return canvas;
}