2D Features framework: AKAZE local features matching (OpenCV v4.8.0)

Previous Tutorial: Detection of planar objects

Next Tutorial: AKAZE and ORB planar tracking

Original author: Fedor Morozov
Compatibility: OpenCV >= 3.0

Introduction

In this tutorial we will learn how to use AKAZE [10] local features to detect and match keypoints on two images. We will find keypoints on a pair of images related by a given homography matrix, match them, and count the number of inliers (i.e. matches that fit the given homography).

You can find an expanded version of this example here: https://github.com/pablofdezalc/test_kaze_akaze_opencv

Data

We are going to use images 1 and 3 from the Graffiti sequence of the Oxford dataset.

[Figure: graf1.png and graf3.png from the Graffiti sequence]

The homography is given by a 3-by-3 matrix:

7.6285898e-01 -2.9922929e-01 2.2567123e+02
3.3443473e-01 1.0143901e+00 -7.6999973e+01
3.4663091e-04 -1.4364524e-05 1.0000000e+00

You can find the images (graf1.png, graf3.png) and the homography (H1to3p.xml) in opencv/samples/data/.
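
Not part of the original tutorial: a minimal sketch showing how this homography maps a hypothetical pixel from graf1.png into graf3.png using homogeneous coordinates, which is exactly the projection the sample code performs later.

import numpy as np

H = np.array([[7.6285898e-01, -2.9922929e-01,  2.2567123e+02],
              [3.3443473e-01,  1.0143901e+00, -7.6999973e+01],
              [3.4663091e-04, -1.4364524e-05,  1.0000000e+00]])

p1 = np.array([100.0, 200.0, 1.0])  # hypothetical pixel (x, y) in graf1.png, homogeneous
p3 = H @ p1                         # project into graf3.png
p3 /= p3[2]                         # normalize the homogeneous coordinate
print(p3[:2])                       # corresponding (x, y) location in graf3.png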

Source Code

C++

  • Downloadable code: Click here
  • Code at a glance
#include <opencv2/features2d.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>
#include <iostream>
using namespace std;
using namespace cv;
const float inlier_threshold = 2.5f; // Distance threshold to identify inliers with homography check
const float nn_match_ratio = 0.8f; // Nearest neighbor matching ratio
int main(int argc, char* argv[])
{
 CommandLineParser parser(argc, argv,
 "{@img1 | graf1.png | input image 1}"
 "{@img2 | graf3.png | input image 2}"
 "{@homography | H1to3p.xml | homography matrix}");
 Mat img1 = imread( samples::findFile( parser.get<String>("@img1") ), IMREAD_GRAYSCALE);
 Mat img2 = imread( samples::findFile( parser.get<String>("@img2") ), IMREAD_GRAYSCALE);
 Mat homography;
 FileStorage fs( samples::findFile( parser.get<String>("@homography") ), FileStorage::READ);
 fs.getFirstTopLevelNode() >> homography;
 vector<KeyPoint> kpts1, kpts2;
 Mat desc1, desc2;
 Ptr<AKAZE> akaze = AKAZE::create();
 akaze->detectAndCompute(img1, noArray(), kpts1, desc1);
 akaze->detectAndCompute(img2, noArray(), kpts2, desc2);
 BFMatcher matcher(NORM_HAMMING);
 vector< vector<DMatch> > nn_matches;
 matcher.knnMatch(desc1, desc2, nn_matches, 2);
 vector<KeyPoint> matched1, matched2;
 for(size_t i = 0; i < nn_matches.size(); i++) {
 DMatch first = nn_matches[i][0];
 float dist1 = nn_matches[i][0].distance;
 float dist2 = nn_matches[i][1].distance;
 if(dist1 < nn_match_ratio * dist2) {
 matched1.push_back(kpts1[first.queryIdx]);
 matched2.push_back(kpts2[first.trainIdx]);
 }
 }
 vector<DMatch> good_matches;
 vector<KeyPoint> inliers1, inliers2;
 for(size_t i = 0; i < matched1.size(); i++) {
 Mat col = Mat::ones(3, 1, CV_64F);
 col.at<double>(0) = matched1[i].pt.x;
 col.at<double>(1) = matched1[i].pt.y;
 col = homography * col;
 col /= col.at<double>(2);
 double dist = sqrt( pow(col.at<double>(0) - matched2[i].pt.x, 2) +
 pow(col.at<double>(1) - matched2[i].pt.y, 2));
 if(dist < inlier_threshold) {
 int new_i = static_cast<int>(inliers1.size());
 inliers1.push_back(matched1[i]);
 inliers2.push_back(matched2[i]);
 good_matches.push_back(DMatch(new_i, new_i, 0));
 }
 }
 Mat res;
 drawMatches(img1, inliers1, img2, inliers2, good_matches, res);
 imwrite("akaze_result.png", res);
 double inlier_ratio = inliers1.size() / (double) matched1.size();
 cout << "A-KAZE Matching Results" << endl;
 cout << "*******************************" << endl;
 cout << "# Keypoints 1: \t" << kpts1.size() << endl;
 cout << "# Keypoints 2: \t" << kpts2.size() << endl;
 cout << "# Matches: \t" << matched1.size() << endl;
 cout << "# Inliers: \t" << inliers1.size() << endl;
 cout << "# Inliers Ratio: \t" << inlier_ratio << endl;
 cout << endl;
 imshow("result", res);
 waitKey();
 return 0;
}

Java

  • Downloadable code: Click here
  • Code at a glance
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import javax.xml.parsers.ParserConfigurationException;
import org.opencv.core.Core;
import org.opencv.core.CvType;
import org.opencv.core.DMatch;
import org.opencv.core.KeyPoint;
import org.opencv.core.Mat;
import org.opencv.core.MatOfDMatch;
import org.opencv.core.MatOfKeyPoint;
import org.opencv.core.Scalar;
import org.opencv.features2d.AKAZE;
import org.opencv.features2d.DescriptorMatcher;
import org.opencv.features2d.Features2d;
import org.opencv.highgui.HighGui;
import org.opencv.imgcodecs.Imgcodecs;
import org.w3c.dom.Document;
import org.xml.sax.SAXException;
class AKAZEMatch {
 public void run(String[] args) {
 String filename1 = args.length > 2 ? args[0] : "../data/graf1.png";
 String filename2 = args.length > 2 ? args[1] : "../data/graf3.png";
 String filename3 = args.length > 2 ? args[2] : "../data/H1to3p.xml";
 Mat img1 = Imgcodecs.imread(filename1, Imgcodecs.IMREAD_GRAYSCALE);
 Mat img2 = Imgcodecs.imread(filename2, Imgcodecs.IMREAD_GRAYSCALE);
 if (img1.empty() || img2.empty()) {
 System.err.println("Cannot read images!");
 System.exit(0);
 }
 File file = new File(filename3);
 DocumentBuilderFactory documentBuilderFactory = DocumentBuilderFactory.newInstance();
 DocumentBuilder documentBuilder;
 Document document;
 Mat homography = new Mat(3, 3, CvType.CV_64F);
 double[] homographyData = new double[(int) (homography.total()*homography.channels())];
 try {
 documentBuilder = documentBuilderFactory.newDocumentBuilder();
 document = documentBuilder.parse(file);
 String homographyStr = document.getElementsByTagName("data").item(0).getTextContent();
 String[] splited = homographyStr.split("\\s+");
 int idx = 0;
 for (String s : splited) {
 if (!s.isEmpty()) {
 homographyData[idx] = Double.parseDouble(s);
 idx++;
 }
 }
 } catch (ParserConfigurationException e) {
 e.printStackTrace();
 System.exit(0);
 } catch (SAXException e) {
 e.printStackTrace();
 System.exit(0);
 } catch (IOException e) {
 e.printStackTrace();
 System.exit(0);
 }
 homography.put(0, 0, homographyData);
 AKAZE akaze = AKAZE.create();
 MatOfKeyPoint kpts1 = new MatOfKeyPoint(), kpts2 = new MatOfKeyPoint();
 Mat desc1 = new Mat(), desc2 = new Mat();
 akaze.detectAndCompute(img1, new Mat(), kpts1, desc1);
 akaze.detectAndCompute(img2, new Mat(), kpts2, desc2);
 DescriptorMatcher matcher = DescriptorMatcher.create(DescriptorMatcher.BRUTEFORCE_HAMMING);
 List<MatOfDMatch> knnMatches = new ArrayList<>();
 matcher.knnMatch(desc1, desc2, knnMatches, 2);
 float ratioThreshold = 0.8f; // Nearest neighbor matching ratio
 List<KeyPoint> listOfMatched1 = new ArrayList<>();
 List<KeyPoint> listOfMatched2 = new ArrayList<>();
 List<KeyPoint> listOfKeypoints1 = kpts1.toList();
 List<KeyPoint> listOfKeypoints2 = kpts2.toList();
 for (int i = 0; i < knnMatches.size(); i++) {
 DMatch[] matches = knnMatches.get(i).toArray();
 float dist1 = matches[0].distance;
 float dist2 = matches[1].distance;
 if (dist1 < ratioThreshold * dist2) {
 listOfMatched1.add(listOfKeypoints1.get(matches[0].queryIdx));
 listOfMatched2.add(listOfKeypoints2.get(matches[0].trainIdx));
 }
 }
 double inlierThreshold = 2.5; // Distance threshold to identify inliers with homography check
 List<KeyPoint> listOfInliers1 = new ArrayList<>();
 List<KeyPoint> listOfInliers2 = new ArrayList<>();
 List<DMatch> listOfGoodMatches = new ArrayList<>();
 for (int i = 0; i < listOfMatched1.size(); i++) {
 Mat col = new Mat(3, 1, CvType.CV_64F);
 double[] colData = new double[(int) (col.total() * col.channels())];
 colData[0] = listOfMatched1.get(i).pt.x;
 colData[1] = listOfMatched1.get(i).pt.y;
 colData[2] = 1.0;
 col.put(0, 0, colData);
 Mat colRes = new Mat();
 Core.gemm(homography, col, 1.0, new Mat(), 0.0, colRes);
 colRes.get(0, 0, colData);
 Core.multiply(colRes, new Scalar(1.0 / colData[2]), col);
 col.get(0, 0, colData);
 double dist = Math.sqrt(Math.pow(colData[0] - listOfMatched2.get(i).pt.x, 2) +
 Math.pow(colData[1] - listOfMatched2.get(i).pt.y, 2));
 if (dist < inlierThreshold) {
 listOfGoodMatches.add(new DMatch(listOfInliers1.size(), listOfInliers2.size(), 0));
 listOfInliers1.add(listOfMatched1.get(i));
 listOfInliers2.add(listOfMatched2.get(i));
 }
 }
 Mat res = new Mat();
 MatOfKeyPoint inliers1 = new MatOfKeyPoint(listOfInliers1.toArray(new KeyPoint[listOfInliers1.size()]));
 MatOfKeyPoint inliers2 = new MatOfKeyPoint(listOfInliers2.toArray(new KeyPoint[listOfInliers2.size()]));
 MatOfDMatch goodMatches = new MatOfDMatch(listOfGoodMatches.toArray(new DMatch[listOfGoodMatches.size()]));
 Features2d.drawMatches(img1, inliers1, img2, inliers2, goodMatches, res);
 Imgcodecs.imwrite("akaze_result.png", res);
 double inlierRatio = listOfInliers1.size() / (double) listOfMatched1.size();
 System.out.println("A-KAZE Matching Results");
 System.out.println("*******************************");
 System.out.println("# Keypoints 1: \t" + listOfKeypoints1.size());
 System.out.println("# Keypoints 2: \t" + listOfKeypoints2.size());
 System.out.println("# Matches: \t" + listOfMatched1.size());
 System.out.println("# Inliers: \t" + listOfInliers1.size());
 System.out.println("# Inliers Ratio: \t" + inlierRatio);
 HighGui.imshow("result", res);
 HighGui.waitKey();
 System.exit(0);
 }
}
public class AKAZEMatchDemo {
 public static void main(String[] args) {
 // Load the native OpenCV library
 System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
 new AKAZEMatch().run(args);
 }
}

Python

  • Downloadable code: Click here
  • Code at a glance
from __future__ import print_function
import cv2 as cv
import numpy as np
import argparse
from math import sqrt
parser = argparse.ArgumentParser(description='Code for AKAZE local features matching tutorial.')
parser.add_argument('--input1', help='Path to input image 1.', default='graf1.png')
parser.add_argument('--input2', help='Path to input image 2.', default='graf3.png')
parser.add_argument('--homography', help='Path to the homography matrix.', default='H1to3p.xml')
args = parser.parse_args()
img1 = cv.imread(cv.samples.findFile(args.input1), cv.IMREAD_GRAYSCALE)
img2 = cv.imread(cv.samples.findFile(args.input2), cv.IMREAD_GRAYSCALE)
if img1 is None or img2 is None:
 print('Could not open or find the images!')
 exit(0)
fs = cv.FileStorage(cv.samples.findFile(args.homography), cv.FILE_STORAGE_READ)
homography = fs.getFirstTopLevelNode().mat()
akaze = cv.AKAZE_create()
kpts1, desc1 = akaze.detectAndCompute(img1, None)
kpts2, desc2 = akaze.detectAndCompute(img2, None)
matcher = cv.DescriptorMatcher_create(cv.DescriptorMatcher_BRUTEFORCE_HAMMING)
nn_matches = matcher.knnMatch(desc1, desc2, 2)
matched1 = []
matched2 = []
nn_match_ratio = 0.8 # Nearest neighbor matching ratio
for m, n in nn_matches:
 if m.distance < nn_match_ratio * n.distance:
 matched1.append(kpts1[m.queryIdx])
 matched2.append(kpts2[m.trainIdx])
inliers1 = []
inliers2 = []
good_matches = []
inlier_threshold = 2.5 # Distance threshold to identify inliers with homography check
for i, m in enumerate(matched1):
 col = np.ones((3,1), dtype=np.float64)
 col[0:2,0] = m.pt
 col = np.dot(homography, col)
 col /= col[2,0]
 dist = sqrt(pow(col[0,0] - matched2[i].pt[0], 2) +\
 pow(col[1,0] - matched2[i].pt[1], 2))
 if dist < inlier_threshold:
 good_matches.append(cv.DMatch(len(inliers1), len(inliers2), 0))
 inliers1.append(matched1[i])
 inliers2.append(matched2[i])
res = np.empty((max(img1.shape[0], img2.shape[0]), img1.shape[1]+img2.shape[1], 3), dtype=np.uint8)
cv.drawMatches(img1, inliers1, img2, inliers2, good_matches, res)
cv.imwrite("akaze_result.png", res)
inlier_ratio = len(inliers1) / float(len(matched1))
print('A-KAZE Matching Results')
print('*******************************')
print('# Keypoints 1: \t', len(kpts1))
print('# Keypoints 2: \t', len(kpts2))
print('# Matches: \t', len(matched1))
print('# Inliers: \t', len(inliers1))
print('# Inliers Ratio: \t', inlier_ratio)
cv.imshow('result', res)
cv.waitKey()

Explanation

  • Load images and homography

C++

 CommandLineParser parser(argc, argv,
 "{@img1 | graf1.png | input image 1}"
 "{@img2 | graf3.png | input image 2}"
 "{@homography | H1to3p.xml | homography matrix}");
 Mat img1 = imread( samples::findFile( parser.get<String>("@img1") ), IMREAD_GRAYSCALE);
 Mat img2 = imread( samples::findFile( parser.get<String>("@img2") ), IMREAD_GRAYSCALE);
 Mat homography;
 FileStorage fs( samples::findFile( parser.get<String>("@homography") ), FileStorage::READ);
 fs.getFirstTopLevelNode() >> homography;

Java

 String filename1 = args.length > 2 ? args[0] : "../data/graf1.png";
 String filename2 = args.length > 2 ? args[1] : "../data/graf3.png";
 String filename3 = args.length > 2 ? args[2] : "../data/H1to3p.xml";
 Mat img1 = Imgcodecs.imread(filename1, Imgcodecs.IMREAD_GRAYSCALE);
 Mat img2 = Imgcodecs.imread(filename2, Imgcodecs.IMREAD_GRAYSCALE);
 if (img1.empty() || img2.empty()) {
 System.err.println("Cannot read images!");
 System.exit(0);
 }
 File file = new File(filename3);
 DocumentBuilderFactory documentBuilderFactory = DocumentBuilderFactory.newInstance();
 DocumentBuilder documentBuilder;
 Document document;
 Mat homography = new Mat(3, 3, CvType.CV_64F);
 double[] homographyData = new double[(int) (homography.total()*homography.channels())];
 try {
 documentBuilder = documentBuilderFactory.newDocumentBuilder();
 document = documentBuilder.parse(file);
 String homographyStr = document.getElementsByTagName("data").item(0).getTextContent();
 String[] splited = homographyStr.split("\\s+");
 int idx = 0;
 for (String s : splited) {
 if (!s.isEmpty()) {
 homographyData[idx] = Double.parseDouble(s);
 idx++;
 }
 }
 } catch (ParserConfigurationException e) {
 e.printStackTrace();
 System.exit(0);
 } catch (SAXException e) {
 e.printStackTrace();
 System.exit(0);
 } catch (IOException e) {
 e.printStackTrace();
 System.exit(0);
 }
 homography.put(0, 0, homographyData);

Python

parser = argparse.ArgumentParser(description='Code for AKAZE local features matching tutorial.')
parser.add_argument('--input1', help='Path to input image 1.', default='graf1.png')
parser.add_argument('--input2', help='Path to input image 2.', default='graf3.png')
parser.add_argument('--homography', help='Path to the homography matrix.', default='H1to3p.xml')
args = parser.parse_args()
img1 = cv.imread(cv.samples.findFile(args.input1), cv.IMREAD_GRAYSCALE)
img2 = cv.imread(cv.samples.findFile(args.input2), cv.IMREAD_GRAYSCALE)
if img1 is None or img2 is None:
 print('Could not open or find the images!')
 exit(0)
fs = cv.FileStorage(cv.samples.findFile(args.homography), cv.FILE_STORAGE_READ)
homography = fs.getFirstTopLevelNode().mat()

We are loading grayscale images here. The homography is stored in an xml created with FileStorage.
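
For reference, a minimal sketch (not part of the tutorial; the file name is made up) of how such an xml can be written and read back with FileStorage, so that getFirstTopLevelNode() returns the matrix:

import numpy as np
import cv2 as cv

H = np.eye(3, dtype=np.float64)  # placeholder homography
fs = cv.FileStorage('my_homography.xml', cv.FILE_STORAGE_WRITE)
fs.write('H', H)                 # written as the first (and only) top-level node
fs.release()

fs = cv.FileStorage('my_homography.xml', cv.FILE_STORAGE_READ)
print(fs.getFirstTopLevelNode().mat())  # recovers the 3x3 matrix
fs.release()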

  • Detect keypoints and compute descriptors using AKAZE

C++

 vector<KeyPoint> kpts1, kpts2;
 Mat desc1, desc2;
 Ptr<AKAZE> akaze = AKAZE::create();
 akaze->detectAndCompute(img1, noArray(), kpts1, desc1);
 akaze->detectAndCompute(img2, noArray(), kpts2, desc2);

Java

 AKAZE akaze = AKAZE.create();
 MatOfKeyPoint kpts1 = new MatOfKeyPoint(), kpts2 = new MatOfKeyPoint();
 Mat desc1 = new Mat(), desc2 = new Mat();
 akaze.detectAndCompute(img1, new Mat(), kpts1, desc1);
 akaze.detectAndCompute(img2, new Mat(), kpts2, desc2);

Python

akaze = cv.AKAZE_create()
kpts1, desc1 = akaze.detectAndCompute(img1, None)
kpts2, desc2 = akaze.detectAndCompute(img2, None)

We create AKAZE and detect and compute AKAZE keypoints and descriptors. Since we don't need the mask parameter, noArray() is used.
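
AKAZE::create() also accepts tuning parameters. The sketch below (in Python; the keyword names and values are assumptions based on the documented defaults) spells them out; lowering threshold typically yields more keypoints.

import cv2 as cv

akaze = cv.AKAZE_create(descriptor_type=cv.AKAZE_DESCRIPTOR_MLDB,  # binary MLDB descriptor
                        descriptor_size=0,       # 0 = full descriptor size
                        descriptor_channels=3,
                        threshold=0.001,         # detector response threshold
                        nOctaves=4,
                        nOctaveLayers=4)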

  • Use brute-force matcher to find 2-nn matches

C++

 BFMatcher matcher(NORM_HAMMING);
 vector< vector<DMatch> > nn_matches;
 matcher.knnMatch(desc1, desc2, nn_matches, 2);

Java

 DescriptorMatcher matcher = DescriptorMatcher.create(DescriptorMatcher.BRUTEFORCE_HAMMING);
 List<MatOfDMatch> knnMatches = new ArrayList<>();
 matcher.knnMatch(desc1, desc2, knnMatches, 2);

Python

matcher = cv.DescriptorMatcher_create(cv.DescriptorMatcher_BRUTEFORCE_HAMMING)
nn_matches = matcher.knnMatch(desc1, desc2, 2)

We use Hamming distance, because AKAZE uses binary descriptors by default.
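
To make the distance concrete, here is a small sketch (the descriptors are random and hypothetical) showing that NORM_HAMMING simply counts the differing bits between two binary descriptors:

import numpy as np
import cv2 as cv

rng = np.random.default_rng(0)
d1 = rng.integers(0, 256, (1, 61), dtype=np.uint8)  # random 61-byte binary descriptor (the typical length of AKAZE's default MLDB descriptor)
d2 = rng.integers(0, 256, (1, 61), dtype=np.uint8)
print(cv.norm(d1, d2, cv.NORM_HAMMING))             # number of differing bits
print(np.count_nonzero(np.unpackbits(d1 ^ d2)))     # same count, computed by hand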

  • Use 2-nn matches and ratio criterion to find correct keypoint matches

C++

 vector<KeyPoint> matched1, matched2;
 for(size_t i = 0; i < nn_matches.size(); i++) {
 DMatch first = nn_matches[i][0];
 float dist1 = nn_matches[i][0].distance;
 float dist2 = nn_matches[i][1].distance;
 if(dist1 < nn_match_ratio * dist2) {
 matched1.push_back(kpts1[first.queryIdx]);
 matched2.push_back(kpts2[first.trainIdx]);
 }
 }

Java

 float ratioThreshold = 0.8f; // Nearest neighbor matching ratio
 List<KeyPoint> listOfMatched1 = new ArrayList<>();
 List<KeyPoint> listOfMatched2 = new ArrayList<>();
 List<KeyPoint> listOfKeypoints1 = kpts1.toList();
 List<KeyPoint> listOfKeypoints2 = kpts2.toList();
 for (int i = 0; i < knnMatches.size(); i++) {
 DMatch[] matches = knnMatches.get(i).toArray();
 float dist1 = matches[0].distance;
 float dist2 = matches[1].distance;
 if (dist1 < ratioThreshold * dist2) {
 listOfMatched1.add(listOfKeypoints1.get(matches[0].queryIdx));
 listOfMatched2.add(listOfKeypoints2.get(matches[0].trainIdx));
 }
 }

Python

matched1 = []
matched2 = []
nn_match_ratio = 0.8 # Nearest neighbor matching ratio
for m, n in nn_matches:
 if m.distance < nn_match_ratio * n.distance:
 matched1.append(kpts1[m.queryIdx])
 matched2.append(kpts2[m.trainIdx])

If the closest match distance is significantly lower than the second closest one, then the match is correct (the match is not ambiguous).
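
An alternative filter (not used in this tutorial) is BFMatcher's cross-check, which keeps a match only when the two descriptors select each other as nearest neighbours; it is used with match() rather than knnMatch(). A sketch, reusing desc1 and desc2 from the listing above:

import cv2 as cv

cross_matcher = cv.BFMatcher_create(cv.NORM_HAMMING, crossCheck=True)
cross_matches = cross_matcher.match(desc1, desc2)               # desc1/desc2 as computed earlier
cross_matches = sorted(cross_matches, key=lambda m: m.distance) # best matches first
print('# cross-checked matches:', len(cross_matches))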

  • Check if our matches fit in the homography model

C++

 vector<DMatch> good_matches;
 vector<KeyPoint> inliers1, inliers2;
 for(size_t i = 0; i < matched1.size(); i++) {
 Mat col = Mat::ones(3, 1, CV_64F);
 col.at<double>(0) = matched1[i].pt.x;
 col.at<double>(1) = matched1[i].pt.y;
 col = homography * col;
 col /= col.at<double>(2);
 double dist = sqrt( pow(col.at<double>(0) - matched2[i].pt.x, 2) +
 pow(col.at<double>(1) - matched2[i].pt.y, 2));
 if(dist < inlier_threshold) {
 int new_i = static_cast<int>(inliers1.size());
 inliers1.push_back(matched1[i]);
 inliers2.push_back(matched2[i]);
 good_matches.push_back(DMatch(new_i, new_i, 0));
 }
 }

Java

 double inlierThreshold = 2.5; // Distance threshold to identify inliers with homography check
 List<KeyPoint> listOfInliers1 = new ArrayList<>();
 List<KeyPoint> listOfInliers2 = new ArrayList<>();
 List<DMatch> listOfGoodMatches = new ArrayList<>();
 for (int i = 0; i < listOfMatched1.size(); i++) {
 Mat col = new Mat(3, 1, CvType.CV_64F);
 double[] colData = new double[(int) (col.total() * col.channels())];
 colData[0] = listOfMatched1.get(i).pt.x;
 colData[1] = listOfMatched1.get(i).pt.y;
 colData[2] = 1.0;
 col.put(0, 0, colData);
 Mat colRes = new Mat();
 Core.gemm(homography, col, 1.0, new Mat(), 0.0, colRes);
 colRes.get(0, 0, colData);
 Core.multiply(colRes, new Scalar(1.0 / colData[2]), col);
 col.get(0, 0, colData);
 double dist = Math.sqrt(Math.pow(colData[0] - listOfMatched2.get(i).pt.x, 2) +
 Math.pow(colData[1] - listOfMatched2.get(i).pt.y, 2));
 if (dist < inlierThreshold) {
 listOfGoodMatches.add(new DMatch(listOfInliers1.size(), listOfInliers2.size(), 0));
 listOfInliers1.add(listOfMatched1.get(i));
 listOfInliers2.add(listOfMatched2.get(i));
 }
 }

Python

inliers1 = []
inliers2 = []
good_matches = []
inlier_threshold = 2.5 # Distance threshold to identify inliers with homography check
for i, m in enumerate(matched1):
 col = np.ones((3,1), dtype=np.float64)
 col[0:2,0] = m.pt
 col = np.dot(homography, col)
 col /= col[2,0]
 dist = sqrt(pow(col[0,0] - matched2[i].pt[0], 2) +\
 pow(col[1,0] - matched2[i].pt[1], 2))
 if dist < inlier_threshold:
 good_matches.append(cv.DMatch(len(inliers1), len(inliers2), 0))
 inliers1.append(matched1[i])
 inliers2.append(matched2[i])

If the distance from the first keypoint's projection to the second keypoint is less than the threshold, then it fits the homography model.

We create a new set of matches for the inliers, because it is required by the drawing function.
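
The same projection can also be done in one call with cv.perspectiveTransform; a sketch (not part of the tutorial) reusing homography, matched1, matched2 and inlier_threshold from the listing above:

import numpy as np
import cv2 as cv

pts1 = np.float32([kp.pt for kp in matched1]).reshape(-1, 1, 2)
proj = cv.perspectiveTransform(pts1, homography).reshape(-1, 2)  # matched1 points mapped into image 2
pts2 = np.float32([kp.pt for kp in matched2])
dists = np.linalg.norm(proj - pts2, axis=1)                      # reprojection error per match
inlier_mask = dists < inlier_threshold                           # same 2.5 px threshold as above
print('# Inliers:', int(inlier_mask.sum()))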

  • Output results

C++

 Mat res;
 drawMatches(img1, inliers1, img2, inliers2, good_matches, res);
 imwrite("akaze_result.png", res);
 double inlier_ratio = inliers1.size() / (double) matched1.size();
 cout << "A-KAZE Matching Results" << endl;
 cout << "*******************************" << endl;
 cout << "# Keypoints 1: \t" << kpts1.size() << endl;
 cout << "# Keypoints 2: \t" << kpts2.size() << endl;
 cout << "# Matches: \t" << matched1.size() << endl;
 cout << "# Inliers: \t" << inliers1.size() << endl;
 cout << "# Inliers Ratio: \t" << inlier_ratio << endl;
 cout << endl;
 imshow("result", res);
 waitKey();

Java

 Mat res = new Mat();
 MatOfKeyPoint inliers1 = new MatOfKeyPoint(listOfInliers1.toArray(new KeyPoint[listOfInliers1.size()]));
 MatOfKeyPoint inliers2 = new MatOfKeyPoint(listOfInliers2.toArray(new KeyPoint[listOfInliers2.size()]));
 MatOfDMatch goodMatches = new MatOfDMatch(listOfGoodMatches.toArray(new DMatch[listOfGoodMatches.size()]));
 Features2d.drawMatches(img1, inliers1, img2, inliers2, goodMatches, res);
 Imgcodecs.imwrite("akaze_result.png", res);
 double inlierRatio = listOfInliers1.size() / (double) listOfMatched1.size();
 System.out.println("A-KAZE Matching Results");
 System.out.println("*******************************");
 System.out.println("# Keypoints 1: \t" + listOfKeypoints1.size());
 System.out.println("# Keypoints 2: \t" + listOfKeypoints2.size());
 System.out.println("# Matches: \t" + listOfMatched1.size());
 System.out.println("# Inliers: \t" + listOfInliers1.size());
 System.out.println("# Inliers Ratio: \t" + inlierRatio);
 HighGui.imshow("result", res);
 HighGui.waitKey();

Python

res = np.empty((max(img1.shape[0], img2.shape[0]), img1.shape[1]+img2.shape[1], 3), dtype=np.uint8)
cv.drawMatches(img1, inliers1, img2, inliers2, good_matches, res)
cv.imwrite("akaze_result.png", res)
inlier_ratio = len(inliers1) / float(len(matched1))
print('A-KAZE Matching Results')
print('*******************************')
print('# Keypoints 1: \t', len(kpts1))
print('# Keypoints 2: \t', len(kpts2))
print('# Matches: \t', len(matched1))
print('# Inliers: \t', len(inliers1))
print('# Inliers Ratio: \t', inlier_ratio)
cv.imshow('result', res)
cv.waitKey()

Here we save the resulting image and print some statistics.

Results

Found matches
[Figure: inlier matches drawn between graf1.png and graf3.png (akaze_result.png)]

Depending on your OpenCV version, you should get results coherent with the following:

Keypoints 1: 2943
Keypoints 2: 3511
Matches: 447
Inliers: 308
Inlier Ratio: 0.689038