2D Features framework — Features2D + Homography to find a known object (OpenCV v4.8.0)

Previous Tutorial: Feature Matching with FLANN

Next Tutorial: Detection of planar objects

Original author: Ana Huamán
Compatibility: OpenCV >= 3.0

Goal

In this tutorial you will learn how to:

Use the function cv::findHomography to find the transform between matched keypoints.
Use the function cv::perspectiveTransform to map the points.

Warning
You need the OpenCV contrib modules to be able to use the SURF features (alternatives are ORB, KAZE, ... features).
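If the contrib modules are unavailable, the same pipeline can run with ORB, which ships in the main OpenCV package. The Python sketch below is an addition, not part of the original tutorial; since ORB produces binary descriptors, it matches with Hamming distance (here via BFMatcher) instead of NORM_L2:

import cv2 as cv

# Sketch: ORB-based detection and matching, no contrib modules required.
img_object = cv.imread(cv.samples.findFile('box.png'), cv.IMREAD_GRAYSCALE)
img_scene = cv.imread(cv.samples.findFile('box_in_scene.png'), cv.IMREAD_GRAYSCALE)

detector = cv.ORB_create(nfeatures=1000)  # lives in the main package
keypoints_obj, descriptors_obj = detector.detectAndCompute(img_object, None)
keypoints_scene, descriptors_scene = detector.detectAndCompute(img_scene, None)

# ORB descriptors are binary, so use Hamming distance
# (FLANN would need an LSH index for binary descriptors)
matcher = cv.BFMatcher(cv.NORM_HAMMING)
knn_matches = matcher.knnMatch(descriptors_obj, descriptors_scene, 2)
good_matches = []
for pair in knn_matches:
    if len(pair) == 2 and pair[0].distance < 0.75 * pair[1].distance:
        good_matches.append(pair[0])

From here on, the homography estimation steps of the tutorial apply unchanged to these matches.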

Code

C++
The tutorial code is shown below. You can also download it from here.

#include <iostream>
#include "opencv2/core.hpp"
#ifdef HAVE_OPENCV_XFEATURES2D
#include "opencv2/calib3d.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/features2d.hpp"
#include "opencv2/xfeatures2d.hpp"
using namespace cv;
using namespace cv::xfeatures2d;
using std::cout;
using std::endl;
const char* keys =
 "{ help h | | Print help message. }"
 "{ input1 | box.png | Path to input image 1. }"
 "{ input2 | box_in_scene.png | Path to input image 2. }";
int main( int argc, char* argv[] )
{
 CommandLineParser parser( argc, argv, keys );
 Mat img_object = imread( samples::findFile( parser.get<String>("input1") ), IMREAD_GRAYSCALE );
 Mat img_scene = imread( samples::findFile( parser.get<String>("input2") ), IMREAD_GRAYSCALE );
 if ( img_object.empty() || img_scene.empty() )
 {
 cout << "Could not open or find the image!\n" << endl;
 parser.printMessage();
 return -1;
 }
 //-- Step 1: Detect the keypoints using SURF Detector, compute the descriptors
 int minHessian = 400;
 Ptr<SURF> detector = SURF::create( minHessian );
 std::vector<KeyPoint> keypoints_object, keypoints_scene;
 Mat descriptors_object, descriptors_scene;
 detector->detectAndCompute( img_object, noArray(), keypoints_object, descriptors_object );
 detector->detectAndCompute( img_scene, noArray(), keypoints_scene, descriptors_scene );
 //-- Step 2: Matching descriptor vectors with a FLANN based matcher
 // Since SURF is a floating-point descriptor NORM_L2 is used
 Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create(DescriptorMatcher::FLANNBASED);
 std::vector< std::vector<DMatch> > knn_matches;
 matcher->knnMatch( descriptors_object, descriptors_scene, knn_matches, 2 );
 //-- Filter matches using the Lowe's ratio test
 const float ratio_thresh = 0.75f;
 std::vector<DMatch> good_matches;
 for (size_t i = 0; i < knn_matches.size(); i++)
 {
 if (knn_matches[i][0].distance < ratio_thresh * knn_matches[i][1].distance)
 {
 good_matches.push_back(knn_matches[i][0]);
 }
 }
 //-- Draw matches
 Mat img_matches;
 drawMatches( img_object, keypoints_object, img_scene, keypoints_scene, good_matches, img_matches, Scalar::all(-1),
 Scalar::all(-1), std::vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
 //-- Localize the object
 std::vector<Point2f> obj;
 std::vector<Point2f> scene;
 for( size_t i = 0; i < good_matches.size(); i++ )
 {
 //-- Get the keypoints from the good matches
 obj.push_back( keypoints_object[ good_matches[i].queryIdx ].pt );
 scene.push_back( keypoints_scene[ good_matches[i].trainIdx ].pt );
 }
 Mat H = findHomography( obj, scene, RANSAC );
 //-- Get the corners from the image_1 ( the object to be "detected" )
 std::vector<Point2f> obj_corners(4);
 obj_corners[0] = Point2f(0, 0);
 obj_corners[1] = Point2f( (float)img_object.cols, 0 );
 obj_corners[2] = Point2f( (float)img_object.cols, (float)img_object.rows );
 obj_corners[3] = Point2f( 0, (float)img_object.rows );
 std::vector<Point2f> scene_corners(4);
 perspectiveTransform( obj_corners, scene_corners, H);
 //-- Draw lines between the corners (the mapped object in the scene - image_2 )
 line( img_matches, scene_corners[0] + Point2f((float)img_object.cols, 0),
 scene_corners[1] + Point2f((float)img_object.cols, 0), Scalar(0, 255, 0), 4 );
 line( img_matches, scene_corners[1] + Point2f((float)img_object.cols, 0),
 scene_corners[2] + Point2f((float)img_object.cols, 0), Scalar( 0, 255, 0), 4 );
 line( img_matches, scene_corners[2] + Point2f((float)img_object.cols, 0),
 scene_corners[3] + Point2f((float)img_object.cols, 0), Scalar( 0, 255, 0), 4 );
 line( img_matches, scene_corners[3] + Point2f((float)img_object.cols, 0),
 scene_corners[0] + Point2f((float)img_object.cols, 0), Scalar( 0, 255, 0), 4 );
 //-- Show detected matches
 imshow("Good Matches & Object detection", img_matches );
 waitKey();
 return 0;
}
#else
int main()
{
 std::cout << "This tutorial code needs the xfeatures2d contrib module to be run." << std::endl;
 return 0;
}
#endif

Java
The tutorial code is shown below. You can also download it from here.

import java.util.ArrayList;
import java.util.List;
import org.opencv.calib3d.Calib3d;
import org.opencv.core.Core;
import org.opencv.core.CvType;
import org.opencv.core.DMatch;
import org.opencv.core.KeyPoint;
import org.opencv.core.Mat;
import org.opencv.core.MatOfByte;
import org.opencv.core.MatOfDMatch;
import org.opencv.core.MatOfKeyPoint;
import org.opencv.core.MatOfPoint2f;
import org.opencv.core.Point;
import org.opencv.core.Scalar;
import org.opencv.features2d.DescriptorMatcher;
import org.opencv.features2d.Features2d;
import org.opencv.highgui.HighGui;
import org.opencv.imgcodecs.Imgcodecs;
import org.opencv.imgproc.Imgproc;
import org.opencv.xfeatures2d.SURF;
class SURFFLANNMatchingHomography {
 public void run(String[] args) {
 String filenameObject = args.length > 1 ? args[0] : "../data/box.png";
 String filenameScene = args.length > 1 ? args[1] : "../data/box_in_scene.png";
 Mat imgObject = Imgcodecs.imread(filenameObject, Imgcodecs.IMREAD_GRAYSCALE);
 Mat imgScene = Imgcodecs.imread(filenameScene, Imgcodecs.IMREAD_GRAYSCALE);
 if (imgObject.empty() || imgScene.empty()) {
 System.err.println("Cannot read images!");
 System.exit(0);
 }
 //-- Step 1: Detect the keypoints using SURF Detector, compute the descriptors
 double hessianThreshold = 400;
 int nOctaves = 4, nOctaveLayers = 3;
 boolean extended = false, upright = false;
 SURF detector = SURF.create(hessianThreshold, nOctaves, nOctaveLayers, extended, upright);
 MatOfKeyPoint keypointsObject = new MatOfKeyPoint(), keypointsScene = new MatOfKeyPoint();
 Mat descriptorsObject = new Mat(), descriptorsScene = new Mat();
 detector.detectAndCompute(imgObject, new Mat(), keypointsObject, descriptorsObject);
 detector.detectAndCompute(imgScene, new Mat(), keypointsScene, descriptorsScene);
 //-- Step 2: Matching descriptor vectors with a FLANN based matcher
 // Since SURF is a floating-point descriptor NORM_L2 is used
 DescriptorMatcher matcher = DescriptorMatcher.create(DescriptorMatcher.FLANNBASED);
 List<MatOfDMatch> knnMatches = new ArrayList<>();
 matcher.knnMatch(descriptorsObject, descriptorsScene, knnMatches, 2);
 //-- Filter matches using the Lowe's ratio test
 float ratioThresh = 0.75f;
 List<DMatch> listOfGoodMatches = new ArrayList<>();
 for (int i = 0; i < knnMatches.size(); i++) {
 if (knnMatches.get(i).rows() > 1) {
 DMatch[] matches = knnMatches.get(i).toArray();
 if (matches[0].distance < ratioThresh * matches[1].distance) {
 listOfGoodMatches.add(matches[0]);
 }
 }
 }
 MatOfDMatch goodMatches = new MatOfDMatch();
 goodMatches.fromList(listOfGoodMatches);
 //-- Draw matches
 Mat imgMatches = new Mat();
 Features2d.drawMatches(imgObject, keypointsObject, imgScene, keypointsScene, goodMatches, imgMatches, Scalar.all(-1),
 Scalar.all(-1), new MatOfByte(), Features2d.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS);
 //-- Localize the object
 List<Point> obj = new ArrayList<>();
 List<Point> scene = new ArrayList<>();
 List<KeyPoint> listOfKeypointsObject = keypointsObject.toList();
 List<KeyPoint> listOfKeypointsScene = keypointsScene.toList();
 for (int i = 0; i < listOfGoodMatches.size(); i++) {
 //-- Get the keypoints from the good matches
 obj.add(listOfKeypointsObject.get(listOfGoodMatches.get(i).queryIdx).pt);
 scene.add(listOfKeypointsScene.get(listOfGoodMatches.get(i).trainIdx).pt);
 }
 MatOfPoint2f objMat = new MatOfPoint2f(), sceneMat = new MatOfPoint2f();
 objMat.fromList(obj);
 sceneMat.fromList(scene);
 double ransacReprojThreshold = 3.0;
 Mat H = Calib3d.findHomography( objMat, sceneMat, Calib3d.RANSAC, ransacReprojThreshold );
 //-- Get the corners from the image_1 ( the object to be "detected" )
 Mat objCorners = new Mat(4, 1, CvType.CV_32FC2), sceneCorners = new Mat();
 float[] objCornersData = new float[(int) (objCorners.total() * objCorners.channels())];
 objCorners.get(0, 0, objCornersData);
 objCornersData[0] = 0;
 objCornersData[1] = 0;
 objCornersData[2] = imgObject.cols();
 objCornersData[3] = 0;
 objCornersData[4] = imgObject.cols();
 objCornersData[5] = imgObject.rows();
 objCornersData[6] = 0;
 objCornersData[7] = imgObject.rows();
 objCorners.put(0, 0, objCornersData);
 Core.perspectiveTransform(objCorners, sceneCorners, H);
 float[] sceneCornersData = new float[(int) (sceneCorners.total() * sceneCorners.channels())];
 sceneCorners.get(0, 0, sceneCornersData);
 //-- Draw lines between the corners (the mapped object in the scene - image_2 )
 Imgproc.line(imgMatches, new Point(sceneCornersData[0] + imgObject.cols(), sceneCornersData[1]),
 new Point(sceneCornersData[2] + imgObject.cols(), sceneCornersData[3]), new Scalar(0, 255, 0), 4);
 Imgproc.line(imgMatches, new Point(sceneCornersData[2] + imgObject.cols(), sceneCornersData[3]),
 new Point(sceneCornersData[4] + imgObject.cols(), sceneCornersData[5]), new Scalar(0, 255, 0), 4);
 Imgproc.line(imgMatches, new Point(sceneCornersData[4] + imgObject.cols(), sceneCornersData[5]),
 new Point(sceneCornersData[6] + imgObject.cols(), sceneCornersData[7]), new Scalar(0, 255, 0), 4);
 Imgproc.line(imgMatches, new Point(sceneCornersData[6] + imgObject.cols(), sceneCornersData[7]),
 new Point(sceneCornersData[0] + imgObject.cols(), sceneCornersData[1]), new Scalar(0, 255, 0), 4);
 //-- Show detected matches
 HighGui.imshow("Good Matches & Object detection", imgMatches);
 HighGui.waitKey(0);
 System.exit(0);
 }
}
public class SURFFLANNMatchingHomographyDemo {
 public static void main(String[] args) {
 // Load the native OpenCV library
 System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
 new SURFFLANNMatchingHomography().run(args);
 }
}

Python
The tutorial code is shown below. You can also download it from here.

from __future__ import print_function
import cv2 as cv
import numpy as np
import argparse
parser = argparse.ArgumentParser(description='Code for Feature Matching with FLANN tutorial.')
parser.add_argument('--input1', help='Path to input image 1.', default='box.png')
parser.add_argument('--input2', help='Path to input image 2.', default='box_in_scene.png')
args = parser.parse_args()
img_object = cv.imread(cv.samples.findFile(args.input1), cv.IMREAD_GRAYSCALE)
img_scene = cv.imread(cv.samples.findFile(args.input2), cv.IMREAD_GRAYSCALE)
if img_object is None or img_scene is None:
 print('Could not open or find the images!')
 exit(0)
#-- Step 1: Detect the keypoints using SURF Detector, compute the descriptors
minHessian = 400
detector = cv.xfeatures2d_SURF.create(hessianThreshold=minHessian)
keypoints_obj, descriptors_obj = detector.detectAndCompute(img_object, None)
keypoints_scene, descriptors_scene = detector.detectAndCompute(img_scene, None)
#-- Step 2: Matching descriptor vectors with a FLANN based matcher
# Since SURF is a floating-point descriptor NORM_L2 is used
matcher = cv.DescriptorMatcher_create(cv.DescriptorMatcher_FLANNBASED)
knn_matches = matcher.knnMatch(descriptors_obj, descriptors_scene, 2)
#-- Filter matches using the Lowe's ratio test
ratio_thresh = 0.75
good_matches = []
for m,n in knn_matches:
 if m.distance < ratio_thresh * n.distance:
 good_matches.append(m)
#-- Draw matches
img_matches = np.empty((max(img_object.shape[0], img_scene.shape[0]), img_object.shape[1]+img_scene.shape[1], 3), dtype=np.uint8)
cv.drawMatches(img_object, keypoints_obj, img_scene, keypoints_scene, good_matches, img_matches, flags=cv.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)
#-- Localize the object
obj = np.empty((len(good_matches),2), dtype=np.float32)
scene = np.empty((len(good_matches),2), dtype=np.float32)
for i in range(len(good_matches)):
 #-- Get the keypoints from the good matches
 obj[i,0] = keypoints_obj[good_matches[i].queryIdx].pt[0]
 obj[i,1] = keypoints_obj[good_matches[i].queryIdx].pt[1]
 scene[i,0] = keypoints_scene[good_matches[i].trainIdx].pt[0]
 scene[i,1] = keypoints_scene[good_matches[i].trainIdx].pt[1]
H, _ = cv.findHomography(obj, scene, cv.RANSAC)
#-- Get the corners from the image_1 ( the object to be "detected" )
obj_corners = np.empty((4,1,2), dtype=np.float32)
obj_corners[0,0,0] = 0
obj_corners[0,0,1] = 0
obj_corners[1,0,0] = img_object.shape[1]
obj_corners[1,0,1] = 0
obj_corners[2,0,0] = img_object.shape[1]
obj_corners[2,0,1] = img_object.shape[0]
obj_corners[3,0,0] = 0
obj_corners[3,0,1] = img_object.shape[0]
scene_corners = cv.perspectiveTransform(obj_corners, H)
#-- Draw lines between the corners (the mapped object in the scene - image_2 )
cv.line(img_matches, (int(scene_corners[0,0,0] + img_object.shape[1]), int(scene_corners[0,0,1])),\
 (int(scene_corners[1,0,0] + img_object.shape[1]), int(scene_corners[1,0,1])), (0,255,0), 4)
cv.line(img_matches, (int(scene_corners[1,0,0] + img_object.shape[1]), int(scene_corners[1,0,1])),\
 (int(scene_corners[2,0,0] + img_object.shape[1]), int(scene_corners[2,0,1])), (0,255,0), 4)
cv.line(img_matches, (int(scene_corners[2,0,0] + img_object.shape[1]), int(scene_corners[2,0,1])),\
 (int(scene_corners[3,0,0] + img_object.shape[1]), int(scene_corners[3,0,1])), (0,255,0), 4)
cv.line(img_matches, (int(scene_corners[3,0,0] + img_object.shape[1]), int(scene_corners[3,0,1])),\
 (int(scene_corners[0,0,0] + img_object.shape[1]), int(scene_corners[0,0,1])), (0,255,0), 4)
#-- Show detected matches
cv.imshow('Good Matches & Object detection', img_matches)
cv.waitKey()
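
For reference (this note is an addition to the tutorial), the homography H estimated above is a 3x3 matrix acting on homogeneous coordinates, and perspectiveTransform applies exactly this mapping, including the perspective division, to each object corner:

s \begin{pmatrix} x' \\ y' \\ 1 \end{pmatrix} = H \begin{pmatrix} x \\ y \\ 1 \end{pmatrix},
\qquad
x' = \frac{h_{11} x + h_{12} y + h_{13}}{h_{31} x + h_{32} y + h_{33}}, \quad
y' = \frac{h_{21} x + h_{22} y + h_{23}}{h_{31} x + h_{32} y + h_{33}}

This is why four corners of the object image suffice to draw the full outline of the object in the scene image.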

Result

And here is the result for the detected object (highlighted in green). Note that since the homography is estimated with a RANSAC approach, detected false matches will not impact the homography calculation.

[Figure: good matches drawn between the two images, with the detected object outlined in green in the scene image]
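
The Python code above discards the inlier mask that cv.findHomography returns alongside H. As a small sketch (an addition, not part of the original tutorial) of how RANSAC's outlier rejection can be inspected, the mask can filter the matches down to the inliers:

# Sketch: reuse the RANSAC inlier mask from the Python code above.
H, mask = cv.findHomography(obj, scene, cv.RANSAC)  # mask[i] == 1 marks an inlier
inlier_matches = [m for m, keep in zip(good_matches, mask.ravel()) if keep]
print('RANSAC kept %d of %d matches' % (len(inlier_matches), len(good_matches)))

Drawing only inlier_matches instead of good_matches makes the robustness of the RANSAC estimate visible in the output image.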
