Learning features2d in OpenCV: Object Detection with the SIFT and SURF Algorithms

Overview

       The earlier posts "SURF和SIFT算子实现特征点检测" and "SURF算子实现特征点提取与匹配" briefly covered detecting keypoints with the SIFT and SURF operators and computing feature descriptors for them. Building on that, we can go one step further and locate a known object in a scene (object detection) using a perspective transform and spatial mapping. Concretely, the implementation first detects keypoints and extracts descriptors with SURF/SIFT, then keeps the good matches produced by FLANN-based matching, then estimates the corresponding perspective transform from those matches with findHomography, and finally maps the object's corner points with perspectiveTransform to obtain the object's position in the scene.

       The experiments were run with opencv2.4.0 + vs2008 + win7. Note that in OpenCV 2.4.x, SurfFeatureDetector/SiftFeatureDetector are declared in opencv2/nonfree/features2d.hpp, while FlannBasedMatcher is declared in opencv2/features2d/features2d.hpp.
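
       Besides including the right headers, the nonfree module must also be linked. A minimal sketch of the additional linker dependencies, assuming a default OpenCV 2.4.0 install on Windows (the 240 suffix follows the installed version; Debug builds use the *d.lib variants):

// VS2008: Project Properties -> Linker -> Input -> Additional Dependencies
// (names below assume OpenCV 2.4.0; adjust the version suffix to your install)
opencv_core240.lib
opencv_highgui240.lib
opencv_features2d240.lib
opencv_flann240.lib
opencv_nonfree240.lib
opencv_calib3d240.lib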

The SURF operator

First, let us detect the object with the SURF operator. The code is as follows:

/**
* @Overview: detect a known object in a scene with the SURF operator
* @Classes and functions: SurfFeatureDetector + SurfDescriptorExtractor + FlannBasedMatcher + findHomography + perspectiveTransform
* @Steps:
*		Step 1: detect keypoints in both images with the SURF detector SurfFeatureDetector
*		Step 2: compute a feature vector (descriptor) for each detected keypoint with SurfDescriptorExtractor
*		Step 3: match the keypoints through their descriptors with FlannBasedMatcher, discarding poor matches with a threshold
*		Step 4: estimate the corresponding perspective transform from the matched keypoints with findHomography
*		Step 5: map the object's corner points with perspectiveTransform to locate the object in the scene
* @author: holybin
*/

#include <ctime>
#include <cfloat>	//DBL_MAX, used below when scanning for the minimum match distance
#include <iostream>
#include "opencv2/core/core.hpp"	
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/nonfree/features2d.hpp"	//SurfFeatureDetector实际在该头文件中
#include "opencv2/features2d/features2d.hpp"	//FlannBasedMatcher实际在该头文件中
#include "opencv2/calib3d/calib3d.hpp"	//findHomography所需头文件
using namespace cv;
using namespace std;

int main( int argc, char** argv )
{
	Mat imgObject = imread( "D:\\opencv_pic\\cat3d120.jpg", CV_LOAD_IMAGE_GRAYSCALE );
	Mat imgScene = imread( "D:\\opencv_pic\\cat0.jpg", CV_LOAD_IMAGE_GRAYSCALE );

	if( !imgObject.data || !imgScene.data )
	{ 
		cout<< " --(!) Error reading images "<<endl;
		return -1; 
	}

	double begin = clock();

	///-- Step 1: detect keypoints with the SURF detector
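	//keypoints whose Hessian response falls below minHessian are discarded; larger values keep fewer but more stable keypoints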
	int minHessian = 400;
	SurfFeatureDetector detector( minHessian );
	vector<KeyPoint> keypointsObject, keypointsScene;
	detector.detect( imgObject, keypointsObject );
	detector.detect( imgScene, keypointsScene );
	cout<<"object--number of keypoints: "<<keypointsObject.size()<<endl;
	cout<<"scene--number of keypoints: "<<keypointsScene.size()<<endl;

	///-- Step 2: extract features with the SURF extractor (compute the descriptors)
	SurfDescriptorExtractor extractor;
	Mat descriptorsObject, descriptorsScene;
	extractor.compute( imgObject, keypointsObject, descriptorsObject );
	extractor.compute( imgScene, keypointsScene, descriptorsScene );

	///-- Step 3: match the descriptors with the FLANN-based matcher
	FlannBasedMatcher matcher;
	vector< DMatch > allMatches;
	matcher.match( descriptorsObject, descriptorsScene, allMatches );
	cout<<"number of matches before filtering: "<<allMatches.size()<<endl;

	//-- compute the maximum and minimum distance over all matches
	double maxDist = 0;
	double minDist = DBL_MAX;	//start from a large value so the loop finds the true minimum
	for( int i = 0; i < descriptorsObject.rows; i++ )
	{
		double dist = allMatches[i].distance;
		if( dist < minDist )
			minDist = dist;
		if( dist > maxDist )
			maxDist = dist;
	}
	printf("	max dist : %f \n", maxDist );
	printf("	min dist : %f \n", minDist );

	//-- filter the matches, keeping only the good ones (criterion used here: distance < 2*minDist)
	vector< DMatch > goodMatches;
	for( int i = 0; i < descriptorsObject.rows; i++ )
	{
		if( allMatches[i].distance < 2*minDist )
			goodMatches.push_back( allMatches[i]); 
	}
	cout<<"number of matches after filtering: "<<goodMatches.size()<<endl;

	//-- draw the matching result
	Mat resultImg;
	drawMatches( imgObject, keypointsObject, imgScene, keypointsScene, 
		goodMatches, resultImg, Scalar::all(-1), Scalar::all(-1), vector<char>(), 
		DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS //do not draw unmatched keypoints
		); 
	//-- print the keypoint correspondence of each good match
	for( size_t i = 0; i < goodMatches.size(); i++ )
		printf( "	good match %d: keypointsObject [%d]  -- keypointsScene [%d]\n", (int)i, 
		goodMatches[i].queryIdx, goodMatches[i].trainIdx );

	///-- Step 4: estimate the corresponding perspective transform with findHomography
	vector<Point2f> object;
	vector<Point2f> scene;
	for( size_t i = 0; i < goodMatches.size(); i++ )
	{
		//-- collect the keypoints from the good matches: a match is a one-to-one pairing of keypoints,
		//-- so the keypoint indices can be read from it
		//-- e.g. goodMatches[i].queryIdx and goodMatches[i].trainIdx index the two keypoints of match i
		object.push_back( keypointsObject[ goodMatches[i].queryIdx ].pt );
		scene.push_back( keypointsScene[ goodMatches[i].trainIdx ].pt ); 
	}
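	//CV_RANSAC makes the estimate robust to remaining mismatches; at least 4 point pairs are required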
	Mat H = findHomography( object, scene, CV_RANSAC );

	///-- Step 5: map the object corners with perspectiveTransform to locate the object in the scene
	std::vector<Point2f> objCorners(4);
	objCorners[0] = Point2f( 0, 0 );
	objCorners[1] = Point2f( (float)imgObject.cols, 0 );
	objCorners[2] = Point2f( (float)imgObject.cols, (float)imgObject.rows );
	objCorners[3] = Point2f( 0, (float)imgObject.rows );
	std::vector<Point2f> sceneCorners(4);
	perspectiveTransform( objCorners, sceneCorners, H);

	//-- draw lines between the four corners of the detected object; the Point2f( imgObject.cols, 0 ) offset shifts
	//-- the corners into the scene half of resultImg, which drawMatches placed to the right of the object image
	line( resultImg, sceneCorners[0] + Point2f( imgObject.cols, 0), sceneCorners[1] + Point2f( imgObject.cols, 0), Scalar(0, 255, 0), 4 );
	line( resultImg, sceneCorners[1] + Point2f( imgObject.cols, 0), sceneCorners[2] + Point2f( imgObject.cols, 0), Scalar( 0, 255, 0), 4 );
	line( resultImg, sceneCorners[2] + Point2f( imgObject.cols, 0), sceneCorners[3] + Point2f( imgObject.cols, 0), Scalar( 0, 255, 0), 4 );
	line( resultImg, sceneCorners[3] + Point2f( imgObject.cols, 0), sceneCorners[0] + Point2f( imgObject.cols, 0), Scalar( 0, 255, 0), 4 );

	//-- show the detection result
	imshow("detection result", resultImg );

	double end = clock();
	cout<<"\nSURF--elapsed time: "<<(end - begin)/CLOCKS_PER_SEC*1000<<" ms\n";

	waitKey(0);
	return 0;
}
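
A remark on the Step 3 filtering: distance < 2*minDist is a simple heuristic. A widely used alternative, shown here only as a sketch that plugs into Step 3 of the program above (the 0.7 ratio is a typical choice, not a value from this post), is Lowe's ratio test: request the two nearest neighbors of each descriptor with knnMatch and keep a match only when the best distance is clearly smaller than the second best:

	//-- alternative to the minDist heuristic: Lowe's ratio test
	vector< vector<DMatch> > knnMatches;
	matcher.knnMatch( descriptorsObject, descriptorsScene, knnMatches, 2 );	//2 nearest neighbors per query descriptor
	vector<DMatch> ratioGoodMatches;
	for( size_t i = 0; i < knnMatches.size(); i++ )
	{
		if( knnMatches[i].size() == 2
			&& knnMatches[i][0].distance < 0.7f * knnMatches[i][1].distance )
			ratioGoodMatches.push_back( knnMatches[i][0] );	//unambiguous match, keep it
	}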

Experimental results:



The SIFT operator

For comparison, we now run the same detection with the SIFT operator; it suffices to replace SurfFeatureDetector with SiftFeatureDetector and SurfDescriptorExtractor with SiftDescriptorExtractor. The code is as follows:

/**
* @Overview: detect a known object in a scene with the SIFT operator
* @Classes and functions: SiftFeatureDetector + SiftDescriptorExtractor + FlannBasedMatcher + findHomography + perspectiveTransform
* @Steps:
*		Step 1: detect keypoints in both images with the SIFT detector SiftFeatureDetector
*		Step 2: compute a feature vector (descriptor) for each detected keypoint with SiftDescriptorExtractor
*		Step 3: match the keypoints through their descriptors with FlannBasedMatcher, discarding poor matches with a threshold
*		Step 4: estimate the corresponding perspective transform from the matched keypoints with findHomography
*		Step 5: map the object's corner points with perspectiveTransform to locate the object in the scene
* @author: holybin
*/

#include <ctime>
#include <cfloat>	//DBL_MAX, used below when scanning for the minimum match distance
#include <iostream>
#include "opencv2/core/core.hpp"	
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/nonfree/features2d.hpp"	//SiftFeatureDetector实际在该头文件中
#include "opencv2/features2d/features2d.hpp"	//FlannBasedMatcher实际在该头文件中
#include "opencv2/calib3d/calib3d.hpp"	//findHomography所需头文件
using namespace cv;
using namespace std;

int main( int argc, char** argv )
{
	Mat imgObject = imread( "D:\\opencv_pic\\cat3d120.jpg", CV_LOAD_IMAGE_GRAYSCALE );
	Mat imgScene = imread( "D:\\opencv_pic\\cat0.jpg", CV_LOAD_IMAGE_GRAYSCALE );

	if( !imgObject.data || !imgScene.data )
	{ 
		cout<< " --(!) Error reading images "<<endl;
		return -1; 
	}

	double begin = clock();

	///-- Step 1: detect keypoints with the SIFT detector
	//int minHessian = 400;	//leftover from the SURF version; SIFT takes no such threshold
	SiftFeatureDetector detector;	//constructed with default parameters
	vector<KeyPoint> keypointsObject, keypointsScene;
	detector.detect( imgObject, keypointsObject );
	detector.detect( imgScene, keypointsScene );
	cout<<"object--number of keypoints: "<<keypointsObject.size()<<endl;
	cout<<"scene--number of keypoints: "<<keypointsScene.size()<<endl;

	///-- Step 2: extract features with the SIFT extractor (compute the descriptors)
	SiftDescriptorExtractor extractor;
	Mat descriptorsObject, descriptorsScene;
	extractor.compute( imgObject, keypointsObject, descriptorsObject );
	extractor.compute( imgScene, keypointsScene, descriptorsScene );

	///-- Step 3: match the descriptors with the FLANN-based matcher
	FlannBasedMatcher matcher;
	vector< DMatch > allMatches;
	matcher.match( descriptorsObject, descriptorsScene, allMatches );
	cout<<"number of matches before filtering: "<<allMatches.size()<<endl;

	//-- compute the maximum and minimum distance over all matches
	double maxDist = 0;
	double minDist = DBL_MAX;	//start from a large value so the loop finds the true minimum
	for( int i = 0; i < descriptorsObject.rows; i++ )
	{
		double dist = allMatches[i].distance;
		if( dist < minDist )
			minDist = dist;
		if( dist > maxDist )
			maxDist = dist;
	}
	printf("	max dist : %f \n", maxDist );
	printf("	min dist : %f \n", minDist );

	//-- filter the matches, keeping only the good ones (criterion used here: distance < 2*minDist)
	vector< DMatch > goodMatches;
	for( int i = 0; i < descriptorsObject.rows; i++ )
	{
		if( allMatches[i].distance < 2*minDist )
			goodMatches.push_back( allMatches[i]); 
	}
	cout<<"number of matches after filtering: "<<goodMatches.size()<<endl;

	//-- draw the matching result
	Mat resultImg;
	drawMatches( imgObject, keypointsObject, imgScene, keypointsScene, 
		goodMatches, resultImg, Scalar::all(-1), Scalar::all(-1), vector<char>(), 
		DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS //do not draw unmatched keypoints
		); 
	//-- print the keypoint correspondence of each good match
	for( size_t i = 0; i < goodMatches.size(); i++ )
		printf( "	good match %d: keypointsObject [%d]  -- keypointsScene [%d]\n", (int)i, 
		goodMatches[i].queryIdx, goodMatches[i].trainIdx );

	///-- Step 4: estimate the corresponding perspective transform with findHomography
	vector<Point2f> object;
	vector<Point2f> scene;
	for( size_t i = 0; i < goodMatches.size(); i++ )
	{
		//-- collect the keypoints from the good matches: a match is a one-to-one pairing of keypoints,
		//-- so the keypoint indices can be read from it
		//-- e.g. goodMatches[i].queryIdx and goodMatches[i].trainIdx index the two keypoints of match i
		object.push_back( keypointsObject[ goodMatches[i].queryIdx ].pt );
		scene.push_back( keypointsScene[ goodMatches[i].trainIdx ].pt ); 
	}
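	//CV_RANSAC makes the estimate robust to remaining mismatches; at least 4 point pairs are required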
	Mat H = findHomography( object, scene, CV_RANSAC );

	///-- Step 5: map the object corners with perspectiveTransform to locate the object in the scene
	std::vector<Point2f> objCorners(4);
	objCorners[0] = Point2f( 0, 0 );
	objCorners[1] = Point2f( (float)imgObject.cols, 0 );
	objCorners[2] = Point2f( (float)imgObject.cols, (float)imgObject.rows );
	objCorners[3] = Point2f( 0, (float)imgObject.rows );
	std::vector<Point2f> sceneCorners(4);
	perspectiveTransform( objCorners, sceneCorners, H);

	//-- draw lines between the four corners of the detected object; the Point2f( imgObject.cols, 0 ) offset shifts
	//-- the corners into the scene half of resultImg, which drawMatches placed to the right of the object image
	line( resultImg, sceneCorners[0] + Point2f( imgObject.cols, 0), sceneCorners[1] + Point2f( imgObject.cols, 0), Scalar(0, 255, 0), 4 );
	line( resultImg, sceneCorners[1] + Point2f( imgObject.cols, 0), sceneCorners[2] + Point2f( imgObject.cols, 0), Scalar( 0, 255, 0), 4 );
	line( resultImg, sceneCorners[2] + Point2f( imgObject.cols, 0), sceneCorners[3] + Point2f( imgObject.cols, 0), Scalar( 0, 255, 0), 4 );
	line( resultImg, sceneCorners[3] + Point2f( imgObject.cols, 0), sceneCorners[0] + Point2f( imgObject.cols, 0), Scalar( 0, 255, 0), 4 );

	//-- show the detection result
	imshow("detection result", resultImg );

	double end = clock();
	cout<<"\nSIFT--elapsed time: "<<(end - begin)/CLOCKS_PER_SEC*1000<<" ms\n";

	waitKey(0);
	return 0;
}

Experimental results:



From the results we can see that SURF ran slower than SIFT in this experiment, mainly because it produced more matches and the matching stage therefore cost more computation; on the other hand, since it finds more matched points than SIFT, its detection accuracy is higher.
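
One caveat about the timings: clock() has platform-dependent resolution and semantics. A minimal sketch of a more portable measurement, assuming OpenCV 2.4's cv::getTickCount/cv::getTickFrequency:

	//-- measure wall-clock time with OpenCV's tick counter instead of clock()
	int64 t0 = getTickCount();
	//... detection, extraction, matching ...
	double elapsedMs = (getTickCount() - t0) / getTickFrequency() * 1000.0;
	cout<<"elapsed time: "<<elapsedMs<<" ms"<<endl;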

