利用特征点(Brief,ORB,SIFT)进行图像匹配,模板匹配

头文件

在 VS2010 + OpenCV 2.3.1 环境下:

#include "StdAfx.h"
#include "opencv2/core/core.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
//#include <opencv2/legacy/legacy.hpp>
#include <vector>
#include <iostream>
#include <fstream>
#include <math.h>

using namespace cv;
using namespace std;
// Forward declarations of the matching experiments implemented below.
void getMatchLR(string imgpath);        // match the left half of an image against its right half
void getMatchDT(string imgpath);
//void getSURFFeature(string imgpath);
void getORBFeature(string imgpath);
void MatchTemplate(string imgname);
void MatchTemplateORB(string imgname);  // sliding-window template matching with ORB features
void MatchTemplateSIFT(string imgname); // sliding-window template matching with SIFT features
void testMatch();

// Input/output directories (Windows paths; adjust per machine).
static string imgdir = "E:\\input_resize\\";          // large search images
static string Matchsavedir = "E:\\macth_result\\";    // match visualizations / results
static string TemplateDir = "E:\\template_resize\\";  // small template (object) images
//static string ORBsavedir = "ORB_result\\";
// Global image handles shared by the routines below. The IplImage values are
// headers over the cv::Mat data; they own no pixel buffers of their own.
IplImage mat_src_lpl;    // IplImage header for mat_src
IplImage OutImage_lpl;   // IplImage header for OutImage
IplImage TemplateIpl;    // IplImage header for TemplateMat (unused in this excerpt)

IplImage* dst_left;      // left half of the source image (getMatchLR)
IplImage* dst_right;     // right half of the source image (getMatchLR)
IplImage *src;           // points at mat_src_lpl
IplImage *TemplateIplPtr;

Mat mat_src;             // large search image
Mat OutImage;            // drawMatches output
Mat TemplateMat;         // template image

在 Ubuntu 12.04 LTS + OpenCV 2.4 环境下:
#include "opencv2/core/core.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <opencv2/legacy/legacy.hpp>
#include <vector>
#include <iostream>
#include <fstream>
#include <math.h>

主函数

int main()
{
	string imgname = "07_in.jpg";
	//testMatch();
	MatchTemplateSIFT(imgname);
	//getMatchLR(imgname);

	return 0;
}

SIFT

利用SIFT特征进行模板匹配,template1为一个物体的小图,然后在一张含有多个同样物体的大图上进行匹配。

在大图上滑窗处理,得到每一个滑窗的特征,进行匹配,计算距离均值,作为一个灰度值,最后生成一个大图。

特征子,描述符,匹配方法分别为:

new SiftFeatureDetector
new SiftDescriptorExtractor;
BruteForceMatcher<L2<float>>

详细代码

// Slide a template-sized window across a large image and, at every window
// position, match SIFT features between the template and the window.
// Each window yields one similarity value exp(-meanDistance) written to a
// text file; one row of values is emitted per y-step.
// @param imgname  file name of the large search image inside `imgdir`.
void MatchTemplateSIFT(string imgname)
{
	vector<KeyPoint> LeftKey;    // template keypoints
	vector<KeyPoint> RightKey;   // current-window keypoints
	Mat LeftDescriptor;
	Mat RightDescriptor;
	vector<DMatch> Matches;
	IplImage* dst = NULL;        // per-window copy of the ROI

	ofstream fout("E:\\picin\\rBRIEF_Test\\templateCompareResult.txt");
	int xstep = 2;               // sliding-window stride in x
	int ystep = 2;               // sliding-window stride in y
	string TemplateName = "template1.jpg";
	string TemplateImgPath = TemplateDir + TemplateName;

	string imgpath = imgdir + imgname;
	string resultPath = Matchsavedir + imgname;

	TemplateMat = imread(TemplateImgPath, CV_LOAD_IMAGE_GRAYSCALE);
	if(!TemplateMat.data){
		cout<<"no template exist";
		return ;
	}
	int TemplateWidth = TemplateMat.cols;
	int TemplateHeight = TemplateMat.rows;
	std::cout<<"TemplateWidth "<<TemplateWidth<<endl;
	std::cout<<"TemplateHeight "<<TemplateHeight<<endl;

	// Detect and describe the template once, up front.
	FeatureDetector *pDetector = new SiftFeatureDetector;
	pDetector->detect(TemplateMat, LeftKey);
	DescriptorExtractor *pExtractor = new SiftDescriptorExtractor;
	pExtractor->compute(TemplateMat, LeftKey, LeftDescriptor);

	// SIFT descriptors are float vectors, so use an L2 brute-force matcher.
	DescriptorMatcher *pMatcher = new BruteForceMatcher<L2<float> >;

	mat_src = imread(imgpath, CV_LOAD_IMAGE_GRAYSCALE );
	if(!mat_src.data) {
		cout<<"no src img";
		// BUGFIX: the original leaked the detector/extractor/matcher here.
		delete pDetector;
		delete pExtractor;
		delete pMatcher;
		return ;
	}
	mat_src_lpl = IplImage(mat_src);  // header only; shares mat_src's pixels
	src  = &mat_src_lpl;
	long ImgWidth = src->width;
	long ImgHeight = src->height;
	std::cout<<"ImgWidth "<<ImgWidth<<endl;
	std::cout<<"ImgHeight "<<ImgHeight<<endl;

	for(long start_y = 0; start_y < ImgHeight - TemplateHeight; start_y += ystep){
		for(long start_x = 0; start_x < ImgWidth - TemplateWidth; start_x += xstep){

			// Copy the current window out of the source image.
			cvSetImageROI(src, cvRect(start_x, start_y, TemplateWidth, TemplateHeight));
			dst = cvCreateImage(cvSize(TemplateWidth, TemplateHeight),
			        IPL_DEPTH_8U,
			        src->nChannels);
			cvCopy(src, dst, 0);
			cvResetImageROI(src);

			Mat DstImage = Mat(dst, false); // header only, no pixel copy
			pDetector->detect(DstImage, RightKey);
			pExtractor->compute(DstImage, RightKey, RightDescriptor);
			pMatcher->match(LeftDescriptor, RightDescriptor, Matches);

			double sum = 0;
			for(vector<DMatch>::iterator dite = Matches.begin(); dite != Matches.end(); dite++)
			{
				sum += dite->distance;
			}
			// Match count scaled by 10 so exp(-sum/matchSize) stays in a
			// usable range (same scaling as MatchTemplateORB).
			int matchSize = Matches.size()*10;

			if(matchSize > 1){
				fout<<exp(-sum/matchSize)<<" ";
			}else{
				fout<<exp(-100.0)<<" ";  // no matches: near-zero similarity
			}

			// BUGFIX: release the per-window image; the original leaked one
			// IplImage per window position.
			cvReleaseImage(&dst);
		}
		// BUGFIX: row separator per y-step, consistent with MatchTemplateORB
		// (the original wrote the whole map on a single line).
		fout<<"\n";
	}
	std::cout<<"finish";
	fout.close();

	delete pDetector;
	delete pExtractor;
	delete pMatcher;

}

SURF

SURF特征同SIFT特征。

ORB

特征检测、描述符提取与匹配方法的代码如下

	ORB orb;
	orb(TemplateMat,Mat(),LeftKey,LeftDescriptor);
	DescriptorMatcher *pMatcher = new BruteForceMatcher<HammingLUT>;

详细代码如下

// Slide a template-sized window across a large image and, at every window
// position, match ORB features between the template and the window.
// Each window yields one similarity value exp(-meanDistance) written to a
// text file; one row of values is emitted per y-step.
// @param imgname  file name of the large search image inside `imgdir`.
void MatchTemplateORB(string imgname)
{
	vector<KeyPoint> LeftKey;    // template keypoints
	vector<KeyPoint> RightKey;   // current-window keypoints
	Mat LeftDescriptor;
	Mat RightDescriptor;
	vector<DMatch> Matches;
	IplImage* dst = NULL;        // per-window copy of the ROI

	ofstream fout("E:\\picin\\rBRIEF_Test\\templateCompareResult.txt");
	int xstep = 2;               // sliding-window stride in x
	int ystep = 2;               // sliding-window stride in y
	string TemplateName = "template2.jpg";
	string TemplateImgPath = TemplateDir + TemplateName;

	string imgpath = imgdir + imgname;
	string resultPath = Matchsavedir + imgname;

	TemplateMat = imread(TemplateImgPath, CV_LOAD_IMAGE_GRAYSCALE);
	if(!TemplateMat.data){
		cout<<"no template exist";
		return ;
	}
	int TemplateWidth = TemplateMat.cols;
	int TemplateHeight = TemplateMat.rows;
	std::cout<<"TemplateWidth "<<TemplateWidth<<endl;
	std::cout<<"TemplateHeight "<<TemplateHeight<<endl;
	// Detect and describe the template once, up front.
	ORB orb;
	orb(TemplateMat, Mat(), LeftKey, LeftDescriptor);

	// ORB descriptors are binary strings, so match with Hamming distance.
	DescriptorMatcher *pMatcher = new BruteForceMatcher<HammingLUT>;

	mat_src = imread(imgpath, CV_LOAD_IMAGE_GRAYSCALE );
	if(!mat_src.data) {
		cout<<"no src img";
		// BUGFIX: the original leaked the matcher here.
		delete pMatcher;
		return ;
	}
	mat_src_lpl = IplImage(mat_src);  // header only; shares mat_src's pixels
	src  = &mat_src_lpl;
	long ImgWidth = src->width;
	long ImgHeight = src->height;
	std::cout<<"ImgWidth "<<ImgWidth<<endl;
	std::cout<<"ImgHeight "<<ImgHeight<<endl;

	for(long start_y = 0; start_y < ImgHeight - TemplateHeight; start_y += ystep){
		for(long start_x = 0; start_x < ImgWidth - TemplateWidth; start_x += xstep){

			// Copy the current window out of the source image.
			cvSetImageROI(src, cvRect(start_x, start_y, TemplateWidth, TemplateHeight));
			dst = cvCreateImage(cvSize(TemplateWidth, TemplateHeight),
			        IPL_DEPTH_8U,
			        src->nChannels);
			cvCopy(src, dst, 0);
			cvResetImageROI(src);

			Mat DstImage = Mat(dst, false); // header only, no pixel copy
			orb(DstImage, Mat(), RightKey, RightDescriptor);
			pMatcher->match(LeftDescriptor, RightDescriptor, Matches);

			double sum = 0;
			for(vector<DMatch>::iterator dite = Matches.begin(); dite != Matches.end(); dite++)
			{
				sum += dite->distance;
			}
			// Match count scaled by 10 so exp(-sum/matchSize) stays in a
			// usable range (same scaling as MatchTemplateSIFT).
			int matchSize = Matches.size()*10;

			if(matchSize > 1){
				fout<<exp(-sum/matchSize)<<" ";
			}else{
				fout<<exp(-100.0)<<" ";  // no matches: near-zero similarity
			}

			// BUGFIX: release the per-window image; the original leaked one
			// IplImage per window position.
			cvReleaseImage(&dst);
		}
		fout<<"\n";  // one row of output per y-step
	}
	std::cout<<"finish";
	fout.close();

	delete pMatcher;

}

对利用特征点对图片左右部分进行匹配

// Split an image into left and right halves, detect ORB features in each
// half, brute-force match them (Hamming distance), draw the "good" matches
// (distance < 0.5 * max distance) in a window, and save the visualization
// to `Matchsavedir`.
// @param imgname  file name of the image inside `imgdir`.
void getMatchLR(string imgname)
{

	string imgpath = imgdir + imgname;
	mat_src = imread(imgpath, CV_LOAD_IMAGE_GRAYSCALE );
	if(!mat_src.data) {
		cout<<"no img";
		return ;
	}
	mat_src_lpl = IplImage(mat_src);  // header only; shares mat_src's pixels
	src  = &mat_src_lpl;

	// Copy out the left half.
	cvSetImageROI(src, cvRect(0, 0, 0.5*src->width, src->height));
	dst_left = cvCreateImage(cvSize(0.5*src->width, src->height),
	        IPL_DEPTH_8U,
	        src->nChannels);
	cvCopy(src, dst_left, 0);
	cvResetImageROI(src);

	// Copy out the right half.
	cvSetImageROI(src, cvRect(0.5*src->width, 0, 0.5*src->width, src->height));
	dst_right = cvCreateImage(cvSize(0.5*src->width, src->height),
	        IPL_DEPTH_8U,
	        src->nChannels);
	cvCopy(src, dst_right, 0);
	cvResetImageROI(src);

	// Convert IplImage to cv::Mat (headers only, no pixel copy).
	Mat matLeftImage = Mat(dst_left, false);
	Mat matRightImage = Mat(dst_right, false);

	// Key points and their descriptors.
	vector<KeyPoint> LeftKey;
	vector<KeyPoint> RightKey;
	Mat LeftDescriptor;
	Mat RightDescriptor;
	vector<DMatch> Matches;

	/*
	// Alternative: FAST detector + BRIEF descriptors.
	FeatureDetector *pDetector = new FastFeatureDetector;
	pDetector->detect(matLeftImage, LeftKey);
	pDetector->detect(matRightImage, RightKey);
	delete pDetector;

	DescriptorExtractor *pExtractor = new BriefDescriptorExtractor;
	pExtractor->compute(matLeftImage, LeftKey, LeftDescriptor);
	pExtractor->compute(matRightImage, RightKey, RightDescriptor);
	delete pExtractor;
	*/

	ORB orb;
	orb(matLeftImage, Mat(), LeftKey, LeftDescriptor);
	orb(matRightImage, Mat(), RightKey, RightDescriptor);

	// Match binary ORB descriptors with Hamming distance.
	//DescriptorMatcher *pMatcher = new FlannBasedMatcher;
	DescriptorMatcher *pMatcher = new BruteForceMatcher<HammingLUT>;
	pMatcher->match(LeftDescriptor, RightDescriptor, Matches);
	delete pMatcher;

	double max_dist = 0; double min_dist = 200;

	// Quick calculation of max and min distances between matched keypoints.
	// BUGFIX: iterate over Matches.size() rather than LeftDescriptor.rows —
	// the original indexed out of bounds whenever match() produced fewer
	// matches than query descriptors (e.g. an empty right half).
	for( size_t i = 0; i < Matches.size(); i++ )
	{ double dist = Matches[i].distance;
	  if( dist < min_dist ) min_dist = dist;
	  if( dist > max_dist ) max_dist = dist;
	}

	//printf("-- Max dist : %f \n", max_dist );
	//printf("-- Min dist : %f \n", min_dist );

	// Keep only "good" matches, i.e. those well below the maximum distance.
	std::vector< DMatch > good_matches;

	for( size_t i = 0; i < Matches.size(); i++ )
	{ if( Matches[i].distance < 0.5*max_dist )
	  { good_matches.push_back( Matches[i]); }
	}

	// Show result.
	drawMatches( matLeftImage, LeftKey, matRightImage, RightKey,
	               good_matches, OutImage, Scalar::all(-1), Scalar::all(-1),
	               vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
	OutImage_lpl = IplImage(OutImage);
	cvNamedWindow( "Match features", 1);
	cvShowImage("Match features", &(OutImage_lpl));
	cvWaitKey( 0 );
	cvDestroyWindow( "Match features" );

	string savepath = Matchsavedir + imgname;
	// Save the drawn matches.
	imwrite(savepath, OutImage );

	// BUGFIX: release the half-image copies; the original leaked both on
	// every call.
	cvReleaseImage(&dst_left);
	cvReleaseImage(&dst_right);
}

以上代码中被注释的部分

	// Detect key points from image
	FeatureDetector *pDetector = new FastFeatureDetector; // 
	pDetector->detect(matLeftImage, LeftKey);
	pDetector->detect(matRightImage, RightKey);
	delete pDetector;

	// Extract descriptors
	DescriptorExtractor *pExtractor = new BriefDescriptorExtractor; // 
	pExtractor->compute(matLeftImage, LeftKey, LeftDescriptor);
	pExtractor->compute(matRightImage, RightKey, RightDescriptor);

是利用的BRIEF特征,进行左右匹配





  • 0
    点赞
  • 4
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
sift 算法、surf 算法和 orb 算法是常用的特征点检测和匹配算法。 SIFT(尺度不变特征变换)算法是一种基于局部特征的图像匹配算法。该算法通过检测关键点,并提取关键点的主方向以及局部邻域的描述子,通过比较描述子来进行特征点匹配。SIFT 算法对旋转、尺度缩放、亮度变化等具有较好的不变性,因此能够在不同环境下进行特征点匹配。 SURF(加速稳健特征)算法是对 SIFT 算法的改进算法。SURF 算法采用快速 Hessian 矩阵检测特征点,对特征点周围的区域进行尺度不变性和旋转不变性验证,同样提取特征点的主方向和局部邻域的描述子,并使用哈希等方法进行特征点匹配。SURF 算法在计算效率上相对于 SIFT 算法有一定优势,但在对变形和视角变化等方面的鲁棒性上稍逊于 SIFT 算法。 ORB(Oriented FAST and Rotated BRIEF)算法是一种基于 FAST 特征检测和 BRIEF 描述子的算法。ORB 算法首先通过 FAST 特征检测算法检测特征点,然后利用 BRIEF 描述子进行特征描述。ORB 算法使用了旋转不变性和尺度不变性的设计,通过计算旋转角度和构建金字塔来提高检测的鲁棒性。ORB 算法在计算速度上比 SIFT、SURF 更快,同时保持了一定的特征点匹配能力。 在实验中,我们可以分别使用这三种算法检测并提取图像的特征点,然后通过对比特征点的描述子来进行匹配。通过比较匹配的结果,我们可以评估这三种算法在特征点匹配任务上的性能表现,包括计算速度和匹配准确度等方面的指标。根据实验结果,我们可以选择适合具体应用场景的算法进行特征点匹配任务。

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值