Displaying SiftGPU Keypoints and Feature Matches with OpenCV

Displaying the keypoints and feature matches produced by SiftGPU with OpenCV involves the following main steps:

  1. Extract keypoints with SiftGPU
  2. Read the images with OpenCV
  3. Convert the SiftGPU keypoints to OpenCV's KeyPoint type
  4. Display the keypoints with OpenCV
  5. Convert the SiftGPU descriptors to OpenCV's descriptor type
  6. Match the features with SiftGPU
  7. Convert the SiftGPU matches to OpenCV's DMatch type

The full code is given below.
The program reads the paths of the images to process from image.txt; fill in that file with the paths of the images you actually want to process.
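For reference, image.txt is just a plain-text list with one image path per line (each line read with getline becomes one entry). A minimal example with placeholder file names; replace them with your own images:

img1.jpg
img2.jpg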

#include <stdlib.h>
#include <iostream>
#include <fstream>
#include <string>
#include <vector>
#include <opencv2/opencv.hpp>
#include <opencv2/core/core.hpp>  
#include <opencv2/highgui/highgui.hpp>  
#include <GL/glut.h>

#include <boost/timer.hpp>

#include "compare.h"

using namespace std;
using namespace cv;

#if !defined(SIFTGPU_STATIC) && !defined(SIFTGPU_DLL_RUNTIME) 
// SIFTGPU_STATIC comes from compiler
#define SIFTGPU_DLL_RUNTIME
// Load at runtime if the above macro defined
// comment the macro above to use static linking
#endif

#ifdef _WIN32
	#ifdef SIFTGPU_DLL_RUNTIME
		#define WIN32_LEAN_AND_MEAN
		#include <windows.h>
		#define FREE_MYLIB FreeLibrary
		#define GET_MYPROC GetProcAddress
	#else
		//define this to get dll import definition for win32
		#define SIFTGPU_DLL
		#ifdef _DEBUG 
			#pragma comment(lib, "../lib/SIFTGPU.lib")
		#else
			#pragma comment(lib, "../lib/SIFTGPU.lib")
		#endif
	#endif
#else
	#ifdef SIFTGPU_DLL_RUNTIME
	#include <dlfcn.h>
	#define FREE_MYLIB dlclose
	#define GET_MYPROC dlsym
	#endif
#endif

#include <SiftGPU.h>

int main(int argc, char** argv)
{
#ifdef SIFTGPU_DLL_RUNTIME
	#ifdef _WIN32
		#ifdef _DEBUG
				HMODULE  hsiftgpu = LoadLibrary("SiftGPU_d.dll");
		#else
				HMODULE  hsiftgpu = LoadLibrary("siftgpu.dll");
		#endif
	#else
			void * hsiftgpu = dlopen("libsiftgpu.so", RTLD_LAZY);
	#endif
			if (hsiftgpu == NULL) {
	#ifdef _WIN32
				cout << "error loading the SiftGPU DLL, error code " << GetLastError() << endl;
				system("pause");
	#else
				cout << "error loading libsiftgpu.so: " << dlerror() << endl;
	#endif
				return -1;
			}
#endif

	// create SiftGPU and set its parameters
	SiftGPU sift;
	// -fo -1 : start processing at octave -1 (upsampled image); -v 1 : print only the feature count and timing
	char* myargv[4] = { (char*)"-fo", (char*)"-1", (char*)"-v", (char*)"1" };
	sift.ParseParam(4, myargv);
	// check whether the hardware fully supports SiftGPU
	int support = sift.CreateContextGL();
	if (support != SiftGPU::SIFTGPU_FULL_SUPPORTED)
	{
		cerr << "SiftGPU is not supported!" << endl;
		return 2;
	}
	// read the image file names listed in image.txt
	vector<string> imageNames;
	string fileName;
	int imgCount = 0;
	ifstream fin("image.txt");
	while (getline(fin, fileName)){
		imgCount++;
		imageNames.push_back(fileName);
	}
	if (imgCount == 0){
		cerr << "cannot get image files" << endl;
		return 1;
	}
	else{
		cout << "Read In " << imgCount << "  Pictures" << endl;
	}
	// read the images with OpenCV
	vector<Mat> images;
	for (int i = 0; i < imgCount; ++i){
		Mat temp = imread(imageNames[i]);
		if (temp.empty()){
			cerr << "OpenCV cannot read image " << imageNames[i] << endl;
			return 1;
		}
		images.push_back(temp);
	}
	cout << "OpenCV Read in " << images.size() << " images" << endl;
	// run SiftGPU on each image and collect its keypoints and descriptors
	vector<int> num;
	vector<vector<float>> descriptors;
	vector<vector<SiftGPU::SiftKeypoint>> keys;
	for (int i = 0; i < imgCount; ++i){
		const char* temp = imageNames[i].data();
		sift.RunSIFT(temp);
		int _num = sift.GetFeatureNum();
		num.push_back(_num);
		cout << "Keypoints Number of " << imageNames[i] << " is= " << _num << endl << endl;
		vector<SiftGPU::SiftKeypoint> _keys;
		vector<float> _descriptors;
		_keys.resize(_num);
		_descriptors.resize(128*_num);
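		// GetFeatureVector copies the keypoints and the descriptors into the two buffers;
		// the descriptor buffer holds 128 consecutive floats per keypoint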
		sift.GetFeatureVector(&_keys[0], &_descriptors[0]);	
		keys.push_back(_keys);
		descriptors.push_back(_descriptors);
	}
	// convert the SiftGPU keypoints to OpenCV's KeyPoint type
	vector<vector<KeyPoint>> cvKeypoints;
	for (int i = 0; i < imgCount; ++i){
		vector<KeyPoint> cvKeys;
		for (int j = 0; j < keys[i].size(); ++j){
			KeyPoint cvTempKey;
			cvTempKey.pt.x = keys[i][j].x;
			cvTempKey.pt.y = keys[i][j].y;
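			// note: SiftGPU::SiftKeypoint also carries a scale (s) and an orientation (o, in radians);
			// copy them into cvTempKey.size and cvTempKey.angle (degrees) if you need them later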
			cvKeys.push_back(cvTempKey);
		}
		cvKeypoints.push_back(cvKeys);
	}
	// sanity check: each image's converted keypoint count should match SiftGPU's count
	for (int i = 0; i < imgCount; ++i){
		if (cvKeypoints[i].size() != keys[i].size())
			cerr << "Image " << i << "'s Keypoints Number isn't correct." << endl;
	}
	// display the keypoints with OpenCV
	for (int i = 0; i < imgCount; ++i){
		Mat featureImage;
		drawKeypoints(images[i], cvKeypoints[i], featureImage, Scalar(255, 255, 255), DrawMatchesFlags::DEFAULT);
		namedWindow("Sift Keypoints",WINDOW_NORMAL);
		imshow("Sift Keypoints", featureImage);
		waitKey(0);
	}

	// convert the SiftGPU descriptors to OpenCV descriptor Mats
	vector<Mat> cvDescriptors;  // each Mat is a num x 128 descriptor matrix (one row per keypoint)
	for (int inum = 0; inum < imgCount; ++inum){
		// the SiftGPU descriptors of image inum, reshaped into one 128-d vector per keypoint
		vector<vector<float>> _des;
		for (int sj = 0; sj < num[inum]; ++sj){
			vector<float> temp(128, 0.0);
			for (int sk = 0; sk < 128; ++sk){
				temp[sk] = descriptors[inum][sk + 128 * sj];
			}
			_des.push_back(temp);
		}
		// the OpenCV descriptor Mat of image inum
		Mat _cvDes(num[inum], 128, CV_32F);		
		for (int cj = 0; cj < num[inum]; ++cj){
			float* pxDesMat = _cvDes.ptr<float>(cj);
			for (int ck = 0; ck < 128; ++ck){
				pxDesMat[ck] = _des[cj][ck];
			}
		}
		// (optional) print the extrema of _des
		//float smax = _des[0][0];
		//float smin = _des[0][0];
		//for (int simm = 0; simm < _des.size(); ++simm){
		//	for (int sjmm = 0; sjmm < 128; ++sjmm){
		//		if (smax < _des[simm][sjmm])
		//			smax = _des[simm][sjmm];
		//		if (smin > _des[simm][sjmm])
		//			smin = _des[simm][sjmm];
		//	}
		//}
		//cout << "SiftGPU Descriptors Max= " << smax << " , Min=" << smin << endl;
		// (optional) print the extrema of _cvDes
		//float cmax = _cvDes.at<float>(0, 0);
		//float cmin = _cvDes.at<float>(0, 0);
		//for (int cimm = 0; cimm < _cvDes.rows; ++cimm){
		//	for (int cjmm = 0; cjmm < 128; ++cjmm){
		//		if (cmax < _cvDes.at<float>(cimm,cjmm))
		//			cmax = _cvDes.at<float>(cimm, cjmm);
		//		if (cmin > _cvDes.at<float>(cimm, cjmm))
		//			cmin = _cvDes.at<float>(cimm, cjmm);
		//	}
		//}
		//cout << "OpenCV Descriptors Max= " << cmax << " , Min=" << cmin << endl;
		cvDescriptors.push_back(_cvDes);
	}
	map<pair<int, int>, vector<DMatch>> matches_matrix;
	map<pair<int, int>, int> matches_num;
	SiftMatchGPU matcher;
	matcher.VerifyContextGL();
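	// note: by default SiftMatchGPU is sized for up to 4096 features per image;
	// call matcher.SetMaxSift(...) before matching if your images produce more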
	for (int i = 0; i < imgCount - 1; ++i){
		for (int j = i + 1; j < imgCount; ++j){
			matcher.SetDescriptors(0, num[i], &descriptors[i][0]);
			matcher.SetDescriptors(1, num[j], &descriptors[j][0]);
			int(*match_buf)[2] = new int[num[i]][2];
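			// GetSiftMatch fills match_buf: match_buf[mk][0] / match_buf[mk][1] are the indices
			// of the mk-th matched feature in image i and image j respectively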
			int num_match = matcher.GetSiftMatch(num[i], match_buf);
			matches_num[make_pair(i, j)] = num_match;
			cout << "Matches between Image" << i << " and Image" << j << " are " << num_match << endl;
			// build OpenCV DMatch entries and fill queryIdx / trainIdx
			vector<DMatch> matches_tmp;
			for (int mk = 0; mk < num_match; ++mk){
				DMatch match;
				match.queryIdx = match_buf[mk][0];
				match.trainIdx = match_buf[mk][1];
				Point2f p1, p2;
				p1.x = cvKeypoints[i][match.queryIdx].pt.x;
				p1.y = cvKeypoints[i][match.queryIdx].pt.y;
				p2.x = cvKeypoints[j][match.trainIdx].pt.x;
				p2.y = cvKeypoints[j][match.trainIdx].pt.y;
				// note: DMatch::distance is filled with the pixel distance between the two
				// matched points here, not with the descriptor distance
				float dist = sqrtf((p1.x - p2.x)*(p1.x - p2.x) + (p1.y - p2.y)*(p1.y - p2.y));
				match.distance = dist;
				//cout << "Image" << i << " & Image" << j << " Match " << mk << ": " << endl;
				//cout << "Idx: queryIdx is= " << match.queryIdx << " , trainIdx is= " << match.trainIdx << endl;
				//cout << "                         Keypoint1: " << p1.x << "   " << p1.y << endl;
				//cout << "                         Keypoint2: " << p2.x << "   " << p2.y << endl;
				//cout << "                         Distance: " << match.distance << endl;
				matches_tmp.push_back(match);
			}
			matches_matrix[make_pair(i, j)] = matches_tmp;
			delete[] match_buf;
			// draw and display the matches with OpenCV
			Mat cvImgMatches;
			drawMatches(images[i], cvKeypoints[i], images[j], cvKeypoints[j], matches_tmp, cvImgMatches);
			char showName[100];
			sprintf(showName, "Matches between %d and %d", i, j);
			namedWindow(showName, WINDOW_NORMAL);
			imshow(showName, cvImgMatches);
			waitKey(0);
		}
	}
	system("pause");
	return 0;
}
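One possible way to compile the program on Linux is sketched below; it is only a sketch under several assumptions: the source is saved as main.cpp, SiftGPU has been built as libsiftgpu.so with its headers under /path/to/SiftGPU/src/SiftGPU (adjust both paths to your layout), OpenCV and freeglut development packages are installed, and the pkg-config package is named opencv (use opencv4 for OpenCV 4). Even though the code also loads libsiftgpu.so at run time with dlopen, the SiftGPU class symbols may still need to be resolved at link time, so the library is linked explicitly here, and -ldl is added for the runtime loader:

g++ -std=c++11 main.cpp -o siftgpu_demo \
    -I/path/to/SiftGPU/src/SiftGPU -L/path/to/SiftGPU/bin \
    `pkg-config --cflags --libs opencv` \
    -lsiftgpu -lglut -ldl

At run time, make sure libsiftgpu.so can be found, e.g. by adding its directory to LD_LIBRARY_PATH.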

Display results from a test with two Kermit images:

[keypoint and match images omitted]

Console output:

[console screenshot omitted]
