代码解释:SURF特征 + FLANN特征匹配 + KNN筛选匹配点 + 单应性矩阵映射

SURF特征 + FLANN特征匹配 + KNN筛选匹配点 + 单应性矩阵映射(代码来源见原文链接)

特征提取的三大步骤:

(0.图像预处理)

1.特征的提取

2.计算特征向量

3.特征匹配

(4.减少误匹配率等后处理过程)

 
//#include "stdafx.h"
#include <stdio.h>
#include <iostream>
#include <opencv2/core/core.hpp>
#include "opencv2/nonfree/features2d.hpp"
#include<opencv2/legacy/legacy.hpp>
#include <opencv2/highgui/highgui.hpp>

using namespace cv;
using namespace std;

int main()
{
	//Mat 一种数字矩阵表示数字图像
	Mat img_1 = imread("E:/海康威视云台摄像机控制/海康威视SDK/test/图片-液晶屏幕/向上/20180108144506_ch01.bmp", CV_LOAD_IMAGE_GRAYSCALE);//创建头部并分配矩阵
	Mat img_2 = imread("E:/海康威视云台摄像机控制/海康威视SDK/test/图片-液晶屏幕/向上/20180108144640_ch01.bmp", CV_LOAD_IMAGE_GRAYSCALE);

	if (!img_1.data || !img_2.data)//judge if the two photoes are same
	{
		return -1;
	}

	//-- Step 1: Detect the keypoints using SURF Detector 检测关键点
	int minHessian = 400;//其构造函数参数(minHessian)用来平衡提取到的特征点的数量和特征提取的稳定性的,对于不同的特征提取器改参数具有不同的含义和取值范围。

	SurfFeatureDetector detector(minHessian);

	vector<KeyPoint> keypoints_1, keypoints_2;//存放关键点的结果

	detector.detect(img_1, keypoints_1);//检测img_1的关键点keypoints_1
	detector.detect(img_2, keypoints_2);

	//-- Step 2: Calculate descriptors (feature vectors)  对得到的特征点提取并计算特征向量(特征描述符)
	SurfDescriptorExtractor extractor;//代表用Surf特征点检测,若为SiftFeatureDetector即为Sift特征点检测
	Mat descriptors_1, descriptors_2;//描述符

	extractor.compute(img_1, keypoints_1, descriptors_1);//计算img_1中点keypoints1的描述符
	extractor.compute(img_2, keypoints_2, descriptors_2);

	//-- Step 3: Matching descriptor vectors using FLANN matcher
	FlannBasedMatcher matcher;//FLANN特征匹配法(另:BruteForce暴力匹配,但效果不好)
	vector< DMatch > matches;//最后的匹配结果保存在vector<DMatch>中
	vector<vector<DMatch>> m_knnMatches;
	//vector<DescriptorMatcher> zjm1; 用来进行描述符匹配,具体用法请CDSN

	matches.clear();
	const float minRatio = 1.f / 1.5f;//minRatio一个比值
	//使用KNN-matching算法,令K=2。则每个match得到两个最接近的descriptor,然后计算最接近距离和次接近距离之间的比值,当比值大于既定值时,才作为最终match。为了尽量消除False-positive matches的误匹配
	// KNN match will return 2 nearest   
	// matches for each query descriptor,此处为descriptors_1,但是一般不是点集一为训练集点集二为查询集吗?
	matcher.knnMatch(descriptors_1, descriptors_2, m_knnMatches, 2);

	for (int i = 0; i<m_knnMatches.size(); i++)
	{
		const DMatch& bestMatch = m_knnMatches[i][0];
		const DMatch& betterMatch = m_knnMatches[i][1];

		float distanceRatio = bestMatch.distance / betterMatch.distance;//distanceRatio另一个比值

		if (distanceRatio<minRatio)
		{
			matches.push_back(bestMatch);
		}
	}

	vector< DMatch > good_matches;

	if (!matches.size())
	{
		cout << "matches is empty! " << endl;
		return -1;
	}
	else if (matches.size()<4)//匹配集至少含四个点,因为单应性矩阵
	{
		cout << matches.size() << " points matched is not enough " << endl;
	}
	else //单应性矩阵的计算最少得使用4个点
	{
		for (int i = 0; i < matches.size(); i++)
		{
			good_matches.push_back(matches[i]);
		}

		Mat img_matches;

		//drawMatches 参数含义:
		//good_matches - 源图像1的特征点匹配源图像2的特征点
		/*img_matches - 输出图像(具体由最后一个参数Flags决定)
		第一个Scalar::all(-1) - 匹配的颜色(特征点和连线), 若matchColor == Scalar::all(-1),颜色随机
		第二个Scalar::all(-1) - 单个点的颜色,即未配对的特征点,若matchColor == Scalar::all(-1),颜色随机
		vector<char>() - (定义 matchesMask – Mask)决定哪些点将被画出,若为空,则画出所有匹配点
		flags – Fdefined by DrawMatchesFlags*/

		drawMatches(img_1, keypoints_1, img_2, keypoints_2,
			good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
			vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);


		//☆★☆★☆★☆★☆★☆★☆★☆★☆★☆★☆★☆★☆★☆★☆★☆★
		//-- Localize the object from img_1 in img_2 
		vector<Point2f> obj;
		vector<Point2f> scene;

		for (int i = 0; i < good_matches.size(); i++)
		{
			//-- Get the keypoints from the good matches
			obj.push_back(keypoints_1[good_matches[i].queryIdx].pt);
			scene.push_back(keypoints_2[good_matches[i].trainIdx].pt);
		}
		Mat H = findHomography(obj, scene, CV_RANSAC);//CV_EXPORTS Mat findHomography( InputArray srcPoints, InputArray dstPoints,OutputArray mask, int method = 0, double ransacReprojThreshold = 3); mask - 随机样本一致性RANSAC方法

		//-- Get the corners from the image_1 ( the object to be "detected" )
		vector<Point2f> obj_corners(4);//应该是说定义确定单应性矩阵的四个角点,然后输入角点
		obj_corners[0] = cvPoint(0, 0);
		obj_corners[1] = cvPoint(img_1.cols, 0);
		obj_corners[2] = cvPoint(img_1.cols, img_1.rows);
		obj_corners[3] = cvPoint(0, img_1.rows);
		vector<Point2f> scene_corners(4);

		perspectiveTransform(obj_corners, scene_corners, H);//进行透射变换???透射变换是干什么的?

		for (int i = 0; i < 4; i++)
		{
			/* 作用和perspectiveTransform一样
			double x = obj_corners[i].x;
			double y = obj_corners[i].y;
			double Z = 1./( H.at<double>(2,0)*x + H.at<double>(2,1)*y + H.at<double>(2,2) );
			double X = ( H.at<double>(0,0)*x + H.at<double>(0,1)*y + H.at<double>(0,2) )*Z;
			double Y = ( H.at<double>(1,0)*x + H.at<double>(1,1)*y + H.at<double>(1,2) )*Z;
			scene_corners[i] = cvPoint( cvRound(X) + img_1.cols, cvRound(Y) );*/
			scene_corners[i].x += img_1.cols;
		}
		
		line(img_matches, scene_corners[0], scene_corners[1], Scalar(0, 255, 0), 2);
		line(img_matches, scene_corners[1], scene_corners[2], Scalar(0, 255, 0), 2);
		line(img_matches, scene_corners[2], scene_corners[3], Scalar(0, 255, 0), 2);
		line(img_matches, scene_corners[3], scene_corners[0], Scalar(0, 255, 0), 2);
		imshow("Good Matches & Object detection", img_matches);
	}
	

	waitKey(0);

	return 0;
}
  • 1
    点赞
  • 23
    收藏
    觉得还不错? 一键收藏
  • 1
    评论
评论 1
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值