An optimization strategy for removing control points with large errors, based on SIFT feature point extraction with the OpenCV library


1. What is feature point extraction?
2. What are the main steps of feature point extraction?
3. Issues to be aware of


Preface

This article is entirely a personal summary. If anything is missing or wrong, please point it out and correct me, thank you. With that, let's get to the main topic.
OpenCV is written in C++; it provides C++, Python, Java and MATLAB interfaces and supports Windows, Linux, Android and Mac OS. OpenCV is mainly aimed at real-time vision applications and takes advantage of MMX and SSE instructions when they are available; support for C#, Ch, Ruby and Go is also provided nowadays.
The OpenCV library contains a number of commonly used feature point extraction algorithms, such as Harris, SIFT, ORB and SURF. Here we focus on the SIFT algorithm.



I. What is feature point extraction?

Feature point extraction is a part of feature extraction and is mainly used in machine learning, pattern recognition and image processing. It analyzes a point on an image together with the texture characteristics of the ground features around it using different algorithms or techniques, and extracts the desired feature points according to certain rules or correlations.

II. What are the main steps of feature point extraction?

Feature point detection
Keypoint search
Texture feature (descriptor) extraction
[Note: feature point filtering, matching and error rejection are not covered here; once that part is mature it will be published as a separate article. A minimal sketch of the steps above follows this note.]
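To make these steps concrete, here is a minimal, self-contained sketch using plain OpenCV (it assumes OpenCV 4.4 or newer, where SIFT is part of the main features2d module; the image path "tile.tif" is just a placeholder):

#include <opencv2/opencv.hpp>
#include <iostream>
#include <vector>

int main()
{
    // Placeholder path; replace with your own image.
    cv::Mat img = cv::imread("tile.tif", cv::IMREAD_GRAYSCALE);
    if (img.empty())
        return -1;

    // Feature point detection / keypoint search
    cv::Ptr<cv::SIFT> sift = cv::SIFT::create();
    std::vector<cv::KeyPoint> keypoints;
    sift->detect(img, keypoints);

    // Texture feature extraction: one 128-dimensional SIFT descriptor per keypoint
    cv::Mat descriptors;
    sift->compute(img, keypoints, descriptors);

    std::cout << "keypoints: " << keypoints.size()
              << ", descriptor rows: " << descriptors.rows << std::endl;
    return 0;
}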

III. Issues to be aware of

1. Reading the images

The code is as follows (example):

GDALAllRegister();
clock_t start = clock();
printf("Reading the images, please wait!\n");

ImageInfo bzinfo;
string CoordSysStr_2 = "";
GDALDataset* bzpoDataset = GDALRead(bzFile, bzinfo); // read the reference image; the struct stores its width, height, band count, geotransform (six parameters), projection and data type
CoordSysStr_2 = GDALGetProjectionRef(bzpoDataset);   // get the projection of the reference image
printf("Projection of the reference image: %s\n", bzinfo.proj); // bzinfo.proj is of type const char*
Mat img_1 = GDAL2Mat(bzpoDataset, bzinfo, bzinfo.nbands); // 1

ImageInfo pzinfo;
string CoordSysStr_1 = "";
GDALDataset* pzpoDataset = GDALRead(pzFile, pzinfo); // read the image to be processed; the struct stores its width, height, band count, geotransform (six parameters), projection and data type
CoordSysStr_1 = GDALGetProjectionRef(pzpoDataset);   // get the projection of the original image
printf("Projection of the image to be processed: %s\n", pzinfo.proj); // pzinfo.proj is of type const char*

// store the image data in a cv::Mat via the conversion helper
Mat img_2 = GDAL2Mat(pzpoDataset, pzinfo, pzinfo.nbands); // 4
//imshow("normalized 1", img_1);
//imshow("normalized 2", img_2);

2. Keypoint search

printf("Keypoint search started!\n");
bool DKP = false;
vector<KeyPoint> keypoints_1;
vector<KeyPoint> keypoints_2;
DKP = DetectorKeyPoint(img_1, img_2, keypoints_1, keypoints_2);
if (DKP == false)
{
	return false;
}

The body of the DetectorKeyPoint method is as follows:

Ptr<Feature2D> detector = cv::SIFT::create();
detector->detect(img_1, keypoints_1);
detector->detect(img_2, keypoints_2);
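The article only shows the two detect calls; judging from how DetectorKeyPoint is called in the previous snippet, its full body is presumably close to the following reconstruction (the signature and the failure condition are assumptions):

bool DetectorKeyPoint(const cv::Mat& img_1, const cv::Mat& img_2,
                      std::vector<cv::KeyPoint>& keypoints_1,
                      std::vector<cv::KeyPoint>& keypoints_2)
{
    if (img_1.empty() || img_2.empty())
        return false;

    cv::Ptr<cv::Feature2D> detector = cv::SIFT::create();
    detector->detect(img_1, keypoints_1);
    detector->detect(img_2, keypoints_2);

    // Report failure if either image yields no keypoints at all.
    return !keypoints_1.empty() && !keypoints_2.empty();
}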

3. Filtering feature points by 200*200 regions

Note: the two input images I used for this matching test are 1000*1000 pixels each, so some of the values below are specific to 1000*1000 images (1000/200 = 5, giving a 5*5 grid of 25 regions) and need to be adjusted when you use the code.

//match based on the descriptors obtained below
		printf("Matching started!\n");
		vector<vector<DMatch>> matchePoints;

		/* Changed here */
		// For each point on the image to be corrected, match texture features only against points of the reference image that lie within a 200*200-pixel window around the same row/column position.
		// First read the coordinates of every point extracted on the image to be corrected and its row/column position in the image.
		// Then, based on the row/column position, count the reference-image points inside the valid window and record their row/column positions.
		// Finally, feed each point of the image to be corrected together with the reference-image points inside its 200*200 window into the loop below and match/filter them separately.

		/* Row/column coordinates of a control point on the reference image (left commented out here)
		double hang = 0.0;
		double lie = 0.0;
		Mapcoord2XY(pzinfo.adfGeoTransform, mapx, mapy, hang, lie);
		//Mapcoord2XY(double* dGeoTrans, double mapx, double mapy, double& Xpixel, double& Yline)
		cout << "Row/column coordinates on the reference image!" << endl;
		cout << hang << "," << lie << endl;
		*/

		/*vector<KeyPoint> keypoints_11;
		vector<KeyPoint> keypoints_22;*/
		int size_ = 0;

		/* Split the feature points of the two tiles into 200*200 regions and store each region in its own array; later, only points that fall into the same region are matched against each other. */
		vector<vector<KeyPoint>> keypoints_11;
		vector<vector<KeyPoint>> keypoints_22;
		/*keypoints_11.push_back(vector<KeyPoint>());
		keypoints_22.push_back(vector<KeyPoint>());*/
		// The commented-out line below is safe, but only if keypoints_1 has at least one element:
		//if (!keypoints_1.empty()) {
		//	keypoints_11[0].push_back(keypoints_1[0]);
		//}

		// 1000/200 = 5, so each tile is divided into 5*5 = 25 regions.
		keypoints_11.resize(25);
		keypoints_22.resize(25);

		vector<Mat> descriptors_11vor;
		vector<Mat> descriptors_22vor;


		int a_1 = 0;
		int b_1 = 0;
		int a_11 = 0;
		int b_11 = 0;     

		cout << keypoints_1.size() <<endl;
		cout << keypoints_2.size() << endl;
		Mat descriptors_1;
		Mat descriptors_2;

		if (keypoints_1.size()<2 || keypoints_2.size()<2)
		{
			return false;
		}
		else
		{
			for (int i = 0; i < keypoints_1.size(); i++)
			{
				a_1 = int(keypoints_1[i].pt.x / 200);
				b_1 = int(keypoints_1[i].pt.y / 200);

				// Map the 200*200 cell (a_1, b_1) to the single index a_1*5 + b_1 of the 5*5 grid.
				if (a_1 >= 0 && a_1 < 5 && b_1 >= 0 && b_1 < 5)
				{
					keypoints_11[a_1 * 5 + b_1].push_back(keypoints_1[i]);
				}
				else
				{
					printf("The current feature point is outside the 1000*1000 image extent and is invalid!\n");
				}
			}

			for (int i = 0; i < keypoints_2.size(); i++)
			{
				a_11 = int(keypoints_2[i].pt.x / 200);
				b_11 = int(keypoints_2[i].pt.y / 200);

				// Same 5*5 grid bucketing for the second image.
				if (a_11 >= 0 && a_11 < 5 && b_11 >= 0 && b_11 < 5)
				{
					keypoints_22[a_11 * 5 + b_11].push_back(keypoints_2[i]);
				}
				else
				{
					printf("The current feature point is outside the 1000*1000 image extent and is invalid!\n");
				}
			}

			cout << keypoints_11[0].size() << endl;
			printf("Descriptor computation started!\n");

			for (int i = 0; i < keypoints_11.size(); i++)
			{
				if (keypoints_11[i].size() == 0 || keypoints_22[i].size() == 0)
				{
					printf("The current region has 0 points on the image to be corrected or on the reference image; skipping matching!\n");
				}
				else
				{
					bool CD_2 = false;
					Mat descriptors_11;
					Mat descriptors_22;
					CD_2 = ComputeDescriptor(img_1, img_2, keypoints_11[i], keypoints_22[i], descriptors_11, descriptors_22);
					if (CD_2 == false)
					{
						printf("Start!\n");
						/*descriptors_11.release();
						descriptors_22.release();*/
						continue;
					}
					else
					{
						printf("Start!\n");
						// Check the number of descriptor rows here (each row is a 128-dimensional descriptor).
						// knnMatch needs at least a 2-to-many (or many-to-2) situation; with 0 or 1 descriptors
						// the matcher triggers an assertion failure that try...catch cannot recover from.
						if (descriptors_11.rows < 2 || descriptors_22.rows < 2)
						{
							cout << "Fewer than 2 control points on one of the images; matching is not possible!\n" << endl;
							continue;
						}
						else
						{
							printf("Start!\n");
							int size_2 = 0;
							size_2 = FeatureMatch(matchePoints, descriptors_11, descriptors_22); // match the control points inside each 200*200 region
							size_ = size_ + size_2;

							descriptors_11vor.push_back(descriptors_11);
							descriptors_22vor.push_back(descriptors_22);

						}
					}
					descriptors_11.release();
					descriptors_22.release();
				}
			}
		}
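ComputeDescriptor and FeatureMatch are likewise the author's own helpers whose bodies are not shown. Assuming they wrap SIFT descriptor computation and a knnMatch followed by Lowe's ratio test (a common way to reject weak correspondences; the 0.75 threshold below is an assumed value), a sketch could look like this:

#include <opencv2/opencv.hpp>
#include <vector>

// Assumed reconstruction; the signatures are inferred from the calls above.
bool ComputeDescriptor(const cv::Mat& img_1, const cv::Mat& img_2,
                       std::vector<cv::KeyPoint>& keypoints_1,
                       std::vector<cv::KeyPoint>& keypoints_2,
                       cv::Mat& descriptors_1, cv::Mat& descriptors_2)
{
    cv::Ptr<cv::Feature2D> extractor = cv::SIFT::create();
    // compute() writes one 128-dimensional row per keypoint (keypoints it cannot describe are dropped).
    extractor->compute(img_1, keypoints_1, descriptors_1);
    extractor->compute(img_2, keypoints_2, descriptors_2);
    return !descriptors_1.empty() && !descriptors_2.empty();
}

int FeatureMatch(std::vector<std::vector<cv::DMatch>>& matchePoints,
                 const cv::Mat& descriptors_1, const cv::Mat& descriptors_2)
{
    // Brute-force matcher with the L2 norm, which suits SIFT descriptors.
    cv::BFMatcher matcher(cv::NORM_L2);
    std::vector<std::vector<cv::DMatch>> knnMatches;
    matcher.knnMatch(descriptors_1, descriptors_2, knnMatches, 2); // two nearest neighbours per query

    int kept = 0;
    const float ratio = 0.75f; // Lowe's ratio threshold (assumed)
    for (const std::vector<cv::DMatch>& m : knnMatches)
    {
        if (m.size() == 2 && m[0].distance < ratio * m[1].distance)
        {
            matchePoints.push_back(m);
            ++kept;
        }
    }
    return kept; // number of matches surviving the ratio test in this region
}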

With the code above we take the 1000*1000 images, divide them by row/column into 200*200 cells, obtaining 25 regions, and store each region's feature points and texture features separately. Matching is then performed only within each region; points from different regions are never matched against each other. This removes many points with large errors, so the points kept for subsequent correction or other operations introduce less error, and fewer error-filtering steps are needed later. What I wrote here is rather simple and clumsy, so feel free to build on the idea and optimize it (a parameterized sketch of the bucketing step is given below). You are also welcome to leave suggestions and better approaches in the comments, and I will implement and publish them.
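As one possible generalization of this idea, the hard-coded 1000*1000 image size, 200*200 cell size and 25 regions could be replaced by a small helper that buckets keypoints for arbitrary sizes. This is only a sketch of that optimization, not code from the article:

#include <opencv2/opencv.hpp>
#include <cstdio>
#include <vector>

// Bucket keypoints into a grid of cellSize*cellSize regions covering an
// imageWidth*imageHeight image; points outside the image are reported as invalid.
// For a 1000*1000 image and cellSize = 200 this reproduces the 5*5 = 25 regions used above.
std::vector<std::vector<cv::KeyPoint>> BucketKeyPoints(
    const std::vector<cv::KeyPoint>& keypoints,
    int imageWidth, int imageHeight, int cellSize)
{
    const int cols = (imageWidth  + cellSize - 1) / cellSize;
    const int rows = (imageHeight + cellSize - 1) / cellSize;
    std::vector<std::vector<cv::KeyPoint>> grid(cols * rows);

    for (const cv::KeyPoint& kp : keypoints)
    {
        const int cx = static_cast<int>(kp.pt.x) / cellSize;
        const int cy = static_cast<int>(kp.pt.y) / cellSize;
        if (cx < 0 || cx >= cols || cy < 0 || cy >= rows)
        {
            std::printf("The current feature point is outside the image extent and is invalid!\n");
            continue;
        }
        // The same cell index on both images identifies comparable regions.
        grid[cx * rows + cy].push_back(kp);
    }
    return grid;
}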

4. Points to note

Incorrect usage 1:
vector<vector<KeyPoint>> a;
vector<KeyPoint> b;
a[0].push_back(b[0]); // a is empty, so a[0] is out of bounds (undefined behavior)
The correct way is:

#include <vector>
using namespace std;
// assumes KeyPoint has already been defined (e.g. cv::KeyPoint)
vector<vector<KeyPoint>> a;
vector<KeyPoint> b;
// make sure a has at least one inner vector
a.push_back(vector<KeyPoint>());
// assume b has been filled with at least one KeyPoint element
// b.push_back(someKeyPoint);
// this line is now safe, but only if b has at least one element
if (!b.empty()) {
    a[0].push_back(b[0]);
}

Incorrect usage 2:
vector<vector<KeyPoint>> a;
vector<KeyPoint> b;
a[2].push_back(b[0]); // a has size 0, so index 2 is out of bounds (undefined behavior)
The correct way is:

#include <vector>
using namespace std;
// assumes KeyPoint has already been defined (e.g. cv::KeyPoint)
vector<vector<KeyPoint>> a;
vector<KeyPoint> b;
// make sure a is large enough
a.resize(3); // a now has at least 3 elements (each is an empty vector<KeyPoint>)
// assume b has been filled with at least one KeyPoint element
// b.push_back(someKeyPoint);
// make sure b is not empty before doing the following
if (!b.empty()) {
    a[2].push_back(b[0]);
}

Note! Note! Note!
Error 3: there is an important point about using cv::Mat here. Note that this is cv::Mat, not an ordinary, directly defined matrix; it is easy to make mistakes when reading and writing cv::Mat objects through a vector. If you want to append several cv::Mat objects in order into a single cv::Mat, pay attention to how they are merged. There are many ways to do this and I will not write them all out here; you can look them up online or ask GPT-4. Hehe (●ˇ∀ˇ●). One possible approach is sketched below.
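As one of the many possible approaches mentioned above, cv::vconcat can stack descriptor matrices row by row, provided they all have the same number of columns and the same type (true for SIFT descriptors: 128 columns of CV_32F). A minimal sketch:

#include <opencv2/opencv.hpp>
#include <vector>

// Merge a vector of descriptor matrices into one matrix, preserving their order.
cv::Mat MergeDescriptors(const std::vector<cv::Mat>& parts)
{
    // Drop empty matrices first; vconcat requires matching column counts and types.
    std::vector<cv::Mat> nonEmpty;
    for (const cv::Mat& part : parts)
        if (!part.empty())
            nonEmpty.push_back(part);

    cv::Mat merged;
    if (!nonEmpty.empty())
        cv::vconcat(nonEmpty, merged); // stack all rows, in order, into one matrix
    return merged;
}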

Summary

There are many, many more algorithms I would like to describe, but time is limited and I only spend 10 to 20 minutes on each article, so please bear with me. When I have more time, I will try to publish more high-quality, more technical articles.
