针对山区无人机影像地形起伏大、地貌特征复杂造成局部区域特征点探测难度大、匹配率不高的问题,本文通过改进算法提高了山区无人机影像的特征匹配率。该算法首先采用AKAZE算法探测影像特征点,然后加入特征匹配多约束条件,并采用基于单应性矩阵的RANSAC算法实现特征精匹配。同时,为了验证改进算法的匹配精度,分别与SIFT、SURF两种算法进行试验对比分析。试验结果表明:针对山区无人机影像特点,改进算法能够较好地克服影像因地形起伏大及地块特征破碎带来的影响,更好地剔除了特征点误匹配,匹配率更高。
SIFT、SURF等特征检测算法通过构建金字塔及多尺度空间来消除噪声并提取显著特征点,该类方法牺牲了局部精度,导致影像边界模糊和细节丢失。AKAZE算法改进了局部差分二进制(LDB)描述符,并引入快速显式扩散(FED)数值框架来快速求解偏微分方程。非线性扩散滤波将图像亮度随尺度参数的演化描述为热扩散函数的散度,以此控制扩散过程;由于扩散方程具有非线性性质,通常需要数值求解偏微分方程,通过图像亮度的非线性扩散来构建尺度空间。
**试验运行环境为Intel(R) Core(TM) i5-2430M CPU @ 2.40 GHz、运行内存6GB的笔记本电脑,以基于VS2013的OpenCV2.4.10图像处理函数库作为数据处理平台。**
代码:
#include <math.h>

#include <iostream>

#include <opencv2/opencv.hpp>
#include <opencv2/calib3d.hpp>
#include <opencv2/xfeatures2d.hpp>
//#include "ldb.h"

using namespace cv;
using namespace std;
using namespace cv::xfeatures2d;
int main(int argc, char** argv)
{
Mat img1 = imread(“001.jpg”);
Mat img2 = imread(“002.jpg”);
//Mat img1 = imread("3.JPG");
//Mat img2 = imread("4.JPG");
if (img1.empty() || img1.empty())
{
printf("error!!!!!!!!!");
return -1;
}
imshow("box image", img1);
imshow("scene image", img2);
//AKAZE特征提取
Ptr<AKAZE>detector = AKAZE::create();
vector<KeyPoint>keypoints_obj;
vector<KeyPoint>keypoints_scene;
int64 t = getTickCount();
int innersize = 0;
//extract akaze features
Mat descriptor_obj, descriptor_scene;
double t1 = getTickCount();
detector->detectAndCompute(img1, Mat(), keypoints_obj, descriptor_obj);
detector->detectAndCompute(img2, Mat(), keypoints_scene, descriptor_scene);
double t2 = getTickCount();
double tkaze = 1000 * (t2 - t1) / getTickFrequency();
printf("AKAZE Time consume(ms):%f", tkaze);
Mat keypointImg1;
Mat keypointImg2;
drawKeypoints(img1, keypoints_obj, keypointImg1, Scalar::all(-1), DrawMatchesFlags::DEFAULT);
drawKeypoints(img2, keypoints_scene, keypointImg2, Scalar::all(-1), DrawMatchesFlags::DEFAULT);
imshow("keypoints_obj of img1", keypointImg1);
imshow("keypoints_sence of img2", keypointImg2);
//matching
FlannBasedMatcher matcher(new flann::LshIndexParams(20, 10, 2));
//BFMatcher matcher(NORM_L2);
vector<DMatch>matches;
matcher.match(descriptor_obj, descriptor_scene, matches, Mat());
//draw matches(key points)
Mat akazeMatchesImg;
drawMatches(img1, keypoints_obj, img2, keypoints_scene, matches, akazeMatchesImg);
imshow("akaze match result", akazeMatchesImg);
vector<DMatch>goodMatches;
double minDist = 1000, maxDist = 0;
for (int i = 0; i < descriptor_obj.rows; i++)
{
double dist = matches[i].distance;
if (dist < minDist)
{
minDist = dist;
}
if (dist > maxDist)
{
maxDist = dist;
}
}
printf(" min distance:%f\n", minDist);
for (int i = 0; i < descriptor_obj.rows; i++)
{
double dist = matches[i].distance;
if (dist < max(5 * minDist, 0.02))
{
goodMatches.push_back(matches[i]);
}
}
if (goodMatches.size() < 4)
{
cout << " 有效特征点数目小于4个,粗匹配失败 " << endl;
}
//通过RANSAC方法,对现有的特征点对进行“提纯”
std::vector<Point2f> obj;
std::vector<Point2f> scene;
for (int a = 0; a < (int)goodMatches.size(); a++)
{
//分别将两处的good_matches对应的点对压入向量,只需要压入点的信息就可以
obj.push_back(keypoints_obj[goodMatches[a].queryIdx].pt);
scene.push_back(keypoints_scene[goodMatches[a].trainIdx].pt);
}
//计算单应矩阵
Mat H;
H = findHomography(obj, scene, CV_RANSAC);
if (H.rows < 3)
{
cout << " findHomography失败 " << endl;
}
//计算内点数目
Mat matObj;
Mat matScene;
CvMat* pcvMat = &(CvMat)H;
const double* Hmodel = pcvMat->data.db;
double Htmp = Hmodel[6];
for (int isize = 0; isize < obj.size(); isize++)
{
double ww = 1. / (Hmodel[6] * obj[isize].x + Hmodel[7] * obj[isize].y + 1.);
double dx = (Hmodel[0] * obj[isize].x + Hmodel[1] * obj[isize].y + Hmodel[2])*ww - scene[isize].x;
double dy = (Hmodel[3] * obj[isize].x + Hmodel[4] * obj[isize].y + Hmodel[5])*ww - scene[isize].y;
float err = (float)(dx*dx + dy * dy); //3个像素之内认为是同一个点
if (err < 9)
{
innersize = innersize + 1;
}
}
//打印内点占全部特征点的比率
double t3 = getTickCount();
double T_akaze = 1000 * (t3 - t1) / getTickFrequency();
printf("AKAZE T_Time consume(ms):%f\n", T_akaze);
float ff = (float)innersize / (float)goodMatches.size();
//drawMatches(img1, keypoints_obj, img2, keypoints_scene, innersize, akazeMatchesImg,
//Scalar::all(-1), Scalar::all(-1), vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);
drawMatches(img1, keypoints_obj, img2, keypoints_scene, goodMatches, akazeMatchesImg,
Scalar::all(-1), Scalar::all(-1), vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);
imshow("good match result", akazeMatchesImg);
cv::imwrite("E:/UAV-OpenCV/opencv3.1-Mytest/AKAZE/0513/good match result", akazeMatchesImg);
cout << "# Keypoints 1: \t" << keypoints_obj.size() << endl;
cout << "# Keypoints 2: \t" << keypoints_scene.size() << endl;
cout << "# Matches points: \t" << matches.size() << endl;
cout << "# goodMatches points: \t" << goodMatches.size() << endl;
cout << "# Match innerPoints: \t" << innersize << endl;
cout << "# Match ratio: \t" << ff << endl;
cout << "单应性矩阵 \t " << H << endl;
waitKey(0);
return 0;
}