在网上查到的代码大多是基于opencv2.4.9的特征检测算法。由于sift特征检测算法申请了专利,到了3.x版本以后需要额外配置opencv_contrib模块,因此以前的代码也需要进行相应的转换。
下面的代码是基于opencv2.4.9版本的特征检测算法,进行了更改,使得该代码可以在opencv3.4.6以及更高的版本之中进行正常运行。
SIFT特征对旋转、尺度缩放、亮度变化等保持不变性,是一种非常稳定的局部特征。SIFT算法主要有以下几个步骤:
(1)高斯差分金字塔的构建
使用组和层的结构构建了一个具有线性关系的金字塔(尺度空间),这样可以在连续的高斯核尺度上查找图像的特征点;另外,它使用一阶的高斯差分来近似高斯的拉普拉斯核,大大的减少了运算量。
(2)尺度空间的极值检测及特征点的定位
搜索上一步建立的高斯尺度空间,通过高斯差分来识别潜在的对尺度和旋转不变的特征点。但是,在离散空间中,局部极值点可能并不是真正意义的极值点,真正的极值点有可能落在离散点的间隙中,SIFT通过尺度空间DoG函数进行曲线拟合寻找极值点。
(3)特征方向赋值
基于图像局部的梯度方向,分配给每个关键点位置一个或多个方向,后续的所有操作都是对于关键点的方向、尺度和位置进行变换,从而提供这些特征的不变性。
(4)特征描述子的生成
通过上面的步骤已经找到的SIFT特征点的位置、方向、尺度信息,最后使用一组向量来描述特征点及其周围邻域像素的信息。
SURF算法,加速版的SIFT。SURF的流程和SIFT比较类似,这些改进体现在以下几个方面:
(1)特征点检测是基于Hessian矩阵,依据Hessian矩阵行列式的极值来定位特征点的位置。并且将Hession特征计算与高斯平滑结合在一起,两个操作通过近似处理得到一个核模板。
(2)在构建尺度空间时,使用box filter与源图像卷积,而不是使用DoG算子。
(3)SURF使用一阶Haar小波在x、y两个方向的响应作为构建特征向量的分布信息。
注意:这两种算法都声明在#include <opencv2/xfeatures2d/nonfree.hpp>这个头文件之中(opencv3.4.6),但在opencv2.4.9中是声明在#include <opencv2/nonfree/features2d.hpp>之中。而且二者的用法也略有不同,可以通过官网查看其对应类的继承关系来了解差异。
#include <iostream>
#include <opencv2\opencv.hpp>
#include <opencv2\xfeatures2d\nonfree.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/opencv.hpp>
#include <cv.h>
#include <highgui.h>
#include <iostream>
#include <stdio.h>
using namespace cv;
using namespace std;
using namespace cv::xfeatures2d;
int main()
{
//创建检测器
//cv::Ptr<SiftFeatureDetector> detector = SiftFeatureDetector::create();//sift
Ptr<SurfFeatureDetector> detector = SurfFeatureDetector::create(100);//创建Surf特征检测器,可改成SURF/ORB
//Ptr<DescriptorExtractor> descriptor_extractor = DescriptorExtractor::create("SURF");//创建特征向量生成器,可改成SURF/ORB
Ptr<DescriptorMatcher> descriptor_matcher = DescriptorMatcher::create("BruteForce");//创建特征匹配器
if (detector.empty() )
cout << "fail to create detector!";
//读入图像
Mat img1 = imread("C:\\Users\\td\\Desktop\\合并版\\3.合并 - 四象+修正左相机 +放大器\\合并/左相机.bmp", 0);
Mat img2 = imread("C:\\Users\\td\\Desktop\\合并版\\3.合并 - 四象+修正左相机 +放大器\\合并/右相机.bmp", 0);
//特征点检测
vector<KeyPoint> m_LeftKey, m_RightKey;
detector->detect(img1, m_LeftKey);//检测img1中的SIFT特征点,存储到m_LeftKey中
detector->detect(img2, m_RightKey);
cout << "图像1特征点个数:" << m_LeftKey.size() << endl;
cout << "图像2特征点个数:" << m_RightKey.size() << endl;
vector<cv::KeyPoint> key_points_1, key_points_2;
cv::Mat dstImage1, dstImage2;
detector->detectAndCompute(img1, Mat(), key_points_1, dstImage1);
detector->detectAndCompute(img2, Mat(), key_points_2, dstImage2);
//根据特征点计算特征描述子矩阵,即特征向量矩阵
double t = getTickCount();//当前滴答数
t = ((double)getTickCount() - t) / getTickFrequency();
cout << "SIFT算法用时:" << t << "秒" << endl;
cout << "图像1特征描述矩阵大小:" << dstImage1.size()
<< ",特征向量个数:" << dstImage1.rows << ",维数:" << dstImage1.cols << endl;
cout << "图像2特征描述矩阵大小:" << dstImage2.size()
<< ",特征向量个数:" << dstImage2.rows << ",维数:" << dstImage2.cols << endl;
//画出特征点
Mat img_m_LeftKey, img_m_RightKey;
drawKeypoints(img1, m_LeftKey, img_m_LeftKey, Scalar::all(-1), 0);
drawKeypoints(img2, m_RightKey, img_m_RightKey, Scalar::all(-1), 0);
//imshow("Src1",img_m_LeftKey);
//imshow("Src2",img_m_RightKey);
//特征匹配
vector<DMatch> matches;//匹配结果
descriptor_matcher->match(dstImage1, dstImage2, matches);//匹配两个图像的特征矩阵
cout << "Match个数:" << matches.size() << endl;
//计算匹配结果中距离的最大和最小值
//距离是指两个特征向量间的欧式距离,表明两个特征的差异,值越小表明两个特征点越接近
double max_dist = 0;
double min_dist = 100;
for (int i = 0; i < matches.size(); i++)
{
double dist = matches[i].distance;
if (dist < min_dist) min_dist = dist;
if (dist > max_dist) max_dist = dist;
}
cout << "最大距离:" << max_dist << endl;
cout << "最小距离:" << min_dist << endl;
//筛选出较好的匹配点
vector<DMatch> goodMatches;
for (int i = 0; i < matches.size(); i++)
{
if (matches[i].distance < 0.2 * max_dist)
{
goodMatches.push_back(matches[i]);
}
}
cout << "goodMatch个数:" << goodMatches.size() << endl;
//画出匹配结果
Mat img_matches;
//红色连接的是匹配的特征点对,绿色是未匹配的特征点
drawMatches(img1, m_LeftKey, img2, m_RightKey, goodMatches, img_matches,
Scalar::all(-1), CV_RGB(0, 255, 0), Mat(), 2);
resize(img_matches, img_matches, Size(1280, 720));
imshow("MatchSIFT", img_matches);
IplImage result = img_matches;
waitKey(0);
//RANSAC匹配过程
vector<DMatch> m_Matches = goodMatches;
// 分配空间
int ptCount = (int)m_Matches.size();
Mat p1(ptCount, 2, CV_32F);
Mat p2(ptCount, 2, CV_32F);
// 把Keypoint转换为Mat
Point2f pt;
for (int i = 0; i < ptCount; i++)
{
pt = m_LeftKey[m_Matches[i].queryIdx].pt;
p1.at<float>(i, 0) = pt.x;
p1.at<float>(i, 1) = pt.y;
pt = m_RightKey[m_Matches[i].trainIdx].pt;
p2.at<float>(i, 0) = pt.x;
p2.at<float>(i, 1) = pt.y;
}
// 用RANSAC方法计算F
Mat m_Fundamental;
vector<uchar> m_RANSACStatus; // 这个变量用于存储RANSAC后每个点的状态
findFundamentalMat(p1, p2, m_RANSACStatus, FM_RANSAC);
// 计算野点个数
int OutlinerCount = 0;
for (int i = 0; i < ptCount; i++)
{
if (m_RANSACStatus[i] == 0) // 状态为0表示野点
{
OutlinerCount++;
}
}
int InlinerCount = ptCount - OutlinerCount; // 计算内点
cout << "内点数为:" << InlinerCount << endl;
// 这三个变量用于保存内点和匹配关系
vector<Point2f> m_LeftInlier;
vector<Point2f> m_RightInlier;
vector<DMatch> m_InlierMatches;
m_InlierMatches.resize(InlinerCount);
m_LeftInlier.resize(InlinerCount);
m_RightInlier.resize(InlinerCount);
InlinerCount = 0;
float inlier_minRx = img1.cols; //用于存储内点中右图最小横坐标,以便后续融合
for (int i = 0; i < ptCount; i++)
{
if (m_RANSACStatus[i] != 0)
{
m_LeftInlier[InlinerCount].x = p1.at<float>(i, 0);
m_LeftInlier[InlinerCount].y = p1.at<float>(i, 1);
m_RightInlier[InlinerCount].x = p2.at<float>(i, 0);
m_RightInlier[InlinerCount].y = p2.at<float>(i, 1);
m_InlierMatches[InlinerCount].queryIdx = InlinerCount;
m_InlierMatches[InlinerCount].trainIdx = InlinerCount;
if (m_RightInlier[InlinerCount].x < inlier_minRx) inlier_minRx = m_RightInlier[InlinerCount].x; //存储内点中右图最小横坐标
InlinerCount++;
}
}
// 把内点转换为drawMatches可以使用的格式
vector<KeyPoint> key1(InlinerCount);
vector<KeyPoint> key2(InlinerCount);
KeyPoint::convert(m_LeftInlier, key1);
KeyPoint::convert(m_RightInlier, key2);
// 显示计算F过后的内点匹配
Mat OutImage;
drawMatches(img1, key1, img2, key2, m_InlierMatches, OutImage);
cvNamedWindow("Match features", 1);
resize(OutImage, OutImage, Size(1280, 720));
cvShowImage("Match features", &IplImage(OutImage));
waitKey(0);
cvDestroyAllWindows();
//矩阵H用以存储RANSAC得到的单应矩阵
Mat H = findHomography(m_LeftInlier, m_RightInlier, RANSAC);
//存储左图四角,及其变换到右图位置
std::vector<Point2f> obj_corners(4);
obj_corners[0] = Point(0, 0); obj_corners[1] = Point(img1.cols, 0);
obj_corners[2] = Point(img1.cols, img1.rows); obj_corners[3] = Point(0, img1.rows);
std::vector<Point2f> scene_corners(4);
perspectiveTransform(obj_corners, scene_corners, H);
//画出变换后图像位置
Point2f offset((float)img1.cols, 0);
line(OutImage, scene_corners[0] + offset, scene_corners[1] + offset, Scalar(0, 255, 0), 4);
line(OutImage, scene_corners[1] + offset, scene_corners[2] + offset, Scalar(0, 255, 0), 4);
line(OutImage, scene_corners[2] + offset, scene_corners[3] + offset, Scalar(0, 255, 0), 4);
line(OutImage, scene_corners[3] + offset, scene_corners[0] + offset, Scalar(0, 255, 0), 4);
resize(OutImage, OutImage, Size(1280, 720));
imshow("Good Matches & Object detection", OutImage);
while (1)
{
if (waitKey(100) == 19) cvSaveImage("E:\\warp_position.jpg", &IplImage(OutImage));
if (waitKey(100) == 27) break;
} //按esc继续,ctl+s保存图像
int drift = scene_corners[1].x; //储存偏移量
//新建一个矩阵存储配准后四角的位置
int width = int(max(abs(scene_corners[1].x), abs(scene_corners[2].x)));
int height = img1.rows; //或者:int height = int(max(abs(scene_corners[2].y), abs(scene_corners[3].y)));
float origin_x = 0, origin_y = 0;
if (scene_corners[0].x < 0) {
if (scene_corners[3].x < 0) origin_x += min(scene_corners[0].x, scene_corners[3].x);
else origin_x += scene_corners[0].x;
}
width -= int(origin_x);
if (scene_corners[0].y < 0) {
if (scene_corners[1].y) origin_y += min(scene_corners[0].y, scene_corners[1].y);
else origin_y += scene_corners[0].y;
}
//可选:height-=int(origin_y);
Mat imageturn = Mat::zeros(width, height, img1.type());
//获取新的变换矩阵,使图像完整显示
for (int i = 0; i < 4; i++) { scene_corners[i].x -= origin_x; } //可选:scene_corners[i].y -= (float)origin_y; }
Mat H1 = getPerspectiveTransform(obj_corners, scene_corners);
//进行图像变换,显示效果
warpPerspective(img1, imageturn, H1, Size(width, height));
resize(imageturn, imageturn, Size(1280, 720));
imshow("image_Perspective", imageturn);
waitKey(0);
//图像融合
int width_ol = width - int(inlier_minRx - origin_x);
int start_x = int(inlier_minRx - origin_x);
cout << "width: " << width << endl;
cout << "img1.width: " << img1.cols << endl;
cout << "start_x: " << start_x << endl;
cout << "width_ol: " << width_ol << endl;
uchar* ptr = imageturn.data;
double alpha = 0, beta = 1;
for (int row = 0; row < height; row++) {
ptr = imageturn.data + row * imageturn.step + (start_x)*imageturn.elemSize();
for (int col = 0; col < width_ol; col++)
{
uchar* ptr_c1 = ptr + imageturn.elemSize1(); uchar* ptr_c2 = ptr_c1 + imageturn.elemSize1();
uchar* ptr2 = img2.data + row * img2.step + (col + int(inlier_minRx))*img2.elemSize();
uchar* ptr2_c1 = ptr2 + img2.elemSize1(); uchar* ptr2_c2 = ptr2_c1 + img2.elemSize1();
alpha = double(col) / double(width_ol); beta = 1 - alpha;
if (*ptr == 0 && *ptr_c1 == 0 && *ptr_c2 == 0) {
*ptr = (*ptr2);
*ptr_c1 = (*ptr2_c1);
*ptr_c2 = (*ptr2_c2);
}
*ptr = (*ptr)*beta + (*ptr2)*alpha;
*ptr_c1 = (*ptr_c1)*beta + (*ptr2_c1)*alpha;
*ptr_c2 = (*ptr_c2)*beta + (*ptr2_c2)*alpha;
ptr += imageturn.elemSize();
}
}
//imshow("image_overlap", imageturn);
//waitKey(0);
Mat img_result = Mat::zeros(height, width + img2.cols - drift, img1.type());
uchar* ptr_r = imageturn.data;
for (int row = 0; row < height; row++) {
ptr_r = img_result.data + row * img_result.step;
for (int col = 0; col < imageturn.cols; col++)
{
uchar* ptr_rc1 = ptr_r + imageturn.elemSize1(); uchar* ptr_rc2 = ptr_rc1 + imageturn.elemSize1();
uchar* ptr = imageturn.data + row * imageturn.step + col * imageturn.elemSize();
uchar* ptr_c1 = ptr + imageturn.elemSize1(); uchar* ptr_c2 = ptr_c1 + imageturn.elemSize1();
*ptr_r = *ptr;
*ptr_rc1 = *ptr_c1;
*ptr_rc2 = *ptr_c2;
ptr_r += img_result.elemSize();
}
ptr_r = img_result.data + row * img_result.step + imageturn.cols*img_result.elemSize();
for (int col = imageturn.cols; col < img_result.cols; col++)
{
uchar* ptr_rc1 = ptr_r + imageturn.elemSize1(); uchar* ptr_rc2 = ptr_rc1 + imageturn.elemSize1();
uchar* ptr2 = img2.data + row * img2.step + (col - imageturn.cols + drift)*img2.elemSize();
uchar* ptr2_c1 = ptr2 + img2.elemSize1(); uchar* ptr2_c2 = ptr2_c1 + img2.elemSize1();
*ptr_r = *ptr2;
*ptr_rc1 = *ptr2_c1;
*ptr_rc2 = *ptr2_c2;
ptr_r += img_result.elemSize();
}
}
resize(img_result, img_result, Size(1280, 720));
imshow("image_result", img_result);
while (1)
{
if (waitKey(100) == 19) cvSaveImage("E:\\final_result.jpg", &IplImage(img_result));
if (waitKey(100) == 27) break; //按esc退出,ctl+s保存图像
}
return 0;
}