**
前言
**
这是寒假最后一个内容了。
**
寻找已知物体
**
在FLANN特征匹配的基础上,可以进一步利用Homography映射找出已知物体。具体来说就是利用findHomography函数通过匹配的关键点找出相应的变换,再利用perspectiveTransform函数映射点群。
- 使用findHomography寻找匹配上的关键点的变换
- 使用perspectiveTransform来映射点
寻找透视变换:findHomography()
Mat findHomography( InputArray srcPoints, //源平面上对应点,可以是CV_32FC2矩阵或vector<Point2f>
InputArray dstPoints,//目标平面上对应点,可以是CV_32FC2矩阵或vector<Point2f>
int method = 0, //计算单应矩阵的方法
double ransacReprojThreshold = 3,//范围1~10
OutputArray mask=noArray(), //掩码(如果上面的方法是FM_LMEDS /FM_RANSAC 的话)
const int maxIters = 2000,//RANSAC迭代最大次数
const double confidence = 0.995);//范围0~1
第三参数可选如下:
enum { FM_7POINT = 1, //!< 7-point algorithm
FM_8POINT = 2, //!< 8-point algorithm
FM_LMEDS = 4, //!< least-median algorithm. 7-point algorithm is used.
FM_RANSAC = 8 //!< RANSAC algorithm. It needs at least 15 points. 7-point algorithm is used.
};
进行透视矩阵变换:perspectiveTransform()
void perspectiveTransform(InputArray src, //输入图像
OutputArray dst, //输出图像
InputArray m );//变换矩阵3X3或者4X4浮点型矩阵
#include<opencv2/opencv.hpp>
#include<iostream>
#include<opencv2/xfeatures2d.hpp>
using namespace std;
using namespace cv;
int main() {
    // Load the object image (src1) and the scene image (src2).
    Mat src1 = imread("E:/File/3.jpg");
    Mat src2 = imread("E:/File/4.jpg");
    if (!src1.data || !src2.data) {
        cout << "ERROR....";
        system("pause");
        return -1;  // bug fix: original fell through and crashed on the empty images
    }
    resize(src1, src1, Size(400, 500));
    resize(src2, src2, Size(400, 500));
    // Detect SURF keypoints and compute their descriptors.
    int minHessian = 400;
    Ptr<xfeatures2d::SURF> detector = xfeatures2d::SURF::create(minHessian);
    vector<KeyPoint> keypoints_object, keypoints_scene;
    detector->detect(src1, keypoints_object);
    detector->detect(src2, keypoints_scene);
    Mat descriptors_object, descriptors_scene;
    Ptr<xfeatures2d::SURF> extractor = xfeatures2d::SURF::create();
    extractor->compute(src1, keypoints_object, descriptors_object);
    extractor->compute(src2, keypoints_scene, descriptors_scene);
    // Match the descriptors with the FLANN-based matcher.
    Ptr<FlannBasedMatcher> matcher = FlannBasedMatcher::create();
    vector<DMatch> matches;
    matcher->match(descriptors_object, descriptors_scene, matches);
    if (matches.empty()) {  // robustness: nothing matched, nothing to draw
        cout << "ERROR....";
        return -1;
    }
    // Find the largest and smallest match distances.
    double max_dist = 0, min_dist = 100;
    for (int i = 0; i < descriptors_object.rows; ++i) {
        double dist = matches[i].distance;
        if (dist < min_dist) min_dist = dist;
        if (dist > max_dist) max_dist = dist;
    }
    // bug fix: %d with a double argument is undefined behavior -> use %f
    printf(">Max dist 最大距离> %f\n", max_dist);
    printf(">Min dist 最小距离> %f\n", min_dist);
    // Keep only matches whose distance is below 3 * min_dist.
    vector<DMatch> goodMatches;
    for (int i = 0; i < descriptors_object.rows; ++i) {
        if (matches[i].distance < 3 * min_dist)
            goodMatches.push_back(matches[i]);
    }
    // Draw the surviving matches side by side (object left, scene right).
    Mat img_matches;
    drawMatches(src1, keypoints_object,
                src2, keypoints_scene,
                goodMatches,
                img_matches,
                Scalar::all(-1),
                Scalar::all(-1),
                vector<char>(),
                DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);
    // Collect the matched keypoint coordinates from both images.
    vector<Point2f> obj, scene;
    for (size_t i = 0; i < goodMatches.size(); ++i) {
        obj.push_back(keypoints_object[goodMatches[i].queryIdx].pt);
        scene.push_back(keypoints_scene[goodMatches[i].trainIdx].pt);
    }
    // findHomography needs at least 4 point pairs to solve for H.
    if (obj.size() < 4) {
        cout << "ERROR....";
        return -1;
    }
    // Estimate the perspective transform; RANSAC is the documented constant
    // for findHomography (FM_RANSAC has the same value but belongs to the
    // fundamental-matrix API).
    Mat H = findHomography(obj, scene, RANSAC);
    // The four corners of the object image, to be projected into the scene.
    vector<Point2f> obj_corners(4), scene_corners(4);
    obj_corners[0] = Point(0, 0);
    obj_corners[1] = Point(src1.cols, 0);
    obj_corners[2] = Point(src1.cols, src1.rows);
    obj_corners[3] = Point(0, src1.rows);
    perspectiveTransform(obj_corners, scene_corners, H);
    // Draw the projected outline on the scene half of img_matches; the
    // x-offset of src1.cols shifts into the right (scene) image.
    for (int i = 0; i < 4; ++i)
        line(img_matches,
             scene_corners[i] + Point2f(static_cast<float>(src1.cols), 0),
             scene_corners[(i + 1) % 4] + Point2f(static_cast<float>(src1.cols), 0),
             Scalar(255, 0, 123),
             4);
    imshow("Good Matches & Object detection", img_matches);
    while (char(waitKey(0)) < 0)
        ;
    destroyAllWindows();
    return 0;
}
**
ORB算法
**
ORB(Oriented FAST and Rotated BRIEF)是brief算法的改进版。ORB算法比sift算法效率高两个数量级,在计算速度上,ORB是sift的100倍,是surf的10倍。江湖上流传的说法是,ORB算法综合性能在各种测评里相较于其他特征提取算法是最好的。
要引出ORB算法,先由brief描述子入手。下面,介绍brief描述子。
Brief描述子
Brief(Binary Robust Independent Elementary Features)。
主要思路是在特征点附近随机选取若干点对,比较这些点对的灰度值的大小,组合成一串二进制串,并将这个二进制串作为该特征点的特征描述子。
Brief优点是速度,缺点是:
- 不具备旋转不变性
- 对噪声敏感
- 不具备尺度不变性
而ORB算法就是为了解决上面缺点1和2而提出的一种新方法。
尺度不变性
ORB没有试图解决尺度不变性(因为FAST本身就不具有尺度不变性),但是这样只求速度的特征描述子,一般都是应用在实时的视频处理中的,这样的话就可以通过追踪还有一些启发式的策略来解决尺度不变性问题。
// Load the reference (object) image.
Mat src = imread("E:/File/py.jpg");
// bug fix: the empty-check must come BEFORE resize — resizing a failed
// load throws. NOTE(review): execution still continues after the pause,
// as in the original; add a return if this fragment lives in main.
if (src.empty()) {
    cout << "cannot load image ....\n";
    system("pause");
}
resize(src, src, Size(300, 300));
Mat gray;
cvtColor(src, gray, COLOR_BGR2GRAY);
// Detect ORB keypoints and compute binary descriptors for the reference image.
Ptr<ORB> detector = ORB::create();
vector<KeyPoint> keypoints;
Mat descriptors;
detector->detect(gray, keypoints);              // detect keypoints
Ptr<ORB> extractor = ORB::create();
extractor->compute(gray, keypoints, descriptors);  // compute descriptors
// Build an LSH index over the binary descriptors (Hamming distance).
flann::Index my_flann_index(descriptors, flann::LshIndexParams(12, 20, 2), cvflann::FLANN_DIST_HAMMING);
// Open the default camera.
VideoCapture cap(0);
if (!cap.isOpened()) {
    cout << "视频对象打开出错......";
}
cap.set(CAP_PROP_FRAME_WIDTH, 360);
cap.set(CAP_PROP_FRAME_HEIGHT, 500);
while (waitKey(10) < 0) {
    double time0 = static_cast<double>(getTickCount());
    Mat capImg, capImg_gray;
    cap >> capImg;
    if (capImg.empty())
        continue;
    //flip(capImg, capImg, 0);  // 0: vertical, 1: horizontal, -1: both
    cvtColor(capImg, capImg_gray, COLOR_BGR2GRAY);
    // Detect ORB keypoints and descriptors in the captured frame.
    vector<KeyPoint> cap_keypoints;
    Mat cap_descriptors;
    detector->detect(capImg_gray, cap_keypoints);
    extractor->compute(capImg_gray, cap_keypoints, cap_descriptors);
    if (cap_descriptors.empty())  // robustness: featureless frame, skip it
        continue;
    // Query the two nearest reference descriptors for each frame descriptor.
    Mat matchIndex(cap_descriptors.rows, 2, CV_32SC1);
    Mat matchDistance(cap_descriptors.rows, 2, CV_32FC1);
    my_flann_index.knnSearch(
        cap_descriptors,
        matchIndex,
        matchDistance,
        2,  // bug fix: was 50, but the result matrices hold exactly 2 columns
        flann::SearchParams()
    );
    // Lowe-style ratio test: keep a match only when the best neighbour is
    // clearly better than the second best.
    vector<DMatch> good_matches;
    for (int i = 0; i < matchDistance.rows; i++)
        if (matchDistance.at<float>(i, 0) < 0.6 * matchDistance.at<float>(i, 1)) {
            DMatch dmatches(
                i,
                matchIndex.at<int>(i, 0),
                matchDistance.at<float>(i, 0));  // bug fix: was at<int> on a CV_32F matrix
            good_matches.push_back(dmatches);
        }
    // Draw the accepted matches between the frame and the reference image.
    Mat drawImg;
    drawMatches(
        capImg, cap_keypoints,
        src, keypoints,
        good_matches,
        drawImg
    );
    imshow("匹配窗口", drawImg);
    // Report the per-frame processing rate.
    double frameRate = getTickFrequency() / (getTickCount() - time0);
    cout << "帧率>>" << frameRate << endl;
}
destroyAllWindows();
参考:《Opencv3编程入门》