最近在学习OpenCV,一边看官方文档一边看书,发现自己原来用的很多接口早已被更新,分享一下学习心得体会,也希望大家可以不吝赐教!
首先看到在Mastering OpenCV with Practical Computer Vision Projects书中,特征点检测,特征点描述(特征提取),特征点匹配用了以下代码:
- cv::Ptr<cv::FeatureDetector> detector = new cv::ORB(1000); // 创建orb特征点检测
- cv::Ptr<cv::DescriptorExtractor> extractor = new cv::FREAK(true, true); // 用Freak特征来描述特征点
- cv::Ptr<cv::DescriptorMatcher> matcher = new cv::BFMatcher(cv::NORM_HAMMING, // 特征匹配,计算Hamming距离
- class CV_EXPORTS_W ORB : public Feature2D
- class CV_EXPORTS_W Feature2D : public FeatureDetector, public DescriptorExtractor
原来ORB,Feature2D,FeatureDetector以及DescriptorExtractor之间是这样的继承关系,所以我们可以new一个ORB对象给FeatureDetector指针了。再搜索一下文档和头文件,还有更多的检测方法,特征描述可以使用。其中ORB既可以作为检测器,也可以作特征提取。
可以作为检测器的还有BRISK,MSER(特征区域),FastFeatureDetector(即FAST角点检测,ORB的检测部分就是在它的基础上改进的),StarDetector等等。
可以作特征提取的描述器包括BriefDescriptorExtractor(即BRIEF描述子,ORB的描述部分是在它的基础上改进的),Freak,OpponentColorDescriptorExtractor等等。
原来OpenCV提供了好多现成的方法,好方便……感叹一下,以前辛辛苦苦码的代码很多就浪费了,用好工具还是很重要那!>_< 顺带一提的是,2.4.5中新增的CLAHE(对比度受限的自适应直方图均衡化)方法也是类似的调用方式:
- cv::Ptr<cv::CLAHE> cl = createCLAHE(80, Size(4, 4));
- cl->apply(imSrc, ImDst);
CV自带的特征匹配和特征匹配结果绘制函数简直易用的令人发指……请看:
- vector<DMatch> matches;
- matcher->match(descriptors1, descriptors2, matches);
- Mat imResultOri;
- drawMatches(img1, keypoints1, img2, keypoints2, matches, imResultOri, CV_RGB(0,255,0), CV_RGB(0,255,0));
当然啦,做完特征点匹配,我们还可以通过RANSAC方法计算透视变换矩阵来筛选符合相同透视的特征点,这样做可以去除很多错误的匹配。
- std::vector<unsigned char> inliersMask(srcPoints.size());
- homography = cv::findHomography(srcPoints, dstPoints, CV_FM_RANSAC, reprojectionThreshold, inliersMask);
到此,一个简单的匹配任务就算是完成啦。
看完这部分内容最大的心得体会就是,作为一个写工程代码的人来说,要好好的去学习和掌握工具,可以避免好多没有意义的重复劳动。
跑了一个例子:
运行结果:
特征匹配:
一致的透视变换:
光流:
以上使用的代码是Mastering OpenCV with Practical Computer Vision Projects书上的源码经整理以后的代码,顺便尝试了一下光流算法的调用。通过对这段源码的学习,基本能够掌握OpenCV2.4版本以后检测,特征提取与匹配方法。如果调用遇到困难,还是可以直接查看源码来的更快捷。
- #include <iostream>
- #include <fstream>
- #include <sstream>
- #include "opencv2/opencv.hpp"
- using namespace cv;
- using namespace std;
- void KeyPointsToPoints(vector<KeyPoint> kpts, vector<Point2f> &pts);
- bool refineMatchesWithHomography(
- const std::vector<cv::KeyPoint>& queryKeypoints,
- const std::vector<cv::KeyPoint>& trainKeypoints,
- float reprojectionThreshold, std::vector<cv::DMatch>& matches,
- cv::Mat& homography);
- /** @function main */
- int main(int argc, char* argv[]) {
- /************************************************************************/
- /* 特征点检测,特征提取,特征匹配,计算投影变换 */
- /************************************************************************/
- // 读取图片
- Mat img1Ori = imread("1.jpg", 0);
- Mat img2Ori = imread("2.jpg", 0);
- // 缩小尺度
- Mat img1, img2;
- resize(img1Ori, img1, Size(img1Ori.cols / 4, img1Ori.cols / 4));
- resize(img2Ori, img2, Size(img2Ori.cols / 4, img2Ori.cols / 4));
- cv::Ptr<cv::FeatureDetector> detector = new cv::ORB(1000); // 创建orb特征点检测
- cv::Ptr<cv::DescriptorExtractor> extractor = new cv::FREAK(true, true); // 用Freak特征来描述特征点
- cv::Ptr<cv::DescriptorMatcher> matcher = new cv::BFMatcher(cv::NORM_HAMMING, // 特征匹配,计算Hamming距离
- true);
- vector<KeyPoint> keypoints1; // 用于保存图中的特征点
- vector<KeyPoint> keypoints2;
- Mat descriptors1; // 用于保存图中的特征点的特征描述
- Mat descriptors2;
- detector->detect(img1, keypoints1); // 检测第一张图中的特征点
- detector->detect(img2, keypoints2);
- extractor->compute(img1, keypoints1, descriptors1); // 计算图中特征点位置的特征描述
- extractor->compute(img2, keypoints2, descriptors2);
- vector<DMatch> matches;
- matcher->match(descriptors1, descriptors2, matches);
- Mat imResultOri;
- drawMatches(img1, keypoints1, img2, keypoints2, matches, imResultOri,
- CV_RGB(0,255,0), CV_RGB(0,255,0));
- cout << "[Info] # of matches : " << matches.size() << endl;
- Mat matHomo;
- refineMatchesWithHomography(keypoints1, keypoints2, 3, matches, matHomo);
- cout << "[Info] Homography T : " << matHomo << endl;
- cout << "[Info] # of matches : " << matches.size() << endl;
- Mat imResult;
- drawMatches(img1, keypoints1, img2, keypoints2, matches, imResult,
- CV_RGB(0,255,0), CV_RGB(0,255,0));
- // 计算光流
- vector<uchar> vstatus;
- vector<float> verrs;
- vector<Point2f> points1;
- vector<Point2f> points2;
- KeyPointsToPoints(keypoints1, points1);
- calcOpticalFlowPyrLK(img1, img2, points1, points2, vstatus, verrs);
- Mat imOFKL = img1.clone();
- for (int i = 0; i < vstatus.size(); i++) {
- if (vstatus[i] && verrs[i] < 15) {
- line(imOFKL, points1[i], points2[i], CV_RGB(255,255,255), 1, 8, 0);
- circle(imOFKL, points2[i], 3, CV_RGB(255,255,255), 1, 8, 0);
- }
- }
- imwrite("opt.jpg", imOFKL);
- imwrite("re1.jpg", imResultOri);
- imwrite("re2.jpg", imResult);
- imshow("Optical Flow", imOFKL);
- imshow("origin matches", imResultOri);
- imshow("refined matches", imResult);
- waitKey();
- return -1;
- }
- bool refineMatchesWithHomography(
- const std::vector<cv::KeyPoint>& queryKeypoints,
- const std::vector<cv::KeyPoint>& trainKeypoints,
- float reprojectionThreshold, std::vector<cv::DMatch>& matches,
- cv::Mat& homography) {
- const int minNumberMatchesAllowed = 8;
- if (matches.size() < minNumberMatchesAllowed)
- return false;
- // Prepare data for cv::findHomography
- std::vector<cv::Point2f> srcPoints(matches.size());
- std::vector<cv::Point2f> dstPoints(matches.size());
- for (size_t i = 0; i < matches.size(); i++) {
- srcPoints[i] = trainKeypoints[matches[i].trainIdx].pt;
- dstPoints[i] = queryKeypoints[matches[i].queryIdx].pt;
- }
- // Find homography matrix and get inliers mask
- std::vector<unsigned char> inliersMask(srcPoints.size());
- homography = cv::findHomography(srcPoints, dstPoints, CV_FM_RANSAC,
- reprojectionThreshold, inliersMask);
- std::vector<cv::DMatch> inliers;
- for (size_t i = 0; i < inliersMask.size(); i++) {
- if (inliersMask[i])
- inliers.push_back(matches[i]);
- }
- matches.swap(inliers);
- return matches.size() > minNumberMatchesAllowed;
- }
- void KeyPointsToPoints(vector<KeyPoint> kpts, vector<Point2f> &pts) {
- for (int i = 0; i < kpts.size(); i++) {
- pts.push_back(kpts[i].pt);
- }
- return;
- }
原文地址:http://blog.csdn.net/u010141147/article/details/9464571