Planar object recognition means visualizing the features matched by a BF or FLANN matcher and framing the detected object with a rectangle (or another shape). Two APIs do the main work:
1. findHomography() ------> finds the perspective transformation between two planes and returns the perspective transform (homography) matrix
2. perspectiveTransform() ----------> applies a perspective transformation to a set of points
Because of the shooting angle, the outline of a photographed object often comes out skewed; a perspective transform maps it back to a straight-on view.
1. findHomography()
Mat findHomography(
    InputArray srcPoints,                // coordinates of the points in the source plane
    InputArray dstPoints,                // coordinates of the points in the target plane
    int method = 0,                      // method used to compute the homography matrix
    double ransacReprojThreshold = 3,    // max reprojection error for a pair to count as an inlier (RANSAC/RHO only)
    OutputArray mask = noArray(),        // optional output mask set by a robust method (RANSAC or LMEDS)
    const int maxIters = 2000,           // maximum number of RANSAC iterations; 2000 is the largest value it can take
    const double confidence = 0.995      // confidence level
);
The available methods for computing the homography matrix are:
0: the regular method using all of the points;
RANSAC: a RANSAC-based robust method;
LMEDS: the least-median robust method;
RHO: a PROSAC-based robust method;
The matrix this function returns is the H used in the demo below.
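As a minimal sketch of how the robust estimation and the inlier mask fit together (the point coordinates below are invented purely for illustration):

#include <opencv2/opencv.hpp>
#include <iostream>
#include <vector>
using namespace cv;
using namespace std;

int main()
{
    // At least 4 correspondences are required; these values are made up for the example.
    vector<Point2f> src = { {0, 0}, {100, 0}, {100, 100}, {0, 100}, {50, 50} };
    vector<Point2f> dst = { {10, 5}, {110, 10}, {105, 115}, {5, 110}, {60, 58} };

    // RANSAC tolerates outliers; `mask` flags each pair as inlier (1) or outlier (0).
    Mat mask;
    Mat H = findHomography(src, dst, RANSAC, 3.0, mask);
    if (H.empty()) {
        return -1; // estimation failed
    }
    cout << "H = " << endl << H << endl;
    cout << "inliers: " << countNonZero(mask) << " of " << mask.rows << endl;
    return 0;
}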
2. perspectiveTransform()
void perspectiveTransform(
    InputArray src,    // input two- or three-channel floating-point array; each element is a 2D/3D point to transform
    OutputArray dst,   // output array of the same size and type as src
    InputArray m       // 3x3 or 4x4 floating-point transformation matrix
);
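Note that perspectiveTransform() maps point coordinates only; to warp the pixels of an image with the same matrix you would use warpPerspective(). A minimal sketch (the image path and the H values are placeholders):

#include <opencv2/opencv.hpp>
#include <vector>
using namespace cv;

int main()
{
    Mat img = imread("../../Images/22.jpg"); // placeholder path
    if (img.empty()) return -1;

    // Hypothetical homography; in practice H comes from findHomography().
    Mat H = (Mat_<double>(3, 3) << 1, 0.1,  5,
                                   0, 1.0, 10,
                                   0, 0.0,  1);

    // perspectiveTransform(): transforms point coordinates, here the image corners.
    std::vector<Point2f> corners = { {0, 0}, {(float)img.cols, 0},
                                     {(float)img.cols, (float)img.rows}, {0, (float)img.rows} };
    std::vector<Point2f> warpedCorners;
    perspectiveTransform(corners, warpedCorners, H);

    // warpPerspective(): transforms the pixels of the image itself.
    Mat warped;
    warpPerspective(img, warped, H, img.size());
    imshow("warped", warped);
    waitKey(0);
    return 0;
}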
DEMO:
// 30 Planar object recognition
// Note: SURF lives in the opencv_contrib xfeatures2d module; this method assumes
// #include <opencv2/xfeatures2d.hpp> and using namespace cv::xfeatures2d.
void StartOp2::ImageProcess2_30()
{
    Mat img1 = imread("../../Images/22.jpg", IMREAD_GRAYSCALE);
    Mat img2 = imread("../../Images/23.jpg", IMREAD_GRAYSCALE);
    if (!img1.data || !img2.data) {
        printf("could not load images...\n");
        return;
    }
    imshow("image1", img1);
    imshow("image2", img2);

    // extract SURF features
    int minHessian = 400;
    Ptr<SURF> detector = SURF::create(minHessian);
    vector<KeyPoint> keypoints_obj;
    vector<KeyPoint> keypoints_scene;
    Mat descriptor_obj, descriptor_scene;
    detector->detectAndCompute(img1, Mat(), keypoints_obj, descriptor_obj);
    detector->detectAndCompute(img2, Mat(), keypoints_scene, descriptor_scene);

    // match descriptors with a FLANN-based matcher
    FlannBasedMatcher matcher;
    vector<DMatch> matches;
    matcher.match(descriptor_obj, descriptor_scene, matches);

    // find the min/max descriptor distances over all matches
    double minDist = 1000;
    double maxDist = 0;
    for (int i = 0; i < descriptor_obj.rows; i++)
    {
        double dist = matches[i].distance;
        if (dist > maxDist) {
            maxDist = dist;
        }
        if (dist < minDist) {
            minDist = dist;
        }
    }
    printf("max distance : %f\n", maxDist);
    printf("min distance : %f\n", minDist);

    // keep only the good matches (small descriptor distance)
    vector<DMatch> goodMatches;
    for (int i = 0; i < descriptor_obj.rows; i++) {
        double dist = matches[i].distance;
        if (dist < max(3 * minDist, 0.02)) {
            goodMatches.push_back(matches[i]);
        }
    }
    Mat matchesImg;
    drawMatches(img1, keypoints_obj, img2, keypoints_scene, goodMatches, matchesImg, Scalar::all(-1),
        Scalar::all(-1), vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS
    );

    vector<Point2f> obj;
    vector<Point2f> objInScene;
    // queryIdx: index of the descriptor in the query (object) image, which is also the index of the corresponding keypoint.
    // trainIdx: index of the descriptor in the train (scene) image, likewise the index of the corresponding keypoint.
    for (size_t t = 0; t < goodMatches.size(); t++) {
        obj.push_back(keypoints_obj[goodMatches[t].queryIdx].pt);
        objInScene.push_back(keypoints_scene[goodMatches[t].trainIdx].pt);
    }
    Mat H = findHomography(obj, objInScene, RANSAC);

    // map the object's corners into the scene
    vector<Point2f> obj_corners(4);
    vector<Point2f> scene_corners(4);
    obj_corners[0] = Point(0, 0);
    obj_corners[1] = Point(img1.cols, 0);
    obj_corners[2] = Point(img1.cols, img1.rows);
    obj_corners[3] = Point(0, img1.rows);
    perspectiveTransform(obj_corners, scene_corners, H);

    // draw the outline; the x-offset of img1.cols shifts the corners into the
    // right (scene) half of the side-by-side matches image
    line(matchesImg, scene_corners[0] + Point2f(img1.cols, 0), scene_corners[1] + Point2f(img1.cols, 0), Scalar(0, 0, 255), 2, 8, 0);
    line(matchesImg, scene_corners[1] + Point2f(img1.cols, 0), scene_corners[2] + Point2f(img1.cols, 0), Scalar(0, 0, 255), 2, 8, 0);
    line(matchesImg, scene_corners[2] + Point2f(img1.cols, 0), scene_corners[3] + Point2f(img1.cols, 0), Scalar(0, 0, 255), 2, 8, 0);
    line(matchesImg, scene_corners[3] + Point2f(img1.cols, 0), scene_corners[0] + Point2f(img1.cols, 0), Scalar(0, 0, 255), 2, 8, 0);

    // draw the same outline on the scene image alone
    Mat dst;
    cvtColor(img2, dst, COLOR_GRAY2BGR);
    line(dst, scene_corners[0], scene_corners[1], Scalar(0, 0, 255), 2, 8, 0);
    line(dst, scene_corners[1], scene_corners[2], Scalar(0, 0, 255), 2, 8, 0);
    line(dst, scene_corners[2], scene_corners[3], Scalar(0, 0, 255), 2, 8, 0);
    line(dst, scene_corners[3], scene_corners[0], Scalar(0, 0, 255), 2, 8, 0);

    imshow("find known object demo", matchesImg);
    imshow("Draw object", dst);
    waitKey(0); // keep the result windows open
}
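SURF is patent-encumbered and only available when OpenCV is built with the opencv_contrib modules and OPENCV_ENABLE_NONFREE. If that is not an option, a sketch of the changes needed to swap in ORB (free, binary descriptors) might look like this; everything from the distance filtering onward stays the same:

// ORB instead of SURF: no xfeatures2d / NONFREE build required.
// Binary descriptors are matched with Hamming distance rather than the default FLANN KD-tree.
Ptr<ORB> detector = ORB::create(1000); // 1000 = max keypoints, an arbitrary choice here
vector<KeyPoint> keypoints_obj, keypoints_scene;
Mat descriptor_obj, descriptor_scene;
detector->detectAndCompute(img1, noArray(), keypoints_obj, descriptor_obj);
detector->detectAndCompute(img2, noArray(), keypoints_scene, descriptor_scene);

BFMatcher matcher(NORM_HAMMING);
vector<DMatch> matches;
matcher.match(descriptor_obj, descriptor_scene, matches);
// ...distance filtering, findHomography, and perspectiveTransform proceed as in the demo above.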
Reference blogs:
https://blog.csdn.net/akadiao/article/details/79172050
https://blog.csdn.net/Daker_Huang/article/details/84930573
https://www.learnopencv.com/homography-examples-using-opencv-python-c/