【OpenCV】ORB image registration

#include <iostream>  
#include "opencv2/opencv.hpp"  
#include "opencv2/core/core.hpp"  
#include "opencv2/features2d/features2d.hpp"  
#include "opencv2/highgui/highgui.hpp"  
using namespace cv;  
using namespace std;  
int main(int argc, char** argv)
{
    double time0 = static_cast<double>(getTickCount());   // record the start time

    Mat obj = imread("10.jpg");     // load the object (template) image
    Mat scene = imread("20.jpg");   // load the scene image
    if (obj.empty() || scene.empty())   // validate the images before any further processing
    {
        cout << "Can't open the picture!\n";
        return 0;
    }
    cvtColor(obj, obj, CV_BGR2GRAY);    // imread loads BGR images
    cvtColor(scene, scene, CV_BGR2GRAY);
    vector<KeyPoint> obj_keypoints, scene_keypoints;
    Mat obj_descriptors, scene_descriptors;
    ORB detector;   // detect keypoints and compute binary descriptors with ORB
    detector.detect(obj, obj_keypoints);
    detector.detect(scene, scene_keypoints);
    detector.compute(obj, obj_keypoints, obj_descriptors);
    detector.compute(scene, scene_keypoints, scene_descriptors);
    BFMatcher matcher(NORM_HAMMING, true);   // brute-force matching, Hamming distance, cross-check enabled
    vector<DMatch> matches;
    matcher.match(obj_descriptors, scene_descriptors, matches);
    Mat match_img;
    drawMatches(obj, obj_keypoints, scene, scene_keypoints, matches, match_img);
    imshow("Matches before outlier filtering", match_img);
  
    // store the query/train index of every match
    vector<int> queryIdxs( matches.size() ), trainIdxs( matches.size() );  
    for( size_t i = 0; i < matches.size(); i++ )  
    {  
        queryIdxs[i] = matches[i].queryIdx;  
        trainIdxs[i] = matches[i].trainIdx;  
    }     
  
    Mat H12;   // homography from the object image to the scene image

    vector<Point2f> points1; KeyPoint::convert(obj_keypoints, points1, queryIdxs);
    vector<Point2f> points2; KeyPoint::convert(scene_keypoints, points2, trainIdxs);
    int ransacReprojThreshold = 1;   // RANSAC reprojection threshold (pixels)

    // estimate the object-to-scene homography with RANSAC; it is used to separate inliers from outliers
    H12 = findHomography(Mat(points1), Mat(points2), CV_RANSAC, ransacReprojThreshold);
    cout << "Homography before refinement:\n" << H12 << endl << endl;
	
    // Mat imageTransform1;
    // warpPerspective(scene, imageTransform1, H12, Size(obj.cols, obj.rows));
    // imshow("Warped with the initial homography", imageTransform1);
  
	 
    int n = 0;   // inlier count
    vector<char> matchesMask(matches.size(), 0);
    Mat points1t;
    vector<Point2f> imagePoints1, imagePoints2;
    perspectiveTransform(Mat(points1), points1t, H12);

    for (size_t i = 0; i < points1.size(); i++)   // keep only the inliers
    {
        // a match is an inlier if the distance between the transformed object point
        // and its matched scene point is within the reprojection threshold
        if (norm(points2[i] - points1t.at<Point2f>((int)i, 0)) <= ransacReprojThreshold)
        {
            matchesMask[i] = 1;
            imagePoints1.push_back(obj_keypoints[matches[i].queryIdx].pt);
            imagePoints2.push_back(scene_keypoints[matches[i].trainIdx].pt);
            n++;
        }
    }
    Mat match_img2;   // matches after the outliers have been filtered out
    drawMatches(obj, obj_keypoints, scene, scene_keypoints, matches, match_img2,
                Scalar(0, 0, 255), Scalar::all(-1), matchesMask);
    imshow("Matches after outlier filtering", match_img2);
    cout << n << endl << endl;   // print the number of inliers

		
    // alternative: fill imagePoints1/imagePoints2 from the first 50 matches instead of the RANSAC inliers
  

    // re-estimate the homography from the inliers only, this time mapping scene -> object
    Mat homo = findHomography(Mat(imagePoints2), Mat(imagePoints1), CV_RANSAC);
    cout << "Homography after refinement:\n" << homo << endl << endl;
    // image registration: warp the scene image into the object image's frame
    Mat imageTransform1;
    warpPerspective(scene, imageTransform1, homo, Size(obj.cols, obj.rows));
    imshow("Warped with the refined homography", imageTransform1);

    cout << "Frames per second = " << getTickFrequency() / (getTickCount() - time0) << endl;
  
	 
	 waitKey(0);  
      
    return 0;  
}
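
The listing above targets the OpenCV 2.x API (ORB used as a plain object, CV_BGR2GRAY and CV_RANSAC as integer constants), which no longer compiles against OpenCV 3.x/4.x. Below is a minimal sketch of the same pipeline, ORB matching with cross-checked Hamming distance, RANSAC inlier selection, and a perspective warp of the scene into the object frame, written against the OpenCV 3.x/4.x API. The file names "10.jpg" and "20.jpg" and the 1-pixel reprojection threshold come from the listing above; the variable names and the single call to findHomography (instead of estimating, filtering, and re-estimating) are assumptions made for illustration, not part of the original post.

#include <iostream>
#include <opencv2/opencv.hpp>

int main()
{
    cv::Mat obj = cv::imread("10.jpg");     // object (template) image, as in the listing above
    cv::Mat scene = cv::imread("20.jpg");   // scene image
    if (obj.empty() || scene.empty())
    {
        std::cout << "Can't open the picture!\n";
        return 0;
    }
    cv::cvtColor(obj, obj, cv::COLOR_BGR2GRAY);
    cv::cvtColor(scene, scene, cv::COLOR_BGR2GRAY);

    // ORB keypoints and binary descriptors in a single call
    cv::Ptr<cv::ORB> orb = cv::ORB::create();
    std::vector<cv::KeyPoint> kp_obj, kp_scene;
    cv::Mat desc_obj, desc_scene;
    orb->detectAndCompute(obj, cv::noArray(), kp_obj, desc_obj);
    orb->detectAndCompute(scene, cv::noArray(), kp_scene, desc_scene);

    // brute-force Hamming matching with cross-check, as in the listing above
    cv::BFMatcher matcher(cv::NORM_HAMMING, true);
    std::vector<cv::DMatch> matches;
    matcher.match(desc_obj, desc_scene, matches);
    if (matches.size() < 4)   // findHomography needs at least 4 point pairs
    {
        std::cout << "Not enough matches.\n";
        return 0;
    }

    // collect the matched point pairs; findHomography performs the RANSAC inlier selection itself
    std::vector<cv::Point2f> pts_obj, pts_scene;
    for (const cv::DMatch& m : matches)
    {
        pts_obj.push_back(kp_obj[m.queryIdx].pt);
        pts_scene.push_back(kp_scene[m.trainIdx].pt);
    }
    std::vector<uchar> inlier_mask;
    cv::Mat H = cv::findHomography(pts_scene, pts_obj, cv::RANSAC, 1.0, inlier_mask);
    std::cout << "scene -> object homography:\n" << H << std::endl;
    std::cout << "inliers: " << cv::countNonZero(inlier_mask)
              << " / " << matches.size() << std::endl;

    // warp the scene image into the object image's frame
    cv::Mat registered;
    cv::warpPerspective(scene, registered, H, obj.size());
    cv::imshow("Registered scene", registered);
    cv::waitKey(0);
    return 0;
}

With OpenCV 4 installed, this sketch should build with something like g++ orb_register.cpp `pkg-config --cflags --libs opencv4` (the pkg-config package name is usually just opencv on 2.x/3.x installs).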

 
