OpenCV feature2d: object detection with the SIFT and SURF algorithms


The examples below were built against OpenCV 3.4.0; opencv_contrib must be installed, because SURF and SIFT live in the xfeatures2d (nonfree) module.

SURF keypoint detection

  • The SURF algorithm assigns each detected feature a location and a scale. The scale can be used to define the size of a window around the keypoint so that, whatever the scale of the object, the window contains the same visual information; this information is what describes the keypoint and makes it distinctive.

The SURF algorithm's full name is Speeded-Up Robust Features.
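
Each cv::KeyPoint returned by the detectors used below carries exactly this location and scale information. The following minimal sketch (my addition, not part of the original post) assumes a keypoints vector already filled by a SURF detector and simply prints those fields:

// Minimal sketch (my addition): inspect the location and scale that SURF stores
// for each detected feature. Assumes `keypoints` was filled by a SURF detector
// as in the program below.
#include <opencv2/core.hpp>
#include <vector>
#include <cstdio>

void printKeypointInfo(const std::vector<cv::KeyPoint>& keypoints)
{
    for (const cv::KeyPoint& kp : keypoints)
    {
        // pt   : keypoint position in the image
        // size : diameter of the meaningful neighbourhood (derived from the detection scale)
        // angle: dominant orientation; response: strength of the detection
        std::printf("x=%.1f y=%.1f size=%.1f angle=%.1f response=%.3f\n",
                    kp.pt.x, kp.pt.y, kp.size, kp.angle, kp.response);
    }
}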

#include "opencv2/highgui.hpp"
#include "opencv2/features2d.hpp"
#include "opencv2/xfeatures2d/nonfree.hpp"
#include <iostream>
#include <cstdio>

using namespace cv;
using namespace std;
using namespace cv::xfeatures2d;

int main()
{
    //【1】Load the two source images as grayscale
    Mat srcImage1 = imread( "/home/oceanstar/桌面/1.png", IMREAD_GRAYSCALE );
    Mat srcImage2 = imread( "/home/oceanstar/桌面/2.png", IMREAD_GRAYSCALE );
    if( srcImage1.empty() || srcImage2.empty() )  // check that both images were loaded
    { printf("Failed to read the images; make sure the files passed to imread exist.\n"); return -1; }

    //【2】Create a SURF detector (SurfFeatureDetector is an alias of SURF)
    int minHessian = 400;  // Hessian threshold of the SURF detector
    Ptr<cv::xfeatures2d::SurfFeatureDetector> detector = cv::xfeatures2d::SurfFeatureDetector::create(minHessian);
    std::vector<KeyPoint> keypoints_1, keypoints_2;  // containers for the detected keypoints

    //【3】Detect the SURF keypoints and store them in the vectors
    detector->detect( srcImage1, keypoints_1 );
    detector->detect( srcImage2, keypoints_2 );

    //【4】Draw the keypoints
    Mat img_keypoints_1; Mat img_keypoints_2;
    drawKeypoints( srcImage1, keypoints_1, img_keypoints_1, Scalar::all(-1), DrawMatchesFlags::DEFAULT );
    // drawKeypoints( srcImage1, keypoints_1, img_keypoints_1, Scalar::all(-1), DrawMatchesFlags::DRAW_RICH_KEYPOINTS );
    drawKeypoints( srcImage2, keypoints_2, img_keypoints_2, Scalar::all(-1), DrawMatchesFlags::DEFAULT );

    //【5】Show the results
    imshow("Keypoints 1", img_keypoints_1 );
    imshow("Keypoints 2", img_keypoints_2 );

    waitKey(0);
    return 0;
}

[Figure: the detected SURF keypoints drawn on the two input images]
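
The minHessian value passed to SURF::create is a threshold on the determinant of the Hessian; raising it keeps only the strongest keypoints. The small sketch below (my addition, not from the original post) prints how the keypoint count drops as the threshold grows:

// Sketch (my addition): the higher the Hessian threshold, the fewer (but
// stronger) keypoints survive.
#include "opencv2/imgcodecs.hpp"
#include "opencv2/xfeatures2d/nonfree.hpp"
#include <cstdio>
#include <vector>

int main()
{
    cv::Mat img = cv::imread("/home/oceanstar/桌面/1.png", cv::IMREAD_GRAYSCALE);
    if (img.empty()) return -1;

    for (double hessian : {100.0, 400.0, 1000.0, 3000.0})
    {
        cv::Ptr<cv::xfeatures2d::SURF> surf = cv::xfeatures2d::SURF::create(hessian);
        std::vector<cv::KeyPoint> kps;
        surf->detect(img, kps);
        std::printf("hessianThreshold=%.0f -> %zu keypoints\n", hessian, kps.size());
    }
    return 0;
}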

Drawing keypoints: drawKeypoints

CV_EXPORTS_W void drawKeypoints( InputArray image, const std::vector<KeyPoint>& keypoints, InputOutputArray outImage,
                               const Scalar& color=Scalar::all(-1), int flags=DrawMatchesFlags::DEFAULT );
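
The commented-out call in the detection program above uses the DRAW_RICH_KEYPOINTS flag. As a short illustration (my addition), the fragment below, which assumes srcImage1 and keypoints_1 from that program, draws each keypoint with its scale and orientation instead of a plain dot:

// Hedged fragment (my addition): DRAW_RICH_KEYPOINTS draws every keypoint as a
// circle whose radius reflects its scale, plus a line for its orientation,
// which makes the scale information mentioned above visible.
cv::Mat richKeypointsImg;
cv::drawKeypoints(srcImage1, keypoints_1, richKeypointsImg,
                  cv::Scalar::all(-1),                        // random colour per keypoint
                  cv::DrawMatchesFlags::DRAW_RICH_KEYPOINTS); // draw size and orientation
cv::imshow("Rich keypoints", richKeypointsImg);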

SURF feature description (descriptor extraction and brute-force matching)

#include "opencv2/highgui.hpp"
#include "opencv2/features2d.hpp"
#include "opencv2/xfeatures2d/nonfree.hpp"
#include <iostream>
#include <cstdio>

using namespace cv;
using namespace std;
using namespace cv::xfeatures2d;

int main()
{
    //【1】Load the two source images as grayscale
    Mat srcImage1 = imread( "/home/oceanstar/桌面/1.png", IMREAD_GRAYSCALE );
    Mat srcImage2 = imread( "/home/oceanstar/桌面/2.png", IMREAD_GRAYSCALE );
    if( srcImage1.empty() || srcImage2.empty() )  // check that both images were loaded
    { printf("Failed to read the images; make sure the files passed to imread exist.\n"); return -1; }

    //【2】Create a SURF detector
    int minHessian = 3000;  // Hessian threshold of the SURF detector
    Ptr<cv::xfeatures2d::SurfFeatureDetector> detector = cv::xfeatures2d::SurfFeatureDetector::create(minHessian);
    std::vector<KeyPoint> keypoints_1, keypoints_2;  // containers for the detected keypoints

    //【3】Detect the SURF keypoints and store them in the vectors
    detector->detect( srcImage1, keypoints_1 );
    detector->detect( srcImage2, keypoints_2 );

    //【4】Compute the SURF descriptors (feature vectors) for the keypoints
    Ptr<xfeatures2d::SurfDescriptorExtractor> extractor = xfeatures2d::SurfDescriptorExtractor::create();
    Mat descriptors1, descriptors2;
    extractor->compute( srcImage1, keypoints_1, descriptors1 );
    extractor->compute( srcImage2, keypoints_2, descriptors2 );

    //【5】Match the descriptors of the two images with a brute-force matcher
    Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create("BruteForce");
    std::vector< DMatch > matches;
    matcher->match( descriptors1, descriptors2, matches );

    //【6】Draw the matched keypoints of the two images
    Mat imgMatches;
    drawMatches( srcImage1, keypoints_1, srcImage2, keypoints_2, matches, imgMatches );

    //【7】Show the result
    imshow("Matches", imgMatches );
    waitKey(0);
    return 0;
}

[Figure: brute-force matching result between the two images]
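
Because DescriptorMatcher::match returns exactly one match for every query descriptor, many of the drawn matches are poor. A common refinement (my addition, not from the original post; it requires <algorithm> and assumes the matches vector from the program above) is to sort by descriptor distance and keep only the best few before calling drawMatches:

// Hedged fragment (my addition): keep only the strongest matches by sorting on
// descriptor distance. Assumes `matches` was filled by matcher->match() above.
std::sort(matches.begin(), matches.end(),
          [](const cv::DMatch& a, const cv::DMatch& b) { return a.distance < b.distance; });
std::vector<cv::DMatch> bestMatches(
        matches.begin(),
        matches.begin() + std::min<size_t>(50, matches.size()));  // keep at most 50 matches
// bestMatches can then be passed to drawMatches instead of matches.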

Feature matching with FLANN

#include "opencv2/highgui.hpp"
#include "opencv2/features2d.hpp"
#include "opencv2/xfeatures2d/nonfree.hpp"
#include <iostream>
#include <cstdio>

using namespace cv;
using namespace std;
using namespace cv::xfeatures2d;

int main()
{
    //【1】Load the two source images as grayscale
    Mat srcImage_1 = imread( "/home/oceanstar/桌面/1.png", IMREAD_GRAYSCALE );
    Mat srcImage_2 = imread( "/home/oceanstar/桌面/2.png", IMREAD_GRAYSCALE );
    if( srcImage_1.empty() || srcImage_2.empty() )  // check that both images were loaded
    { printf("Failed to read the images; make sure the files passed to imread exist.\n"); return -1; }

    //【2】Detect keypoints with the SURF detector
    int minHessian = 3000;
    Ptr<SURF> detector = SURF::create(minHessian);
    std::vector<KeyPoint> keypoints_1, keypoints_2;
    detector->detect( srcImage_1, keypoints_1 );
    detector->detect( srcImage_2, keypoints_2 );

    //【3】Compute the SURF descriptors (feature vectors)
    Ptr<SURF> extractor = SurfDescriptorExtractor::create();
    Mat descriptors_1, descriptors_2;
    extractor->compute( srcImage_1, keypoints_1, descriptors_1 );
    extractor->compute( srcImage_2, keypoints_2, descriptors_2 );

    //【4】Match the descriptor vectors with the FLANN-based matcher
    Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create("FlannBased");
    std::vector< DMatch > matches;
    matcher->match( descriptors_1, descriptors_2, matches );

    //【5】Compute the maximum and minimum distance between matched keypoints
    double max_dist = 0; double min_dist = 100;
    for( int i = 0; i < descriptors_1.rows; i++ )
    {
        double dist = matches[i].distance;
        if( dist < min_dist ) min_dist = dist;
        if( dist > max_dist ) max_dist = dist;
    }
    printf("> Max dist : %f \n", max_dist );
    printf("> Min dist : %f \n", min_dist );

    //【6】Keep only the "good" matches, i.e. those whose distance is less than
    //     2*min_dist (radiusMatch could be used for the same purpose)
    std::vector< DMatch > good_matches;
    for( int i = 0; i < descriptors_1.rows; i++ )
    {
        if( matches[i].distance < 2*min_dist )
        { good_matches.push_back( matches[i]); }
    }

    //【7】Draw the good matches
    Mat img_matches;
    drawMatches( srcImage_1, keypoints_1, srcImage_2, keypoints_2,
                 good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
                 vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );

    //【8】Print the good matches
    for( size_t i = 0; i < good_matches.size(); i++ )
    { printf( "> Good match [%zu]  keypoint 1: %d  -- keypoint 2: %d \n", i, good_matches[i].queryIdx, good_matches[i].trainIdx ); }

    //【9】Show the result
    imshow( "Good matches", img_matches );

    // press any key to exit
    waitKey(0);
    return 0;
}

[Figure: FLANN matching result showing only the good matches]
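
The 2*min_dist filter above breaks down when min_dist is close to zero. A commonly used alternative (my addition, not from the original post) is knnMatch with k = 2 followed by Lowe's ratio test; the fragment assumes matcher, descriptors_1 and descriptors_2 from the program above:

// Hedged fragment (my addition): filter ambiguous matches with Lowe's ratio test
// instead of the 2*min_dist heuristic.
std::vector<std::vector<cv::DMatch> > knnMatches;
matcher->knnMatch(descriptors_1, descriptors_2, knnMatches, 2);  // two nearest neighbours per descriptor

const float ratioThresh = 0.7f;                  // typical values are around 0.7-0.8
std::vector<cv::DMatch> ratioGoodMatches;
for (const std::vector<cv::DMatch>& m : knnMatches)
{
    if (m.size() == 2 && m[0].distance < ratioThresh * m[1].distance)
        ratioGoodMatches.push_back(m[0]);        // keep only clearly-best matches
}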

Finding a known object

#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/features2d.hpp"
#include "opencv2/xfeatures2d/nonfree.hpp"
#include "opencv2/calib3d.hpp"
#include <iostream>
#include <cstdio>

using namespace cv;
using namespace std;
using namespace cv::xfeatures2d;

int main()
{
    //【1】Load the two source images as grayscale
    Mat srcImage_1 = imread( "/home/oceanstar/桌面/1.png", IMREAD_GRAYSCALE );
    Mat srcImage_2 = imread( "/home/oceanstar/桌面/2.png", IMREAD_GRAYSCALE );
    if( srcImage_1.empty() || srcImage_2.empty() )  // check that both images were loaded
    { printf("Failed to read the images; make sure the files passed to imread exist.\n"); return -1; }

    //【2】Detect keypoints with the SURF detector
    int minHessian = 300;
    Ptr<SURF> detector = SURF::create(minHessian);
    std::vector<KeyPoint> keypoints_1, keypoints_2;
    detector->detect( srcImage_1, keypoints_1 );
    detector->detect( srcImage_2, keypoints_2 );

    //【3】Compute the SURF descriptors (feature vectors)
    Ptr<SURF> extractor = SurfDescriptorExtractor::create();
    Mat descriptors_1, descriptors_2;
    extractor->compute( srcImage_1, keypoints_1, descriptors_1 );
    extractor->compute( srcImage_2, keypoints_2, descriptors_2 );

    //【4】Match the descriptors with the FLANN-based matcher
    Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create("FlannBased");
    std::vector< DMatch > matches;
    matcher->match( descriptors_1, descriptors_2, matches );

    //【5】Compute the maximum and minimum distance between matched keypoints
    double max_dist = 0; double min_dist = 100;
    for( int i = 0; i < descriptors_1.rows; i++ )
    {
        double dist = matches[i].distance;
        if( dist < min_dist ) min_dist = dist;
        if( dist > max_dist ) max_dist = dist;
    }
    printf("> Max dist : %f \n", max_dist );
    printf("> Min dist : %f \n", min_dist );

    //【6】Keep only the matches whose distance is less than 3*min_dist
    //     (radiusMatch could be used for the same purpose)
    std::vector< DMatch > good_matches;
    for( int i = 0; i < descriptors_1.rows; i++ )
    {
        if( matches[i].distance < 3*min_dist )
        { good_matches.push_back( matches[i]); }
    }

    //【7】Draw the good matches
    Mat img_matches;
    drawMatches( srcImage_1, keypoints_1, srcImage_2, keypoints_2,
                 good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
                 vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );

    //【8】Collect the coordinates of the matched keypoints
    vector<Point2f> obj;
    vector<Point2f> scene;
    for( size_t i = 0; i < good_matches.size(); i++ )
    {
        obj.push_back( keypoints_1[ good_matches[i].queryIdx ].pt );
        scene.push_back( keypoints_2[ good_matches[i].trainIdx ].pt );
    }

    //【9】Estimate the perspective transform (homography) with RANSAC
    Mat H = findHomography( obj, scene, RANSAC );

    //【10】Take the corners of the object image...
    vector<Point2f> obj_corners(4);
    obj_corners[0] = Point2f( 0, 0 );
    obj_corners[1] = Point2f( static_cast<float>(srcImage_1.cols), 0 );
    obj_corners[2] = Point2f( static_cast<float>(srcImage_1.cols), static_cast<float>(srcImage_1.rows) );
    obj_corners[3] = Point2f( 0, static_cast<float>(srcImage_1.rows) );
    vector<Point2f> scene_corners(4);

    //【11】...and project them into the scene image
    perspectiveTransform( obj_corners, scene_corners, H);

    //【12】Draw the projected object outline (shifted right by the width of the first image)
    Point2f offset( static_cast<float>(srcImage_1.cols), 0 );
    line( img_matches, scene_corners[0] + offset, scene_corners[1] + offset, Scalar(255, 0, 123), 4 );
    line( img_matches, scene_corners[1] + offset, scene_corners[2] + offset, Scalar(255, 0, 123), 4 );
    line( img_matches, scene_corners[2] + offset, scene_corners[3] + offset, Scalar(255, 0, 123), 4 );
    line( img_matches, scene_corners[3] + offset, scene_corners[0] + offset, Scalar(255, 0, 123), 4 );

    //【13】Show the final result
    imshow( "Object detection result", img_matches );

    // press any key to exit
    waitKey(0);
    return 0;
}
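
One caveat (my addition): findHomography may return an empty matrix when too few consistent matches survive, and with RANSAC it can also report which matches are inliers. The fragment below is meant to sit right after the findHomography call above and reuses its obj and scene vectors:

// Hedged fragment (my addition): check the homography and count RANSAC inliers.
std::vector<unsigned char> inlierMask;
cv::Mat H2 = cv::findHomography(obj, scene, cv::RANSAC, 3.0, inlierMask);  // 3.0 = reprojection threshold in pixels
if (H2.empty())
{
    std::printf("Homography estimation failed: not enough consistent matches.\n");
}
else
{
    int inliers = cv::countNonZero(inlierMask);  // mask entries are 1 for inliers, 0 for outliers
    std::printf("%d of %zu good matches are RANSAC inliers.\n", inliers, obj.size());
}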

Describing and matching keypoints against live video with SIFT and a FLANN-based matcher

#include "opencv2/highgui.hpp"
#include "opencv2/videoio.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/features2d.hpp"
#include "opencv2/xfeatures2d/nonfree.hpp"
#include <iostream>

using namespace cv;
using namespace std;
using namespace cv::xfeatures2d;

int main()
{
    //【1】Load the training image, show it and convert it to grayscale
    Mat trainingImg = imread("/home/oceanstar/桌面/1.png");
    if(trainingImg.empty()){
        cout << "Failed to read the training image." << endl;
        return -1;
    }
    imshow("Training image", trainingImg);
    cvtColor(trainingImg, trainingImg, COLOR_BGR2GRAY);

    //【2】Detect SIFT keypoints and compute descriptors for the training image
    int nFeatures = 700;  // note: the first argument of SIFT::create is nfeatures (keep the 700 strongest features), not a Hessian threshold as in SURF
    vector<KeyPoint> train_keyPoint;
    Mat trainDescription;
    Ptr<SiftFeatureDetector> featureDetector = SiftFeatureDetector::create(nFeatures);
    Ptr<SiftDescriptorExtractor> featureExtractor = SiftDescriptorExtractor::create();
    featureDetector->detect(trainingImg, train_keyPoint);
    featureExtractor->compute(trainingImg, train_keyPoint, trainDescription);

    //【3】Create a FLANN-based matcher and train it on the training descriptors
    Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create("FlannBased");
    vector<Mat> train_desc_collection(1, trainDescription);
    matcher->add(train_desc_collection);
    matcher->train();

    //【4】Open the default camera
    VideoCapture cap(0);

    //【5】Loop until the 'q' key is pressed
    while(char(waitKey(1)) != 'q')
    {
        //<1> record the start time of this frame
        auto time0 = static_cast<double>(getTickCount( ));
        Mat srcFrameImg;
        cap >> srcFrameImg;   // grab a frame from the camera
        if(srcFrameImg.empty())
            continue;

        //<2> convert the frame to grayscale
        cvtColor(srcFrameImg, srcFrameImg, COLOR_BGR2GRAY);

        //<3> detect SIFT keypoints and compute descriptors for the frame
        vector<KeyPoint> test_keyPoint;
        Mat testDescriptor;
        featureDetector->detect(srcFrameImg, test_keyPoint);
        featureExtractor->compute(srcFrameImg, test_keyPoint, testDescriptor);

        //<4> match the frame descriptors against the trained descriptors (2 nearest neighbours)
        vector<vector<DMatch> > matches;
        matcher->knnMatch(testDescriptor, matches, 2);

        //<5> keep only the good matches using Lowe's ratio test
        vector<DMatch> goodMatches;
        for(auto & m : matches)
        {
            if(m.size() == 2 && m[0].distance < 0.6 * m[1].distance)
                goodMatches.push_back(m[0]);
        }

        //<6> draw the matches and show the window
        Mat dstImage;
        drawMatches(srcFrameImg, test_keyPoint, trainingImg, train_keyPoint, goodMatches, dstImage);
        imshow("Matches", dstImage);

        //<7> print the frame rate
        cout << "\t> current FPS: " << getTickFrequency() / (getTickCount() - time0) << endl;
    }

    return 0;
}
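
Since SIFT descriptors are floating-point vectors, a brute-force matcher with the L2 norm is a drop-in (exact, but slower on large descriptor sets) replacement for the FLANN-based matcher used above. A hedged sketch (my addition), assuming trainDescription from the program above:

// Hedged fragment (my addition): swap the FLANN-based matcher for a brute-force
// matcher with the L2 norm, which suits float descriptors such as SIFT/SURF.
cv::Ptr<cv::DescriptorMatcher> bfMatcher = cv::BFMatcher::create(cv::NORM_L2);
bfMatcher->add(std::vector<cv::Mat>(1, trainDescription));
bfMatcher->train();   // a no-op for BFMatcher, kept for interface symmetry
// ...then call bfMatcher->knnMatch(testDescriptor, matches, 2) exactly as before.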

SIFT keypoint description and FLANN-based matching under OpenCV 4.3

This version was built against OpenCV 4.3.

#include <opencv2/opencv.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/xfeatures2d.hpp>

using namespace cv;
using namespace std;


int main()
{
    //【1】Load the training image, show it and convert it to grayscale
    Mat trainImage = imread( "/home/oceanstar/桌面/1.png" ), trainImage_gray;
    if(trainImage.empty()){
        cout << "Failed to read the training image." << endl;
        return -1;
    }
    imshow("Training image", trainImage);
    cvtColor(trainImage, trainImage_gray, COLOR_BGR2GRAY);

    //【2】Detect SIFT keypoints and compute descriptors for the training image
    int nFeatures = 700;  // first argument of SIFT::create is nfeatures (keep the 700 strongest features), not a Hessian threshold
    vector<KeyPoint> train_keyPoint;
    Mat trainDescription;
    Ptr<cv::xfeatures2d::SiftFeatureDetector> featureDetector = cv::xfeatures2d::SiftFeatureDetector::create(nFeatures);
    Ptr<xfeatures2d::SiftDescriptorExtractor> featureExtractor = xfeatures2d::SiftDescriptorExtractor::create();
    featureDetector->detect(trainImage_gray, train_keyPoint);
    featureExtractor->compute(trainImage_gray, train_keyPoint, trainDescription);

    //【3】Create a FLANN-based matcher and train it on the training descriptors
    Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create("FlannBased");
    vector<Mat> train_desc_collection(1, trainDescription);
    matcher->add(train_desc_collection);
    matcher->train();

    //【4】Open the video stream
    VideoCapture cap("rtmp://************************");

    //【5】Loop until the 'q' key is pressed
    while(char(waitKey(1)) != 'q')
    {
        //<1> record the start time of this frame
        double time0 = static_cast<double>(getTickCount( ));
        Mat captureImage, captureImage_gray;
        cap >> captureImage;   // grab a frame from the stream
        if(captureImage.empty())
            continue;

        //<2> convert the frame to grayscale
        cvtColor(captureImage, captureImage_gray, COLOR_BGR2GRAY);

        //<3> detect SIFT keypoints and compute descriptors for the frame
        vector<KeyPoint> test_keyPoint;
        Mat testDescriptor;
        featureDetector->detect(captureImage_gray, test_keyPoint);
        featureExtractor->compute(captureImage_gray, test_keyPoint, testDescriptor);

        //<4> match the frame descriptors against the trained descriptors (2 nearest neighbours)
        vector<vector<DMatch> > matches;
        matcher->knnMatch(testDescriptor, matches, 2);

        //<5> keep only the good matches using Lowe's ratio test
        vector<DMatch> goodMatches;
        for(unsigned int i = 0; i < matches.size(); i++)
        {
            if(matches[i].size() == 2 && matches[i][0].distance < 0.6 * matches[i][1].distance)
                goodMatches.push_back(matches[i][0]);
        }

        //<6> draw the matches, scale the result down and show the window
        Mat dstImage;
        drawMatches(captureImage, test_keyPoint, trainImage, train_keyPoint, goodMatches, dstImage);
        resize(dstImage, dstImage, Size(dstImage.cols / 2, dstImage.rows / 2), 0, 0, INTER_LINEAR_EXACT);
        imshow("Matches", dstImage);

        //<7> print the frame rate
        cout << "\t> current FPS: " << getTickFrequency() / (getTickCount() - time0) << endl;
    }

    return 0;
}

[Figure: SIFT matches drawn between a video frame and the training image]
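
For completeness (my addition): from OpenCV 4.4 onwards the SIFT patent has expired and cv::SIFT ships in the main features2d module, so the xfeatures2d classes above are no longer required for SIFT. A minimal sketch, assuming OpenCV >= 4.4:

// Hedged sketch (my addition): SIFT from the main module, no opencv_contrib needed.
#include <opencv2/core.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/features2d.hpp>
#include <vector>

int main()
{
    cv::Mat img = cv::imread("/home/oceanstar/桌面/1.png", cv::IMREAD_GRAYSCALE);
    if (img.empty()) return -1;

    cv::Ptr<cv::SIFT> sift = cv::SIFT::create(700);   // 700 = nfeatures, same meaning as above
    std::vector<cv::KeyPoint> keypoints;
    cv::Mat descriptors;
    sift->detectAndCompute(img, cv::noArray(), keypoints, descriptors);  // detect and describe in one call
    return 0;
}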
