OpenCV 2.4.13.7: DenseSIFT

OpenCV 2.4.x provides a DenseFeatureDetector that samples keypoints on a regular grid, optionally over several scale levels; pairing it with a SIFT descriptor extractor gives dense SIFT. The class declaration from features2d.hpp:

class CV_EXPORTS DenseFeatureDetector : public FeatureDetector
{
public:
    explicit DenseFeatureDetector( float initFeatureScale=1.f, int featureScaleLevels=1,
                                   float featureScaleMul=0.1f,
                                   int initXyStep=6, int initImgBound=0,
                                   bool varyXyStepWithScale=true,
                                   bool varyImgBoundWithScale=false );
    AlgorithmInfo* info() const;

protected:
    virtual void detectImpl( const Mat& image, vector<KeyPoint>& keypoints, const Mat& mask=Mat() ) const;

    double initFeatureScale;
    int featureScaleLevels;
    double featureScaleMul;

    int initXyStep;
    int initImgBound;

    bool varyXyStepWithScale;
    bool varyImgBoundWithScale;
};
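In the OpenCV 2.4 Python bindings the same detector is available through the generic factory, with the constructor arguments exposed as named Algorithm parameters (the names mirror the members above). A minimal sketch, assuming OpenCV 2.4 (this factory API was removed in 3.x); the input path is a placeholder:

import cv2

dense = cv2.FeatureDetector_create("Dense")      # OpenCV 2.4 only
dense.setInt("initXyStep", 6)                    # grid spacing in pixels
dense.setDouble("initFeatureScale", 1.0)         # keypoint size at level 0
dense.setInt("featureScaleLevels", 3)            # number of grid levels
dense.setDouble("featureScaleMul", 1.4)          # size multiplier per level

img = cv2.imread("input.png", cv2.IMREAD_GRAYSCALE)  # placeholder path
kps = dense.detect(img)
print(len(kps), "grid keypoints")

The detectImpl implementation below shows exactly how this grid is generated.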

/*
 *  DenseFeatureDetector
 */
DenseFeatureDetector::DenseFeatureDetector( float _initFeatureScale, int _featureScaleLevels,
                                      float _featureScaleMul, int _initXyStep,
                                      int _initImgBound, bool _varyXyStepWithScale,
                                      bool _varyImgBoundWithScale ) :
    initFeatureScale(_initFeatureScale), featureScaleLevels(_featureScaleLevels),
    featureScaleMul(_featureScaleMul), initXyStep(_initXyStep), initImgBound(_initImgBound),
    varyXyStepWithScale(_varyXyStepWithScale), varyImgBoundWithScale(_varyImgBoundWithScale)
{}


void DenseFeatureDetector::detectImpl( const Mat& image, vector<KeyPoint>& keypoints, const Mat& mask ) const
{
    float curScale = static_cast<float>(initFeatureScale);
    int curStep = initXyStep;
    int curBound = initImgBound;
    for( int curLevel = 0; curLevel < featureScaleLevels; curLevel++ )
    {
        // sample a regular grid, leaving a curBound-pixel margin at the image border
        for( int x = curBound; x < image.cols - curBound; x += curStep )
        {
            for( int y = curBound; y < image.rows - curBound; y += curStep )
            {
                keypoints.push_back( KeyPoint(static_cast<float>(x), static_cast<float>(y), curScale) );
            }
        }

        // rescale the keypoint size (and optionally the step and margin) for the next level
        curScale = static_cast<float>(curScale * featureScaleMul);
        if( varyXyStepWithScale ) curStep = static_cast<int>( curStep * featureScaleMul + 0.5f );
        if( varyImgBoundWithScale ) curBound = static_cast<int>( curBound * featureScaleMul + 0.5f );
    }

    // drop keypoints that fall outside the (optional) mask
    KeyPointsFilter::runByPixelsMask( keypoints, mask );
}
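Where DenseFeatureDetector is not available, the grid logic above is easy to port. The following is a straight Python transcription of detectImpl (the mask-filtering step is omitted, and a max(1, ...) guard is added so the step cannot reach zero, which the C++ code does not protect against):

import cv2

def dense_keypoints(img, init_scale=1.0, scale_levels=1, scale_mul=0.1,
                    init_step=6, init_bound=0,
                    vary_step=True, vary_bound=False):
    # same defaults as the DenseFeatureDetector constructor above
    keypoints = []
    cur_scale, cur_step, cur_bound = init_scale, init_step, init_bound
    for _ in range(scale_levels):
        for x in range(cur_bound, img.shape[1] - cur_bound, cur_step):
            for y in range(cur_bound, img.shape[0] - cur_bound, cur_step):
                keypoints.append(cv2.KeyPoint(float(x), float(y), cur_scale))
        cur_scale *= scale_mul
        if vary_step:
            cur_step = max(1, int(cur_step * scale_mul + 0.5))
        if vary_bound:
            cur_bound = int(cur_bound * scale_mul + 0.5)
    return keypoints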

In OpenCV 3 the DenseFeatureDetector class was removed, so the grid of keypoints has to be built by hand and passed to SIFT's compute(), as in this answer:

https://answers.opencv.org/question/73165/compute-dense-sift-features-in-opencv-30/

#include <opencv2/opencv.hpp>
#include <opencv2/xfeatures2d.hpp>
using namespace cv;

// img: input image (already loaded); features: output descriptors, one row per keypoint
Mat features;
int step = 10; // 10-pixel spacing between keypoints

std::vector<KeyPoint> kps;
for (int i = step; i < img.rows - step; i += step)
{
    for (int j = step; j < img.cols - step; j += step)
    {
        // KeyPoint takes (x, y, size): x is the column j, y is the row i
        kps.push_back(KeyPoint(float(j), float(i), float(step)));
    }
}

Ptr<xfeatures2d::SIFT> sift = xfeatures2d::SIFT::create();
sift->compute(img, kps, features);
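Note that SIFT lives in the opencv_contrib xfeatures2d module throughout 3.x, so this needs a contrib-enabled build; from OpenCV 4.4 on the patent has expired and SIFT is back in the main module as cv::SIFT::create().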

Related Q&A on supplying your own keypoints to a descriptor extractor:

https://stackoverflow.com/questions/34104297/how-to-convert-given-coordinates-to-kaze-keypoints-in-python-with-opencv

https://stackoverflow.com/questions/39955272/python-cv2-provide-your-own-keypoints

https://stackoverflow.com/questions/17981126/what-is-the-meaning-and-use-of-class-member-class-id-of-class-cvkeypoint-in-op

Python version:

import cv2

# img1, img2: input grayscale images, e.g. loaded with cv2.imread(path, 0);
# OpenCV 3.x with contrib modules assumed (in 2.4 use cv2.SIFT() instead)
sift = cv2.xfeatures2d.SIFT_create()

rows1, cols1 = img1.shape[:2]
rows2, cols2 = img2.shape[:2]
initXyStep = 6

keypoint1 = []
for lrow in range(initXyStep, rows1 - initXyStep, initXyStep):
    for lcol in range(initXyStep, cols1 - initXyStep, initXyStep):
        keypoint1.append(cv2.KeyPoint(lcol, lrow, initXyStep, _class_id=0))

keypoint2 = []
for lrow in range(initXyStep, rows2 - initXyStep, initXyStep):
    for lcol in range(initXyStep, cols2 - initXyStep, initXyStep):
        keypoint2.append(cv2.KeyPoint(lcol, lrow, initXyStep, _class_id=0))

keypoint1, desc1 = sift.compute(img1, keypoint1)
keypoint2, desc2 = sift.compute(img2, keypoint2)

resultimg = cv2.drawKeypoints(img1, keypoint1, img1.copy(),
                              flags=cv2.DRAW_MATCHES_FLAGS_DEFAULT)
cv2.imwrite('denseSift_keypoints.png', resultimg)
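With dense descriptors for both images, matching works exactly as with sparse SIFT. A minimal continuation of the snippet above using BFMatcher and Lowe's ratio test (the 0.75 threshold is the usual convention, not from the original post):

bf = cv2.BFMatcher(cv2.NORM_L2)
matches = bf.knnMatch(desc1, desc2, k=2)

# Lowe's ratio test to discard ambiguous matches
good = [m for m, n in matches if m.distance < 0.75 * n.distance]
print('%d good matches out of %d' % (len(good), len(matches)))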

For reference, here is the KeyPoint class itself, together with its documentation, as declared in OpenCV 2.4:

/*!
 The Keypoint Class

 The class instance stores a keypoint, i.e. a point feature found by one of many available keypoint detectors, such as
 Harris corner detector, cv::FAST, cv::StarDetector, cv::SURF, cv::SIFT, cv::LDetector etc.

 The keypoint is characterized by the 2D position, scale
 (proportional to the diameter of the neighborhood that needs to be taken into account),
 orientation and some other parameters. The keypoint neighborhood is then analyzed by another algorithm that builds a descriptor
 (usually represented as a feature vector). The keypoints representing the same object in different images can then be matched using
 cv::KDTree or another method.
*/
class CV_EXPORTS_W_SIMPLE KeyPoint
{
public:
    //! the default constructor
    CV_WRAP KeyPoint() : pt(0,0), size(0), angle(-1), response(0), octave(0), class_id(-1) {}
    //! the full constructor
    KeyPoint(Point2f _pt, float _size, float _angle=-1,
            float _response=0, int _octave=0, int _class_id=-1)
            : pt(_pt), size(_size), angle(_angle),
            response(_response), octave(_octave), class_id(_class_id) {}
    //! another form of the full constructor
    CV_WRAP KeyPoint(float x, float y, float _size, float _angle=-1,
            float _response=0, int _octave=0, int _class_id=-1)
            : pt(x, y), size(_size), angle(_angle),
            response(_response), octave(_octave), class_id(_class_id) {}

    size_t hash() const;

    //! converts vector of keypoints to vector of points
    static void convert(const vector<KeyPoint>& keypoints,
                        CV_OUT vector<Point2f>& points2f,
                        const vector<int>& keypointIndexes=vector<int>());
    //! converts vector of points to the vector of keypoints, where each keypoint is assigned the same size and the same orientation
    static void convert(const vector<Point2f>& points2f,
                        CV_OUT vector<KeyPoint>& keypoints,
                        float size=1, float response=1, int octave=0, int class_id=-1);

    //! computes overlap for pair of keypoints;
    //! overlap is a ratio between area of keypoint regions intersection and
    //! area of keypoint regions union (now keypoint region is circle)
    static float overlap(const KeyPoint& kp1, const KeyPoint& kp2);

    CV_PROP_RW Point2f pt; //!< coordinates of the keypoints
    CV_PROP_RW float size; //!< diameter of the meaningful keypoint neighborhood
    CV_PROP_RW float angle; //!< computed orientation of the keypoint (-1 if not applicable);
                            //!< it's in [0,360) degrees and measured relative to the
                            //!< image coordinate system, i.e. clockwise.
    CV_PROP_RW float response; //!< the response by which the most strong keypoints have been selected. Can be used for the further sorting or subsampling
    CV_PROP_RW int octave; //!< octave (pyramid layer) from which the keypoint has been extracted
    CV_PROP_RW int class_id; //!< object class (if the keypoints need to be clustered by an object they belong to)
};
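All of these fields are exposed directly in the Python bindings, and the static convert() shown above is wrapped as cv2.KeyPoint_convert in the 3.x/4.x bindings. A small sketch:

import cv2

kp = cv2.KeyPoint(10.0, 20.0, 6.0, _class_id=0)  # keyword is _class_id in 2.4/3.x
print(kp.pt, kp.size, kp.angle, kp.response, kp.octave, kp.class_id)
# (10.0, 20.0) 6.0 -1.0 0.0 0 0

pts = cv2.KeyPoint_convert([kp])  # keypoints -> array of (x, y) points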
