Implementing SIFT, ORB, ASIFT, and Dense SIFT with opencv-python

pip install opencv-python==3.4.2.16
pip install opencv-contrib-python==3.4.2.16
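
The pinned versions matter: SIFT lives in the contrib package, and opencv-python builds newer than 3.4.2.16 ship without the non-free xfeatures2d module (SIFT only returned to the main module in OpenCV 4.4, after the patent expired). A minimal sanity check, as a sketch:

# Sanity check (my own snippet): confirm the pinned build is active and
# that the non-free feature module is importable.
import cv2
print(cv2.__version__)                 # expect: 3.4.2
sift = cv2.xfeatures2d.SIFT_create()   # raises AttributeError on stock builds
print(sift is not None)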

 

1. ORB

# coding:utf-8
import cv2
import numpy as np

from contextlib import contextmanager
def clock():
    return cv2.getTickCount() / cv2.getTickFrequency()

@contextmanager
def Timer(msg):
    print(msg, '...',)
    start = clock()
    try:
        yield
    finally:
        print("%.2f ms" % ((clock()-start)*1000))

def filter_matches(kp1, kp2, matches, ratio = 0.75):
    mkp1, mkp2 = [], []
    for m in matches:
        if len(m) == 2 and m[0].distance < m[1].distance * ratio:
            m = m[0]
            mkp1.append( kp1[m.queryIdx] )
            mkp2.append( kp2[m.trainIdx] )
    p1 = np.float32([kp.pt for kp in mkp1])
    p2 = np.float32([kp.pt for kp in mkp2])
    kp_pairs = zip(mkp1, mkp2)
    return p1, p2, list(kp_pairs)


with Timer('matching'):
    debug=1
    if debug==1:
        path1 = "./1_IMG_Texture_8Bit.png"
        path2 = "./3_IMG_Texture_8Bit.png"
    else:
        path1 = "./OtherSampleFrame_IMG_Texture_8Bit_48.png"
        path2 = "./OtherSampleFrame_IMG_Texture_8Bit_52.png"

    img1 = cv2.imread(path1, cv2.IMREAD_GRAYSCALE)
    img2 = cv2.imread(path2, cv2.IMREAD_GRAYSCALE)
    orb = cv2.ORB_create(500000)  # nfeatures set very high so effectively no keypoints are dropped
    keypoint1, desc1 = orb.detectAndCompute(img1, None)
    keypoint2, desc2 = orb.detectAndCompute(img2, None)
    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=False)
    matches = bf.knnMatch(desc1, desc2, k=2)


    # Whether to prune matches with Lowe's ratio test
    use_ratio_test = True
    if use_ratio_test:
        p1, p2, kp_pairs = filter_matches(keypoint1, keypoint2, matches)
    else:
        mkp1, mkp2 = [], []
        for m in matches:
            m = m[0]
            mkp1.append( keypoint1[m.queryIdx] )
            mkp2.append( keypoint2[m.trainIdx] )
        p1 = np.float32([kp.pt for kp in mkp1])
        p2 = np.float32([kp.pt for kp in mkp2])
        kp_pairs = zip(mkp1, mkp2)
        kp_pairs = list(kp_pairs)

    if len(p1) >= 4:
        H, status = cv2.findHomography(p1, p2, cv2.RANSAC, 20.0)
        print('%d / %d  inliers/matched' % (np.sum(status), len(status)))
        kp_pairs = [kpp for kpp, flag in zip(kp_pairs, status) if flag]

        img2temp = img2.copy()
        img2temp = cv2.cvtColor(img2temp, cv2.COLOR_GRAY2BGR)
        # Project the hard-coded reference-box corners into image 2 through H:
        # multiply the homogeneous point by H, then divide by the third coordinate
        leftnew_array = np.ones((3, 1), dtype=np.float64)
        leftnew_array[0][0]=843.53
        leftnew_array[1][0]=230.07
        leftnew_array_1 = np.dot(H, leftnew_array)
        leftnew_array_2 = leftnew_array_1/leftnew_array_1[2][0]

        rightnew_array = np.ones((3, 1), dtype=np.float64)
        rightnew_array[0][0]=966.61
        rightnew_array[1][0]=447.38
        rightnew_array_1 = np.dot(H, rightnew_array)
        rightnew_array_2 = rightnew_array_1/rightnew_array_1[2][0]

        cv2.rectangle(img2temp, (int(leftnew_array_2[0][0]+0.5), int(leftnew_array_2[1][0]+0.5)),
                      (int(rightnew_array_2[0][0]+0.5), int(rightnew_array_2[1][0]+0.5)), (0, 255, 0), 3)
        # Ground-truth box (red)
        cv2.rectangle(img2temp, (int(239.82 + 0.5), int(704.91 + 0.5)),
                      (int(389.82 + 0.5), int(985.34 + 0.5)), (0, 0, 255), 3)
        cv2.imwrite("./result_orb_rectangle.png", img2temp)

    else:
        H, status = None, None
        print('%d matches found, not enough for homography estimation' % len(p1))

    # Stack the two images vertically and draw the surviving matches
    h1, w1 = img1.shape[:2]
    h2, w2 = img2.shape[:2]
    vis = np.zeros((h1 + h2, max(w1, w2)), np.uint8)
    vis[:h1, :w1] = img1
    vis[h1:h1 + h2, :w2] = img2
    vis = cv2.cvtColor(vis, cv2.COLOR_GRAY2BGR)

    p1, p2 = [], []
    for kpp in kp_pairs:
        p1.append(np.int32(kpp[0].pt))
        p2.append(np.int32(np.array(kpp[1].pt) + [0, h1]))

    green = (0, 255, 0)
    for (x1, y1), (x2, y2) in zip(p1, p2):
        cv2.line(vis, (int(x1), int(y1)), (int(x2), int(y2)), green, 2)
    cv2.imwrite("./result_orb.png", vis)

2. SIFT

# coding:utf-8
import cv2
import numpy as np

from contextlib import contextmanager
def clock():
    return cv2.getTickCount() / cv2.getTickFrequency()

@contextmanager
def Timer(msg):
    print(msg, '...',)
    start = clock()
    try:
        yield
    finally:
        print("%.2f ms" % ((clock()-start)*1000))

def filter_matches(kp1, kp2, matches, ratio = 0.75):
    mkp1, mkp2 = [], []
    for m in matches:
        if len(m) == 2 and m[0].distance < m[1].distance * ratio:
            m = m[0]
            mkp1.append( kp1[m.queryIdx] )
            mkp2.append( kp2[m.trainIdx] )
    p1 = np.float32([kp.pt for kp in mkp1])
    p2 = np.float32([kp.pt for kp in mkp2])
    kp_pairs = zip(mkp1, mkp2)
    return p1, p2, list(kp_pairs)

with Timer('matching'):
    debug=1
    if debug==1:
        path1 = "./1_IMG_Texture_8Bit.png"
        path2 = "./3_IMG_Texture_8Bit.png"
    else:
        path1 = "./OtherSampleFrame_IMG_Texture_8Bit_48.png"
        path2 = "./OtherSampleFrame_IMG_Texture_8Bit_52.png"

    img1 = cv2.imread(path1, cv2.IMREAD_GRAYSCALE)
    img2 = cv2.imread(path2, cv2.IMREAD_GRAYSCALE)

    sift = cv2.xfeatures2d.SIFT_create()
    keypoint1, desc1 = sift.detectAndCompute(img1, None)
    keypoint2, desc2 = sift.detectAndCompute(img2, None)

    use_flann = True
    if use_flann:
        """
        FLANN is a library for fast approximate nearest-neighbor search.
        It picks a suitable index for the data and is typically much
        faster than brute-force matching on large descriptor sets.
        """
        # The stock tutorial uses algorithm=0 with trees=5; 1 selects the kd-tree index
        FLANN_INDEX_KDTREE = 1
        indexParams = dict(algorithm=FLANN_INDEX_KDTREE, trees=6)
        searchParams = dict(checks=50)
        flann = cv2.FlannBasedMatcher(indexParams, searchParams)
        matches = flann.knnMatch(desc1, desc2, k=2)
    else:
        bf = cv2.BFMatcher(cv2.NORM_L2, crossCheck=False)
        matches = bf.knnMatch(desc1, desc2, k=2)


    # Whether to prune matches with Lowe's ratio test
    use_ratio_test = True
    if use_ratio_test:
        p1, p2, kp_pairs = filter_matches(keypoint1, keypoint2, matches)
    else:
        mkp1, mkp2 = [], []
        for m in matches:
            m = m[0]
            mkp1.append( keypoint1[m.queryIdx] )
            mkp2.append( keypoint2[m.trainIdx] )
        p1 = np.float32([kp.pt for kp in mkp1])
        p2 = np.float32([kp.pt for kp in mkp2])
        kp_pairs = zip(mkp1, mkp2)
        kp_pairs = list(kp_pairs)

    if len(p1) >= 4:
        H, status = cv2.findHomography(p1, p2, cv2.RANSAC, 20.0)
        print('%d / %d  inliers/matched' % (np.sum(status), len(status)))
        kp_pairs = [kpp for kpp, flag in zip(kp_pairs, status) if flag]

        img2temp = img2.copy()
        img2temp = cv2.cvtColor(img2temp, cv2.COLOR_GRAY2BGR)
        # Project the hard-coded reference-box corners into image 2 through H:
        # multiply the homogeneous point by H, then divide by the third coordinate
        leftnew_array = np.ones((3, 1), dtype=np.float64)
        leftnew_array[0][0]=843.53
        leftnew_array[1][0]=230.07
        leftnew_array_1 = np.dot(H, leftnew_array)
        leftnew_array_2 = leftnew_array_1/leftnew_array_1[2][0]

        rightnew_array = np.ones((3, 1), dtype=np.float64)
        rightnew_array[0][0]=966.61
        rightnew_array[1][0]=447.38
        rightnew_array_1 = np.dot(H, rightnew_array)
        rightnew_array_2 = rightnew_array_1/rightnew_array_1[2][0]

        cv2.rectangle(img2temp, (int(leftnew_array_2[0][0]+0.5), int(leftnew_array_2[1][0]+0.5)),
                      (int(rightnew_array_2[0][0]+0.5), int(rightnew_array_2[1][0]+0.5)), (0, 255, 0), 3)
        # Ground-truth box (red)
        cv2.rectangle(img2temp, (int(239.82 + 0.5), int(704.91 + 0.5)),
                      (int(389.82 + 0.5), int(985.34 + 0.5)), (0, 0, 255), 3)
        cv2.imwrite("./result_sift_rectangle.png", img2temp)

    else:
        H, status = None, None
        print('%d matches found, not enough for homography estimation' % len(p1))

    # Stack the two images vertically and draw the surviving matches
    h1, w1 = img1.shape[:2]
    h2, w2 = img2.shape[:2]
    vis = np.zeros((h1 + h2, max(w1, w2)), np.uint8)
    vis[:h1, :w1] = img1
    vis[h1:h1 + h2, :w2] = img2
    vis = cv2.cvtColor(vis, cv2.COLOR_GRAY2BGR)

    p1, p2 = [], []
    for kpp in kp_pairs:
        p1.append(np.int32(kpp[0].pt))
        p2.append(np.int32(np.array(kpp[1].pt) + [0, h1]))

    green = (0, 255, 0)
    for (x1, y1), (x2, y2) in zip(p1, p2):
        cv2.line(vis, (int(x1), int(y1)), (int(x2), int(y2)), green, 2)
    cv2.imwrite("./result_sift.png", vis)

3. ASIFT

Adapted from the official OpenCV sample: https://github.com/opencv/opencv/blob/master/samples/python/asift.py

# coding:utf-8
import cv2
import numpy as np

from contextlib import contextmanager
def clock():
    return cv2.getTickCount() / cv2.getTickFrequency()

@contextmanager
def Timer(msg):
    print(msg, '...',)
    start = clock()
    try:
        yield
    finally:
        print("%.2f ms" % ((clock()-start)*1000))

FLANN_INDEX_KDTREE = 1  # bug: flann enums are missing
FLANN_INDEX_LSH    = 6
def init_feature(name):
    chunks = name.split('-')
    if chunks[0] == 'sift':
        detector = cv2.xfeatures2d.SIFT_create()
        norm = cv2.NORM_L2
    elif chunks[0] == 'surf':
        detector = cv2.xfeatures2d.SURF_create(800)
        norm = cv2.NORM_L2
    elif chunks[0] == 'orb':
        detector = cv2.ORB_create(400)
        norm = cv2.NORM_HAMMING
    elif chunks[0] == 'akaze':
        detector = cv2.AKAZE_create()
        norm = cv2.NORM_HAMMING
    elif chunks[0] == 'brisk':
        detector = cv2.BRISK_create()
        norm = cv2.NORM_HAMMING
    else:
        return None, None
    if 'flann' in chunks:
        if norm == cv2.NORM_L2:
            flann_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
        else:
            flann_params= dict(algorithm = FLANN_INDEX_LSH,
                               table_number = 6, # 12
                               key_size = 12,     # 20
                               multi_probe_level = 1) #2
        matcher = cv2.FlannBasedMatcher(flann_params, {})  # bug : need to pass empty dict (#1329)
    else:
        matcher = cv2.BFMatcher(norm)
    return detector, matcher

def filter_matches(kp1, kp2, matches, ratio = 0.75):
    mkp1, mkp2 = [], []
    for m in matches:
        if len(m) == 2 and m[0].distance < m[1].distance * ratio:
            m = m[0]
            mkp1.append( kp1[m.queryIdx] )
            mkp2.append( kp2[m.trainIdx] )
    p1 = np.float32([kp.pt for kp in mkp1])
    p2 = np.float32([kp.pt for kp in mkp2])
    kp_pairs = zip(mkp1, mkp2)
    return p1, p2, list(kp_pairs)

def explore_match(img1, img2, kp_pairs, status = None, H = None):
    h1, w1 = img1.shape[:2]
    h2, w2 = img2.shape[:2]
    vis = np.zeros((h1 + h2, max(w1, w2)), np.uint8)
    vis[:h1, :w1] = img1
    vis[h1:h1 + h2, :w2] = img2
    vis = cv2.cvtColor(vis, cv2.COLOR_GRAY2BGR)

    if status is None:
        status = np.ones(len(kp_pairs), np.bool_)
    p1, p2 = [], []  # python 2 / python 3 change of zip unpacking
    for kpp in kp_pairs:
        p1.append(np.int32(kpp[0].pt))
        p2.append(np.int32(np.array(kpp[1].pt) +  [0, h1]))

    green = (0, 255, 0)
    for (x1, y1), (x2, y2) in zip(p1, p2):
        cv2.line(vis, (int(x1), int(y1)), (int(x2), int(y2)), green, 2)
    cv2.imwrite("./result_asift.png", vis)

def affine_skew(tilt, phi, img, mask=None):
    '''
    affine_skew(tilt, phi, img, mask=None) -> skew_img, skew_mask, Ai
    Ai - is an affine transform matrix from skew_img to img
    '''
    h, w = img.shape[:2]
    if mask is None:
        mask = np.zeros((h, w), np.uint8)
        mask[:] = 255
    A = np.float32([[1, 0, 0], [0, 1, 0]])
    if phi != 0.0:
        phi = np.deg2rad(phi)
        s, c = np.sin(phi), np.cos(phi)
        A = np.float32([[c,-s], [ s, c]])
        corners = [[0, 0], [w, 0], [w, h], [0, h]]
        tcorners = np.int32( np.dot(corners, A.T) )
        x, y, w, h = cv2.boundingRect(tcorners.reshape(1,-1,2))
        A = np.hstack([A, [[-x], [-y]]])
        img = cv2.warpAffine(img, A, (w, h), flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_REPLICATE)
    if tilt != 1.0:
        s = 0.8*np.sqrt(tilt*tilt-1)
        img = cv2.GaussianBlur(img, (0, 0), sigmaX=s, sigmaY=0.01)
        img = cv2.resize(img, (0, 0), fx=1.0/tilt, fy=1.0, interpolation=cv2.INTER_NEAREST)
        A[0] /= tilt
    if phi != 0.0 or tilt != 1.0:
        h, w = img.shape[:2]
        mask = cv2.warpAffine(mask, A, (w, h), flags=cv2.INTER_NEAREST)
    Ai = cv2.invertAffineTransform(A)
    return img, mask, Ai


def affine_detect(detector, img, mask=None):
    '''
    affine_detect(detector, img, mask=None, pool=None) -> keypoints, descrs
    Apply a set of affine transformations to the image, detect keypoints and
    reproject them into initial image coordinates.
    See http://www.ipol.im/pub/algo/my_affine_sift/ for the details.
    ThreadPool object may be passed to speedup the computation.
    '''
    hh, ww = img.shape[:2]
    params = [(1.0, 0.0)]
    for t in 2**(0.5*np.arange(1,6)):
        for phi in np.arange(0, 180, 72.0 / t):
            params.append((t, phi))

    keypoints_all, descrs_all = [], []

    for t, phi in params:
        timg, tmask, Ai = affine_skew(t, phi, img)
        #img_disp = cv2.bitwise_and(timg, timg, mask=tmask);

        keypoints, descrs = detector.detectAndCompute(timg, tmask)
        for kp in keypoints:
            x, y = kp.pt
            kp.pt = tuple( np.dot(Ai, (x, y, 1)) )
            # Clamp reprojected keypoints to the image bounds
            kp.pt = (min(max(kp.pt[0], 0), ww - 1), min(max(kp.pt[1], 0), hh - 1))
        if descrs is None:
            descrs = []
        keypoints_all.extend(keypoints)
        descrs_all.extend(descrs)

    return keypoints_all, np.array(descrs_all)


with Timer('matching'):
    debug=1
    if debug==1:
        path1 = "./1_IMG_Texture_8Bit.png"
        path2 = "./3_IMG_Texture_8Bit.png"
    else:
        path1 = "./OtherSampleFrame_IMG_Texture_8Bit_48.png"
        path2 = "./OtherSampleFrame_IMG_Texture_8Bit_52.png"

    img1 = cv2.imread(path1, cv2.IMREAD_GRAYSCALE)
    img2 = cv2.imread(path2, cv2.IMREAD_GRAYSCALE)
    detector, matcher = init_feature('sift')

    kp1, desc1 = affine_detect(detector, img1)
    kp2, desc2 = affine_detect(detector, img2)
    print('img1 - %d features, img2 - %d features' % (len(kp1), len(kp2)))

    raw_matches = matcher.knnMatch(desc1, trainDescriptors=desc2, k=2)
    p1, p2, kp_pairs = filter_matches(kp1, kp2, raw_matches)
    if len(p1) >= 4:
        H, status = cv2.findHomography(p1, p2, cv2.RANSAC, 50.0)
        print('%d / %d  inliers/matched' % (np.sum(status), len(status)))
        # do not draw outliers (there will be a lot of them)
        kp_pairs = [kpp for kpp, flag in zip(kp_pairs, status) if flag]

        img2temp = img2.copy()
        img2temp = cv2.cvtColor(img2temp, cv2.COLOR_GRAY2BGR)
        # Project the hard-coded reference-box corners into image 2 through H
        leftnew_array = np.ones((3, 1), dtype=np.float64)
        leftnew_array[0][0]=843.53
        leftnew_array[1][0]=230.07
        leftnew_array_1 = np.dot(H, leftnew_array)
        leftnew_array_2 = leftnew_array_1/leftnew_array_1[2][0]

        rightnew_array = np.ones((3, 1), dtype=np.float64)
        rightnew_array[0][0]=966.61
        rightnew_array[1][0]=447.38
        rightnew_array_1 = np.dot(H, rightnew_array)
        rightnew_array_2 = rightnew_array_1/rightnew_array_1[2][0]

        cv2.rectangle(img2temp, (int(leftnew_array_2[0][0]+0.5), int(leftnew_array_2[1][0]+0.5)),
                      (int(rightnew_array_2[0][0]+0.5), int(rightnew_array_2[1][0]+0.5)), (0, 255, 0), 3)
        # Ground-truth box (disabled here)
        #cv2.rectangle(img2temp, (int(239.82 + 0.5), int(704.91 + 0.5)),
        #              (int(389.82 + 0.5), int(985.34 + 0.5)), (0, 0, 255), 3)
        cv2.imwrite("./result_asift_rectangle.png", img2temp)
        '''
        Reference C++ snippet: count inliers by reprojection error under H.

        Mat H = findHomography(queryCoord, objectCoord, CV_RANSAC, 10, mask);
        Mat H_inv = H.inv();
        int inliers_cnt = 0, outliers_cnt = 0;

        for (unsigned i = 0; i < queryCoord.size(); i++) {
            Mat col = Mat::ones(3, 1, CV_64F);
            col.at<double>(0) = queryCoord[i].x;
            col.at<double>(1) = queryCoord[i].y;

            col = H * col;
            col /= col.at<double>(2); // convert [x*, y*, w] to [x*/w, y*/w, 1]
            double dist = sqrt(pow(col.at<double>(0) - objectCoord[i].x, 2) +
                pow(col.at<double>(1) - objectCoord[i].y, 2)); // reprojection error (Euclidean distance)

            if (dist < inlier_threshold) { // error below the inlier threshold
                queryInliers.push_back(queryCoord[i]);
                sceneInliers.push_back(objectCoord[i]);
                inliers_cnt++;
            }
        }

        Reference image box corners:
          [843.5384615384614, 230.07692307692304], [966.6153846153845, 447.38461538461536]
        Current image box corners:
          [239.82608695652175, 704.9130434782609], [389.82608695652175, 985.3478260869565]
        '''
    else:
        H, status = None, None
        print('%d matches found, not enough for homography estimation' % len(p1))
    explore_match(img1, img2, kp_pairs, None, H)
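
The affine_detect docstring above notes that a ThreadPool may be passed to speed up the computation; the official asift.py sample parallelizes over the (tilt, phi) parameter grid. A minimal sketch of that variant, reusing affine_skew from this listing (the function name and worker count are my own):

# Sketch of the multithreaded variant from the official asift.py sample:
# each (tilt, phi) pair is simulated and detected on a worker thread.
from multiprocessing.pool import ThreadPool

def affine_detect_parallel(detector, img, pool=None):
    hh, ww = img.shape[:2]
    params = [(1.0, 0.0)]
    for t in 2**(0.5*np.arange(1, 6)):
        for phi in np.arange(0, 180, 72.0 / t):
            params.append((t, phi))

    def f(p):
        t, phi = p
        timg, tmask, Ai = affine_skew(t, phi, img)
        keypoints, descrs = detector.detectAndCompute(timg, tmask)
        for kp in keypoints:
            x, y = kp.pt
            kp.pt = tuple(np.dot(Ai, (x, y, 1)))
            # Clamp to the image bounds, as in affine_detect above
            kp.pt = (min(max(kp.pt[0], 0), ww - 1), min(max(kp.pt[1], 0), hh - 1))
        return keypoints, descrs if descrs is not None else []

    if pool is None:
        pool = ThreadPool(processes=4)
    keypoints_all, descrs_all = [], []
    for keypoints, descrs in pool.imap(f, params):
        keypoints_all.extend(keypoints)
        descrs_all.extend(descrs)
    return keypoints_all, np.array(descrs_all)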

4. DenseSIFT

# coding:utf-8
import cv2
import numpy as np

from contextlib import contextmanager
def clock():
    return cv2.getTickCount() / cv2.getTickFrequency()

@contextmanager
def Timer(msg):
    print(msg, '...',)
    start = clock()
    try:
        yield
    finally:
        print("%.2f ms" % ((clock()-start)*1000))

def filter_matches(kp1, kp2, matches, ratio = 0.75):
    mkp1, mkp2 = [], []
    for m in matches:
        if len(m) == 2 and m[0].distance < m[1].distance * ratio:
            m = m[0]
            mkp1.append( kp1[m.queryIdx] )
            mkp2.append( kp2[m.trainIdx] )
    p1 = np.float32([kp.pt for kp in mkp1])
    p2 = np.float32([kp.pt for kp in mkp2])
    kp_pairs = zip(mkp1, mkp2)
    return p1, p2, list(kp_pairs)

with Timer('matching'):
    debug=1
    if debug==1:
        path1 = "./1_IMG_Texture_8Bit.png"
        path2 = "./3_IMG_Texture_8Bit.png"
    else:
        path1 = "./OtherSampleFrame_IMG_Texture_8Bit_48.png"
        path2 = "./OtherSampleFrame_IMG_Texture_8Bit_52.png"

    img1 = cv2.imread(path1, cv2.IMREAD_GRAYSCALE)
    img2 = cv2.imread(path2, cv2.IMREAD_GRAYSCALE)

    sift = cv2.xfeatures2d.SIFT_create()
    # keypoint1, desc1 = sift.detectAndCompute(img1, None)
    # keypoint2, desc2 = sift.detectAndCompute(img2, None)

    rows1, cols1 = img1.shape[:2]
    rows2, cols2 = img2.shape[:2]
    # Sample keypoints on a regular grid instead of running a detector;
    # step controls density, and the KeyPoint size sets the SIFT patch scale
    step = 6
    keypoint1 = []
    for lrow in range(step, rows1 - step, step):
        for lcol in range(step, cols1 - step, step):
            keypoint1.append(cv2.KeyPoint(lcol, lrow, step, _class_id=0))

    keypoint2 = []
    for lrow in range(step, rows2 - step, step):
        for lcol in range(step, cols2 - step, step):
            keypoint2.append(cv2.KeyPoint(lcol, lrow, step, _class_id=0))
    keypoint1, desc1 = sift.compute(img1, keypoint1)
    keypoint2, desc2 = sift.compute(img2, keypoint2)

    resultimg = img1.copy()
    resultimg = cv2.drawKeypoints(img1, keypoint1, resultimg, flags=cv2.DRAW_MATCHES_FLAGS_DEFAULT)
    cv2.imwrite('denseSift_keypoints.png', resultimg)


    use_flann = True
    if use_flann:
        """
        FLANN is a library for fast approximate nearest-neighbor search.
        It picks a suitable index for the data and is typically much
        faster than brute-force matching on large descriptor sets.
        """
        # The stock tutorial uses algorithm=0 with trees=5; 1 selects the kd-tree index
        FLANN_INDEX_KDTREE = 1
        indexParams = dict(algorithm=FLANN_INDEX_KDTREE, trees=6)
        searchParams = dict(checks=50)
        flann = cv2.FlannBasedMatcher(indexParams, searchParams)
        matches = flann.knnMatch(desc1, desc2, k=2)
    else:
        bf = cv2.BFMatcher(cv2.NORM_L2, crossCheck=False)
        matches = bf.knnMatch(desc1, desc2, k=2)


    # Whether to prune matches with Lowe's ratio test
    use_ratio_test = True
    if use_ratio_test:
        p1, p2, kp_pairs = filter_matches(keypoint1, keypoint2, matches)
    else:
        mkp1, mkp2 = [], []
        for m in matches:
            m = m[0]
            mkp1.append( keypoint1[m.queryIdx] )
            mkp2.append( keypoint2[m.trainIdx] )
        p1 = np.float32([kp.pt for kp in mkp1])
        p2 = np.float32([kp.pt for kp in mkp2])
        kp_pairs = zip(mkp1, mkp2)
        kp_pairs = list(kp_pairs)

    if len(p1) >= 4:
        H, status = cv2.findHomography(p1, p2, cv2.RANSAC, 20.0)
        print('%d / %d  inliers/matched' % (np.sum(status), len(status)))
        kp_pairs = [kpp for kpp, flag in zip(kp_pairs, status) if flag]
    else:
        H, status = None, None
        print('%d matches found, not enough for homography estimation' % len(p1))

    # Stack the two images vertically and draw the surviving matches
    h1, w1 = img1.shape[:2]
    h2, w2 = img2.shape[:2]
    vis = np.zeros((h1 + h2, max(w1, w2)), np.uint8)
    vis[:h1, :w1] = img1
    vis[h1:h1 + h2, :w2] = img2
    vis = cv2.cvtColor(vis, cv2.COLOR_GRAY2BGR)

    p1, p2 = [], []
    for kpp in kp_pairs:
        p1.append(np.int32(kpp[0].pt))
        p2.append(np.int32(np.array(kpp[1].pt) + [0, h1]))

    green = (0, 255, 0)
    for (x1, y1), (x2, y2) in zip(p1, p2):
        cv2.line(vis, (int(x1), int(y1)), (int(x2), int(y2)), green, 2)
    cv2.imwrite("./result_denseSift.png", vis)

 

Appendix: ASIFT + OpenCV feature matching, VC project source code (C++)

OpenCV headers:

#include "cv.h"
#include "highgui.h"
#include "cxcore.h"

The core code is as follows:

if (!m_pImage1 || !m_pImage2) {
    AfxMessageBox("please,select 2 images!");
    return;
}
UpdateData(TRUE);

CvSize sz1 = cvSize(m_pImage1->width, m_pImage1->height);
CvSize sz2 = cvSize(m_pImage2->width, m_pImage2->height);
CvScalar s;
IplImage *gimg1 = cvCreateImage(sz1, IPL_DEPTH_8U, 1);
cvCvtColor(m_pImage1, gimg1, CV_BGR2GRAY);
IplImage *gimg2 = cvCreateImage(sz2, IPL_DEPTH_8U, 1);
cvCvtColor(m_pImage2, gimg2, CV_BGR2GRAY);

// Copy the grey images into float vectors
size_t w1, h1;
w1 = gimg1->width; h1 = gimg1->height;
float *iarr1 = new float[w1*h1];
for (int i = 0; i < h1; i++) {
    for (int j = 0; j < w1; j++) {
        s = cvGet2D(gimg1, i, j);
        iarr1[i*w1+j] = s.val[0];
    }
}
vector<float> ipixels1(iarr1, iarr1 + w1 * h1);
delete [] iarr1;

size_t w2, h2;
w2 = gimg2->width; h2 = gimg2->height;
float *iarr2 = new float[w2*h2];
for (int i = 0; i < h2; i++) {
    for (int j = 0; j < w2; j++) {
        s = cvGet2D(gimg2, i, j);
        iarr2[i*w2+j] = s.val[0];
    }
}
vector<float> ipixels2(iarr2, iarr2 + w2 * h2);
delete [] iarr2;

float wS = IM_X;
float hS = IM_Y;
float zoom1 = 0, zoom2 = 0;
int wS1 = 0, hS1 = 0, wS2 = 0, hS2 = 0;
vector<float> ipixels1_zoom, ipixels2_zoom;

if (!m_bOrininal) {
    if (m_lWidth == 0 || m_lHeight == 0) return;
    wS = m_lWidth;
    hS = m_lHeight;

    float InitSigma_aa = 1.6;
    float fproj_p, fproj_bg;
    char fproj_i;
    float *fproj_x4, *fproj_y4;
    int fproj_o;
    fproj_o = 3;
    fproj_p = 0;
    fproj_i = 0;
    fproj_bg = 0;
    fproj_x4 = 0;
    fproj_y4 = 0;
    float areaS = wS * hS;

    // Resize image 1
    float area1 = w1 * h1;
    zoom1 = sqrt(area1/areaS);
    wS1 = (int) (w1 / zoom1);
    hS1 = (int) (h1 / zoom1);
    int fproj_sx = wS1;
    int fproj_sy = hS1;
    float fproj_x1 = 0;
    float fproj_y1 = 0;
    float fproj_x2 = wS1;
    float fproj_y2 = 0;
    float fproj_x3 = 0;
    float fproj_y3 = hS1;

    /* Anti-aliasing filtering along vertical direction */
    if (zoom1 > 1) {
        float sigma_aa = InitSigma_aa * zoom1 / 2;
        GaussianBlur1D(ipixels1, w1, h1, sigma_aa, 1);
        GaussianBlur1D(ipixels1, w1, h1, sigma_aa, 0);
    }
    // simulate a tilt: subsample the image along the vertical axis by a factor of t.
    ipixels1_zoom.resize(wS1*hS1);
    fproj(ipixels1, ipixels1_zoom, w1, h1, &fproj_sx, &fproj_sy, &fproj_bg,
          &fproj_o, &fproj_p, &fproj_i, fproj_x1, fproj_y1, fproj_x2, fproj_y2,
          fproj_x3, fproj_y3, fproj_x4, fproj_y4);

    // Resize image 2
    float area2 = w2 * h2;
    zoom2 = sqrt(area2/areaS);
    wS2 = (int) (w2 / zoom2);
    hS2 = (int) (h2 / zoom2);
    fproj_sx = wS2;
    fproj_sy = hS2;
    fproj_x2 = wS2;
    fproj_y3 = hS2;

    /* Anti-aliasing filtering along vertical direction */
    if (zoom1 > 1) {
        float sigma_aa = InitSigma_aa * zoom2 / 2;
        GaussianBlur1D(ipixels2, w2, h2, sigma_aa, 1);
        GaussianBlur1D(ipixels2, w2, h2, sigma_aa, 0);
    }
    // simulate a tilt: subsample the image along the vertical axis by a factor of t.
    ipixels2_zoom.resize(wS2*hS2);
    fproj(ipixels2, ipixels2_zoom, w2, h2, &fproj_sx, &fproj_sy, &fproj_bg,
          &fproj_o, &fproj_p, &fproj_i, fproj_x1, fproj_y1, fproj_x2, fproj_y2,
          fproj_x3, fproj_y3, fproj_x4, fproj_y4);
} else {
    ipixels1_zoom.resize(w1*h1);
    ipixels1_zoom = ipixels1;
    wS1 = w1; hS1 = h1; zoom1 = 1;
    ipixels2_zoom.resize(w2*h2);
    ipixels2_zoom = ipixels2;
    wS2 = w2; hS2 = h2; zoom2 = 1;
}

int num_of_tilts1 = m_lTilts1;
int num_of_tilts2 = m_lTilts2;
int verb = 0;

// Define the SIFT parameters
siftPar siftparameters;
default_sift_parameters(siftparameters);

vector< vector<keypointslist> > keys1;
vector< vector<keypointslist> > keys2;
int num_keys1 = 0, num_keys2 = 0;

SetWindowText("Computing keypoints on the two images...");
CString str1, str2;
time_t tstart, tend1, tend2;
tstart = time(0);
DWORD dstart = GetTickCount();

num_keys1 = compute_asift_keypoints(ipixels1_zoom, wS1, hS1, num_of_tilts1, verb, keys1, siftparameters);
tend1 = time(0);
m_lKeyNum1 = num_keys1;
UpdateData(FALSE);
str1.Format("Img1 Keypoints computation accomplished in %f s", difftime(tend1, tstart));
SetWindowText(str1);

num_keys2 = compute_asift_keypoints(ipixels2_zoom, wS2, hS2, num_of_tilts2, verb, keys2, siftparameters);
tend2 = time(0);
m_lKeyNum2 = num_keys2;
UpdateData(FALSE);
str2.Format("Img2 Keypoints computation accomplished in %f s, matching the keypoints...", difftime(tend2, tstart));
SetWindowText(str2);

// Match ASIFT keypoints
int num_matchings;
matchingslist matchings;
tstart = time(0);
num_matchings = compute_asift_matches(num_of_tilts1, num_of_tilts2, wS1, hS1, wS2, hS2,
                                      verb, keys1, keys2, matchings, siftparameters);
tend1 = time(0);
DWORD dSpan = GetTickCount() - dstart;
cout << "Keypoints matching accomplished in " << difftime(tend1, tstart) << " seconds." << endl;
str2.Format("Keypoints matching accomplished in %f s", difftime(tend1, tstart));
SetWindowText(str2);
m_lMatches = num_matchings;
UpdateData(FALSE);
str1.Format("Total time used:%d ms", dSpan);
AfxMessageBox(str1);

cvRelease((void**)&gimg1);
cvRelease((void**)&gimg2);

Reference: http://www.ipol.im/pub/art/2011/my-asift/