ASIFT-Python-ICP

pipeline_python_asift.py

# Python 2/3 compatibility
from __future__ import print_function
import sys
import numpy as np
import cv2 as cv

# local modules
from common import Timer
from descriptor import init_feature, filter_matches, explore_match

def affine_skew(tilt, phi, img, mask=None):
    '''
    affine_skew(tilt, phi, img, mask=None) -> skew_img, skew_mask, Ai
    Ai is the affine transform matrix that maps skew_img coordinates back to img
    '''
    h, w = img.shape[:2]
    if mask is None:
        mask = np.zeros((h, w), np.uint8)
        mask[:] = 255
    A = np.float32([[1, 0, 0], [0, 1, 0]])
    if phi != 0.0:
        phi = np.deg2rad(phi)
        s, c = np.sin(phi), np.cos(phi)
        A = np.float32([[c,-s], [ s, c]])
        corners = [[0, 0], [w, 0], [w, h], [0, h]]
        tcorners = np.int32( np.dot(corners, A.T) )
        x, y, w, h = cv.boundingRect(tcorners.reshape(1,-1,2))
        A = np.hstack([A, [[-x], [-y]]])
        img = cv.warpAffine(img, A, (w, h), flags=cv.INTER_LINEAR, borderMode=cv.BORDER_REPLICATE)
    if tilt != 1.0:
        s = 0.8*np.sqrt(tilt*tilt-1)
        img = cv.GaussianBlur(img, (0, 0), sigmaX=s, sigmaY=0.01)
        img = cv.resize(img, (0, 0), fx=1.0/tilt, fy=1.0, interpolation=cv.INTER_NEAREST)
        A[0] /= tilt
    if phi != 0.0 or tilt != 1.0:
        h, w = img.shape[:2]
        mask = cv.warpAffine(mask, A, (w, h), flags=cv.INTER_NEAREST)
    Ai = cv.invertAffineTransform(A)
    return img, mask, Ai
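
# Quick sanity check for affine_skew (a minimal sketch, not part of the
# original pipeline): Ai maps skewed-image coordinates back to the source
# image, so the centre of the warped image should land inside the source.
def _check_affine_skew():
    img = np.full((120, 160), 128, np.uint8)
    skew_img, skew_mask, Ai = affine_skew(2.0, 30.0, img)
    yc, xc = (np.array(skew_img.shape[:2]) - 1) / 2.0
    x, y = np.dot(Ai, (xc, yc, 1))
    assert 0 <= x < img.shape[1] and 0 <= y < img.shape[0]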

def affine_detect(detector, img, mask=None):
    '''
    affine_detect(detector, img, mask=None) -> keypoints, descrs
    Apply a set of affine transformations to the image, detect keypoints and
    reproject them into the initial image coordinates.
    See http://www.ipol.im/pub/algo/my_affine_sift/ for the details.
    This version runs serially; the original OpenCV sample can take a
    ThreadPool to speed up the computation.
    '''

    params = [(1.0, 0.0)]
    for t in 2**(0.5*np.arange(1,6)):
        for phi in np.arange(0, 180, 72.0 / t):
            params.append((t, phi))

    hh, ww = img.shape[:2]

    keypoints_all, descrs_all = [], []

    for t, phi in params:
        timg, tmask, Ai = affine_skew(t, phi, img)

        keypoints, descrs = detector.detectAndCompute(timg, tmask)
        for kp in keypoints:
            x, y = kp.pt
            kp.pt = tuple(np.dot(Ai, (x, y, 1)))
            # clamp reprojected keypoints to the source image bounds
            kp.pt = (min(max(kp.pt[0], 0), ww - 1),
                     min(max(kp.pt[1], 0), hh - 1))
        if descrs is None:
            descrs = []
        keypoints_all.extend(keypoints)
        descrs_all.extend(descrs)

    return keypoints_all, np.array(descrs_all)
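
# For intuition (a minimal sketch, not used by the pipeline): the simulated
# view grid above samples tilts t = 2^(k/2) for k = 1..5 and rotations phi in
# steps of 72/t degrees, matching the IPOL ASIFT sampling. Printing it shows
# one identity view plus progressively denser rotations at larger tilts.
def _print_asift_grid():
    params = [(1.0, 0.0)]
    for t in 2**(0.5*np.arange(1, 6)):
        for phi in np.arange(0, 180, 72.0 / t):
            params.append((t, phi))
    for t, phi in params:
        print('tilt=%.2f phi=%.1f' % (t, phi))
    print('%d simulated views' % len(params))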

def setCameraParams(fx, fy, cx, cy, k1, k2, k3, p1, p2):
    cameraMatrix = np.zeros(shape=(3, 3))
    cameraMatrix[0, 0] = fx
    cameraMatrix[1, 1] = fy
    cameraMatrix[0, 2] = cx
    cameraMatrix[1, 2] = cy
    cameraMatrix[2, 2] = 1
    # distortion coefficients in OpenCV order (k1, k2, p1, p2, k3);
    # currently unused downstream, returned for completeness
    distortionCoeffs = np.array([[k1, k2, p1, p2, k3]])
    return cameraMatrix, distortionCoeffs
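
# Worked example (a sketch under the pinhole model assumed by explore_match):
# with intrinsics K, a pixel (u, v) with raw depth d back-projects into the
# camera frame as Z = d / K[2][2], X = (u - cx) * Z / fx, Y = (v - cy) * Z / fy.
def _backproject(u, v, d, K):
    Z = float(d) / K[2][2]
    X = (u - K[0][2]) * Z / K[0][0]
    Y = (v - K[1][2]) * Z / K[1][1]
    return np.array([X, Y, Z])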

def main():

    feature_name = 'sift'
    exp = 0
    if exp == 0:
        fn1 = "./save_ply/OtherSampleFrame_IMG_Texture_8Bit_48.png"
        fn2 = "./save_ply/OtherSampleFrame_IMG_Texture_8Bit_52.png"

        depfn1 = "./save_ply/OtherSampleFrame_IMG_DepthMap_48.tif"
        depfn2 = "./save_ply/OtherSampleFrame_IMG_DepthMap_52.tif"
    else:
        fn1 = "./save_ply/P1000965.JPG"
        fn2 = "./save_ply/P1000966.JPG"

        depfn1 = "./save_ply/11_IMG_DepthMap.tif"
        depfn2 = "./save_ply/22_IMG_DepthMap.tif"

    img1 = cv.imread(fn1, cv.IMREAD_GRAYSCALE)
    img2 = cv.imread(fn2, cv.IMREAD_GRAYSCALE)

    depImage1 = cv.imread(depfn1, -1)
    depImage2 = cv.imread(depfn2, -1)

    detector, matcher = init_feature(feature_name)

    if img1 is None:
        print('Failed to load fn1:', fn1)
        sys.exit(1)

    if img2 is None:
        print('Failed to load fn2:', fn2)
        sys.exit(1)

    if detector is None:
        print('unknown feature:', feature_name)
        sys.exit(1)

    print('using', feature_name)


    with Timer('affine_detect'):
        kp1, desc1 = affine_detect(detector, img1)
        print('process: ', "img1")
        kp2, desc2 = affine_detect(detector, img2)
        print('process: ', "img2")
        print('img1 - %d features, img2 - %d features' % (len(kp1), len(kp2)))


    CameraMatrix, distCoeffs = setCameraParams(2269.16, 2268.4, 1065.54, 799.032, -0.121994, 0.154463, -0.0307676, 0.000367495, -0.000926385)


    with Timer('matching'):
        raw_matches = matcher.knnMatch(desc1, trainDescriptors=desc2, k=2)
    with Timer('ICP-Rt'):
        # No1 feature-point elimination: Lowe ratio test
        p1, p2, kp_pairs = filter_matches(kp1, kp2, raw_matches)
        if len(p1) >= 4:
            # No2 feature-point elimination: homography RANSAC
            H, status = cv.findHomography(p1, p2, cv.RANSAC, 20.0)  # reprojection threshold relaxed from the sample's 5.0
            print('%d / %d  inliers/matched' % (np.sum(status), len(status)))
            # do not draw outliers (there will be a lot of them)
            kp_pairs = [kpp for kpp, flag in zip(kp_pairs, status) if flag]
        else:
            H, status = None, None
            print('%d matches found, not enough for homography estimation' % len(p1))

        explore_match(img1, img2, depImage1, depImage2, kp_pairs, None, H, CameraMatrix)
        print('Done')

if __name__ == '__main__':
    with Timer('all time:'):
        main()

descriptor.py

# Python 2/3 compatibility
from __future__ import print_function

import numpy as np
import cv2

FLANN_INDEX_KDTREE = 1  # bug: flann enums are missing
FLANN_INDEX_LSH    = 6


def init_feature(name):
    chunks = name.split('-')
    if chunks[0] == 'sift':
        detector = cv2.xfeatures2d.SIFT_create()
        norm = cv2.NORM_L2
    elif chunks[0] == 'surf':
        detector = cv2.xfeatures2d.SURF_create(800)
        norm = cv2.NORM_L2
    elif chunks[0] == 'orb':
        detector = cv2.ORB_create(400)
        norm = cv2.NORM_HAMMING
    elif chunks[0] == 'akaze':
        detector = cv2.AKAZE_create()
        norm = cv2.NORM_HAMMING
    elif chunks[0] == 'brisk':
        detector = cv2.BRISK_create()
        norm = cv2.NORM_HAMMING
    else:
        return None, None
    if 'flann' in chunks:
        if norm == cv2.NORM_L2:
            flann_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
        else:
            flann_params= dict(algorithm = FLANN_INDEX_LSH,
                               table_number = 6, # 12
                               key_size = 12,     # 20
                               multi_probe_level = 1) #2
        matcher = cv2.FlannBasedMatcher(flann_params, {})  # bug : need to pass empty dict (#1329)
    else:
        matcher = cv2.BFMatcher(norm)
    return detector, matcher


def filter_matches(kp1, kp2, matches, ratio = 0.75):
    mkp1, mkp2 = [], []
    for m in matches:
        if len(m) == 2 and m[0].distance < m[1].distance * ratio:
            m = m[0]
            mkp1.append( kp1[m.queryIdx] )
            mkp2.append( kp2[m.trainIdx] )
    p1 = np.float32([kp.pt for kp in mkp1])
    p2 = np.float32([kp.pt for kp in mkp2])
    kp_pairs = zip(mkp1, mkp2)
    return p1, p2, list(kp_pairs)

def best_fit_transform(A, B):
    '''
    Calculates the least-squares best-fit transform that maps corresponding points A to B in m spatial dimensions
    Input:
      A: Nxm numpy array of corresponding points
      B: Nxm numpy array of corresponding points
    Returns:
      T: (m+1)x(m+1) homogeneous transformation matrix that maps A on to B
      R: mxm rotation matrix
      t: mx1 translation vector
    '''
    assert A.shape == B.shape

    # get number of dimensions
    m = A.shape[1]

    # translate points to their centroids
    centroid_A = np.mean(A, axis=0)
    centroid_B = np.mean(B, axis=0)
    AA = A - centroid_A
    BB = B - centroid_B

    # rotation matrix
    H = np.dot(AA.T, BB)
    U, S, Vt = np.linalg.svd(H)
    R = np.dot(Vt.T, U.T)

    # special reflection case
    if np.linalg.det(R) < 0:
        Vt[m - 1, :] *= -1
        R = np.dot(Vt.T, U.T)

    # translation
    t = centroid_B.T - np.dot(R, centroid_A.T)

    # homogeneous transformation
    T = np.identity(m + 1)
    T[:m, :m] = R
    T[:m, m] = t
    return T, R, t
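
# A minimal self-check (not part of the pipeline): apply a known rotation and
# translation to random points and confirm best_fit_transform recovers them.
def _check_best_fit_transform():
    rng = np.random.RandomState(0)
    A = rng.rand(20, 3)
    angle = np.deg2rad(30)
    R_true = np.array([[np.cos(angle), -np.sin(angle), 0],
                       [np.sin(angle),  np.cos(angle), 0],
                       [0, 0, 1]])
    t_true = np.array([0.5, -1.0, 2.0])
    B = np.dot(A, R_true.T) + t_true
    T, R, t = best_fit_transform(A, B)
    assert np.allclose(R, R_true, atol=1e-6)
    assert np.allclose(t, t_true, atol=1e-6)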

def explore_match(img1, img2, depImage1, depImage2, kp_pairs, status = None, H = None, CameraMatrix = None):
    h1, w1 = img1.shape[:2]
    h2, w2 = img2.shape[:2]
    vis = np.zeros((max(h1, h2), w1+w2), np.uint8)
    vis[:h1, :w1] = img1
    vis[:h2, w1:w1+w2] = img2
    vis = cv2.cvtColor(vis, cv2.COLOR_GRAY2BGR)

    if H is not None:
        corners = np.float32([[0, 0], [w1, 0], [w1, h1], [0, h1]])
        corners = np.int32( cv2.perspectiveTransform(corners.reshape(1, -1, 2), H).reshape(-1, 2) + (w1, 0) )
        cv2.polylines(vis, [corners], True, (255, 255, 255))

    if status is None:
        status = np.ones(len(kp_pairs), np.bool_)
    p1, p2 = [], []  # python 2 / python 3 change of zip unpacking
    newp1, newp2 = [], []
    for kpp in kp_pairs:
        p1.append(np.int32(kpp[0].pt))
        newp1.append(np.int32(np.array(kpp[0].pt)))
        p2.append(np.int32(np.array(kpp[1].pt) + [w1, 0]))
        newp2.append(np.int32(np.array(kpp[1].pt)))

    # No3 feature-point elimination: essential-matrix RANSAC
    CameraMatrix = np.array(CameraMatrix)
    E, mask = cv2.findEssentialMat(np.array(newp1), np.array(newp2), CameraMatrix, method=cv2.RANSAC, threshold=20)

    trp1 = []
    trp2 = []
    for key, ((x1, y1), (x2, y2)) in enumerate(zip(newp1, newp2)):
        # back-project the left keypoint into the camera frame
        left_d = depImage1[y1][x1]
        left_z = float(left_d) / CameraMatrix[2][2]
        left_x = (x1 - CameraMatrix[0][2]) * left_z / CameraMatrix[0][0]
        left_y = (y1 - CameraMatrix[1][2]) * left_z / CameraMatrix[1][1]
        points1 = np.array([left_x, left_y, left_z])
        # a near-zero point means the depth map had no valid reading here
        flag1 = (np.sum(abs(points1)) < 0.001)
        # back-project the right keypoint the same way
        right_d = depImage2[y2][x2]
        right_z = float(right_d) / CameraMatrix[2][2]
        right_x = (x2 - CameraMatrix[0][2]) * right_z / CameraMatrix[0][0]
        right_y = (y2 - CameraMatrix[1][2]) * right_z / CameraMatrix[1][1]
        points2 = np.array([right_x, right_y, right_z])
        flag2 = (np.sum(abs(points2)) < 0.001)
        # keep pairs that survive the essential-matrix RANSAC and have valid depth
        if mask[key] and not flag1 and not flag2:
            trp1.append(points1)
            trp2.append(points2)

    # rigid-body R, t estimate (written below as the seed for the PCL ICP demo)
    newtrp1 = np.array(trp1)
    newtrp2 = np.array(trp2)
    if len(newtrp1) < 3:
        print('not enough valid 3D correspondences to estimate R, t')
        return
    T, R, t = best_fit_transform(newtrp1, newtrp2)
    print(R.astype(np.float16))
    print(t.astype(np.float16))

    txtMatrix = list(R.flatten())
    txtMatrix.extend(list(t.flatten()))
    with open("/home/spple/CLionProjects/python_asift_gpu/save_ply/Rt_48_52.txt", 'w') as file_write_obj:
        for var in txtMatrix:
            file_write_obj.write(str(var))
            file_write_obj.write('\n')

    # import os
    # cmmd = str("/home/spple/CLionProjects/python_asift_gpu/interactive_icp/build/PCL_demo1 "+ "/home/spple/CLionProjects/python_asift_gpu/save_ply/SampleFrame_52.ply"+" /home/spple/CLionProjects/python_asift_gpu/save_ply/SampleFrame_48.ply"+ " /home/spple/CLionProjects/python_asift_gpu/save_ply/Rt_48_52.txt")
    # print(cmmd)
    # os.system(cmmd)
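    # A safer way to run that external PCL ICP step (a sketch, assuming the
    # same demo binary and file paths as the comment above):
    # import subprocess
    # subprocess.run(["/home/spple/CLionProjects/python_asift_gpu/interactive_icp/build/PCL_demo1",
    #                 "/home/spple/CLionProjects/python_asift_gpu/save_ply/SampleFrame_52.ply",
    #                 "/home/spple/CLionProjects/python_asift_gpu/save_ply/SampleFrame_48.ply",
    #                 "/home/spple/CLionProjects/python_asift_gpu/save_ply/Rt_48_52.txt"],
    #                check=True)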


    green = (0, 255, 0)
    vis0 = vis.copy()
    for (x1, y1), (x2, y2), inlier in zip(p1, p2, status):
        if inlier:
            cv2.line(vis0, (x1, y1), (x2, y2), green)
    cv2.imwrite("./save_ply/No2_FeaturePointElimination.png", vis0)

    # draw the essential-matrix inliers on a fresh copy so the two output
    # images do not overlap
    vis1 = vis.copy()
    for (x1, y1), (x2, y2), inlier in zip(p1, p2, mask):
        if inlier:
            cv2.line(vis1, (x1, y1), (x2, y2), green)
    cv2.imwrite("./save_ply/No3_FeaturePointElimination.png", vis1)

common.py

'''
This module contains some common routines used by other samples.
'''

# Python 2/3 compatibility
from __future__ import print_function
import sys
PY3 = sys.version_info[0] == 3

if PY3:
    from functools import reduce

import numpy as np
import cv2 as cv

# built-in modules
import os
import itertools as it
from contextlib import contextmanager

image_extensions = ['.bmp', '.jpg', '.jpeg', '.png', '.tif', '.tiff', '.pbm', '.pgm', '.ppm']

class Bunch(object):
    def __init__(self, **kw):
        self.__dict__.update(kw)
    def __str__(self):
        return str(self.__dict__)

def splitfn(fn):
    path, fn = os.path.split(fn)
    name, ext = os.path.splitext(fn)
    return path, name, ext

def anorm2(a):
    return (a*a).sum(-1)
def anorm(a):
    return np.sqrt( anorm2(a) )

def homotrans(H, x, y):
    xs = H[0, 0]*x + H[0, 1]*y + H[0, 2]
    ys = H[1, 0]*x + H[1, 1]*y + H[1, 2]
    s  = H[2, 0]*x + H[2, 1]*y + H[2, 2]
    return xs/s, ys/s

def to_rect(a):
    a = np.ravel(a)
    if len(a) == 2:
        a = (0, 0, a[0], a[1])
    return np.array(a, np.float64).reshape(2, 2)

def rect2rect_mtx(src, dst):
    src, dst = to_rect(src), to_rect(dst)
    cx, cy = (dst[1] - dst[0]) / (src[1] - src[0])
    tx, ty = dst[0] - src[0] * (cx, cy)
    M = np.float64([[ cx,  0, tx],
                    [  0, cy, ty],
                    [  0,  0,  1]])
    return M


def lookat(eye, target, up = (0, 0, 1)):
    fwd = np.asarray(target, np.float64) - eye
    fwd /= anorm(fwd)
    right = np.cross(fwd, up)
    right /= anorm(right)
    down = np.cross(fwd, right)
    R = np.float64([right, down, fwd])
    tvec = -np.dot(R, eye)
    return R, tvec

def mtx2rvec(R):
    w, u, vt = cv.SVDecomp(R - np.eye(3))
    p = vt[0] + u[:,0]*w[0]    # same as np.dot(R, vt[0])
    c = np.dot(vt[0], p)
    s = np.dot(vt[1], p)
    axis = np.cross(vt[0], vt[1])
    return axis * np.arctan2(s, c)

def draw_str(dst, target, s):
    x, y = target
    cv.putText(dst, s, (x+1, y+1), cv.FONT_HERSHEY_PLAIN, 1.0, (0, 0, 0), thickness = 2, lineType=cv.LINE_AA)
    cv.putText(dst, s, (x, y), cv.FONT_HERSHEY_PLAIN, 1.0, (255, 255, 255), lineType=cv.LINE_AA)

class Sketcher:
    def __init__(self, windowname, dests, colors_func):
        self.prev_pt = None
        self.windowname = windowname
        self.dests = dests
        self.colors_func = colors_func
        self.dirty = False
        self.show()
        cv.setMouseCallback(self.windowname, self.on_mouse)

    def show(self):
        cv.imshow(self.windowname, self.dests[0])

    def on_mouse(self, event, x, y, flags, param):
        pt = (x, y)
        if event == cv.EVENT_LBUTTONDOWN:
            self.prev_pt = pt
        elif event == cv.EVENT_LBUTTONUP:
            self.prev_pt = None

        if self.prev_pt and flags & cv.EVENT_FLAG_LBUTTON:
            for dst, color in zip(self.dests, self.colors_func()):
                cv.line(dst, self.prev_pt, pt, color, 5)
            self.dirty = True
            self.prev_pt = pt
            self.show()


# palette data from matplotlib/_cm.py
_jet_data =   {'red':   ((0., 0, 0), (0.35, 0, 0), (0.66, 1, 1), (0.89,1, 1),
                         (1, 0.5, 0.5)),
               'green': ((0., 0, 0), (0.125,0, 0), (0.375,1, 1), (0.64,1, 1),
                         (0.91,0,0), (1, 0, 0)),
               'blue':  ((0., 0.5, 0.5), (0.11, 1, 1), (0.34, 1, 1), (0.65,0, 0),
                         (1, 0, 0))}

cmap_data = { 'jet' : _jet_data }

def make_cmap(name, n=256):
    data = cmap_data[name]
    xs = np.linspace(0.0, 1.0, n)
    channels = []
    eps = 1e-6
    for ch_name in ['blue', 'green', 'red']:
        ch_data = data[ch_name]
        xp, yp = [], []
        for x, y1, y2 in ch_data:
            xp += [x, x+eps]
            yp += [y1, y2]
        ch = np.interp(xs, xp, yp)
        channels.append(ch)
    return np.uint8(np.array(channels).T*255)
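
# Usage sketch (an assumption about intended use): the returned n x 3 uint8
# table is in BGR channel order, so a grayscale image can be pseudo-colored
# by direct indexing, e.g. to inspect a depth map.
def _apply_cmap(gray_u8):
    lut = make_cmap('jet')  # shape (256, 3), BGR
    return lut[gray_u8]     # shape (h, w, 3) color image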

def nothing(*arg, **kw):
    pass

def clock():
    return cv.getTickCount() / cv.getTickFrequency()

@contextmanager
def Timer(msg):
    print(msg, '...')
    start = clock()
    try:
        yield
    finally:
        print("%.2f ms" % ((clock()-start)*1000))

class StatValue:
    def __init__(self, smooth_coef = 0.5):
        self.value = None
        self.smooth_coef = smooth_coef
    def update(self, v):
        if self.value is None:
            self.value = v
        else:
            c = self.smooth_coef
            self.value = c * self.value + (1.0-c) * v

class RectSelector:
    def __init__(self, win, callback):
        self.win = win
        self.callback = callback
        cv.setMouseCallback(win, self.onmouse)
        self.drag_start = None
        self.drag_rect = None
    def onmouse(self, event, x, y, flags, param):
        x, y = np.int16([x, y]) # BUG
        if event == cv.EVENT_LBUTTONDOWN:
            self.drag_start = (x, y)
            return
        if self.drag_start:
            if flags & cv.EVENT_FLAG_LBUTTON:
                xo, yo = self.drag_start
                x0, y0 = np.minimum([xo, yo], [x, y])
                x1, y1 = np.maximum([xo, yo], [x, y])
                self.drag_rect = None
                if x1-x0 > 0 and y1-y0 > 0:
                    self.drag_rect = (x0, y0, x1, y1)
            else:
                rect = self.drag_rect
                self.drag_start = None
                self.drag_rect = None
                if rect:
                    self.callback(rect)
    def draw(self, vis):
        if not self.drag_rect:
            return False
        x0, y0, x1, y1 = self.drag_rect
        cv.rectangle(vis, (x0, y0), (x1, y1), (0, 255, 0), 2)
        return True
    @property
    def dragging(self):
        return self.drag_rect is not None


def grouper(n, iterable, fillvalue=None):
    '''grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx'''
    args = [iter(iterable)] * n
    if PY3:
        output = it.zip_longest(fillvalue=fillvalue, *args)
    else:
        output = it.izip_longest(fillvalue=fillvalue, *args)
    return output

def mosaic(w, imgs):
    '''Make a grid from images.
    w    -- number of grid columns
    imgs -- images (must have same size and format)
    '''
    imgs = iter(imgs)
    if PY3:
        img0 = next(imgs)
    else:
        img0 = imgs.next()
    pad = np.zeros_like(img0)
    imgs = it.chain([img0], imgs)
    rows = grouper(w, imgs, pad)
    return np.vstack(map(np.hstack, rows))

def getsize(img):
    h, w = img.shape[:2]
    return w, h

def mdot(*args):
    return reduce(np.dot, args)

def draw_keypoints(vis, keypoints, color = (0, 255, 255)):
    for kp in keypoints:
        x, y = kp.pt
        cv.circle(vis, (int(x), int(y)), 2, color)

 

ASIFT+OpenCV image feature matching: VC project source code

An ASIFT feature-matching implementation as a Visual C++ project against the legacy OpenCV C API. OpenCV headers included:

#include "cv.h"
#include "highgui.h"
#include "cxcore.h"

The core code is as follows:

if (!m_pImage1 || !m_pImage2)
{
    AfxMessageBox("please, select 2 images!");
    return;
}
UpdateData(TRUE);

CvSize sz1 = cvSize(m_pImage1->width, m_pImage1->height);
CvSize sz2 = cvSize(m_pImage2->width, m_pImage2->height);
CvScalar s;

IplImage *gimg1 = cvCreateImage(sz1, IPL_DEPTH_8U, 1);
cvCvtColor(m_pImage1, gimg1, CV_BGR2GRAY);
IplImage *gimg2 = cvCreateImage(sz2, IPL_DEPTH_8U, 1);
cvCvtColor(m_pImage2, gimg2, CV_BGR2GRAY);

size_t w1, h1;
w1 = gimg1->width;
h1 = gimg1->height;
float *iarr1 = new float[w1*h1];
for (int i = 0; i < h1; i++)
{
    for (int j = 0; j < w1; j++)
    {
        s = cvGet2D(gimg1, i, j);
        iarr1[i*w1+j] = s.val[0];
    }
}
vector<float> ipixels1(iarr1, iarr1 + w1 * h1);
delete [] iarr1;

size_t w2, h2;
w2 = gimg2->width;
h2 = gimg2->height;
float *iarr2 = new float[w2*h2];
for (int i = 0; i < h2; i++)
{
    for (int j = 0; j < w2; j++)
    {
        s = cvGet2D(gimg2, i, j);
        iarr2[i*w2+j] = s.val[0];
    }
}
vector<float> ipixels2(iarr2, iarr2 + w2 * h2);
delete [] iarr2;

float wS = IM_X;
float hS = IM_Y;
float zoom1 = 0, zoom2 = 0;
int wS1 = 0, hS1 = 0, wS2 = 0, hS2 = 0;
vector<float> ipixels1_zoom, ipixels2_zoom;

if (!m_bOrininal)
{
    if (m_lWidth == 0 || m_lHeight == 0)
        return;
    wS = m_lWidth;
    hS = m_lHeight;

    float InitSigma_aa = 1.6;
    float fproj_p, fproj_bg;
    char fproj_i;
    float *fproj_x4, *fproj_y4;
    int fproj_o;

    fproj_o = 3;
    fproj_p = 0;
    fproj_i = 0;
    fproj_bg = 0;
    fproj_x4 = 0;
    fproj_y4 = 0;

    float areaS = wS * hS;

    // Resize image 1
    float area1 = w1 * h1;
    zoom1 = sqrt(area1/areaS);
    wS1 = (int) (w1 / zoom1);
    hS1 = (int) (h1 / zoom1);

    int fproj_sx = wS1;
    int fproj_sy = hS1;

    float fproj_x1 = 0;
    float fproj_y1 = 0;
    float fproj_x2 = wS1;
    float fproj_y2 = 0;
    float fproj_x3 = 0;
    float fproj_y3 = hS1;

    /* Anti-aliasing filtering along vertical direction */
    if (zoom1 > 1)
    {
        float sigma_aa = InitSigma_aa * zoom1 / 2;
        GaussianBlur1D(ipixels1, w1, h1, sigma_aa, 1);
        GaussianBlur1D(ipixels1, w1, h1, sigma_aa, 0);
    }

    // simulate a tilt: subsample the image along the vertical axis by a factor of t.
    ipixels1_zoom.resize(wS1*hS1);
    fproj(ipixels1, ipixels1_zoom, w1, h1, &fproj_sx, &fproj_sy, &fproj_bg, &fproj_o, &fproj_p,
          &fproj_i, fproj_x1, fproj_y1, fproj_x2, fproj_y2, fproj_x3, fproj_y3, fproj_x4, fproj_y4);

    // Resize image 2
    float area2 = w2 * h2;
    zoom2 = sqrt(area2/areaS);
    wS2 = (int) (w2 / zoom2);
    hS2 = (int) (h2 / zoom2);

    fproj_sx = wS2;
    fproj_sy = hS2;
    fproj_x2 = wS2;
    fproj_y3 = hS2;

    /* Anti-aliasing filtering along vertical direction */
    if (zoom2 > 1)
    {
        float sigma_aa = InitSigma_aa * zoom2 / 2;
        GaussianBlur1D(ipixels2, w2, h2, sigma_aa, 1);
        GaussianBlur1D(ipixels2, w2, h2, sigma_aa, 0);
    }

    // simulate a tilt: subsample the image along the vertical axis by a factor of t.
    ipixels2_zoom.resize(wS2*hS2);
    fproj(ipixels2, ipixels2_zoom, w2, h2, &fproj_sx, &fproj_sy, &fproj_bg, &fproj_o, &fproj_p,
          &fproj_i, fproj_x1, fproj_y1, fproj_x2, fproj_y2, fproj_x3, fproj_y3, fproj_x4, fproj_y4);
}
else
{
    ipixels1_zoom.resize(w1*h1);
    ipixels1_zoom = ipixels1;
    wS1 = w1;
    hS1 = h1;
    zoom1 = 1;

    ipixels2_zoom.resize(w2*h2);
    ipixels2_zoom = ipixels2;
    wS2 = w2;
    hS2 = h2;
    zoom2 = 1;
}

int num_of_tilts1 = m_lTilts1;
int num_of_tilts2 = m_lTilts2;
int verb = 0;

// Define the SIFT parameters
siftPar siftparameters;
default_sift_parameters(siftparameters);

vector< vector< keypointslist > > keys1;
vector< vector< keypointslist > > keys2;
int num_keys1 = 0, num_keys2 = 0;

SetWindowText("Computing keypoints on the two images...");
CString str1, str2;
time_t tstart, tend1, tend2;
tstart = time(0);
DWORD dstart = GetTickCount();

num_keys1 = compute_asift_keypoints(ipixels1_zoom, wS1, hS1, num_of_tilts1, verb, keys1, siftparameters);
tend1 = time(0);
m_lKeyNum1 = num_keys1;
UpdateData(FALSE);
str1.Format("Img1 Keypoints computation accomplished in %f s", difftime(tend1, tstart));
SetWindowText(str1);

num_keys2 = compute_asift_keypoints(ipixels2_zoom, wS2, hS2, num_of_tilts2, verb, keys2, siftparameters);
tend2 = time(0);
m_lKeyNum2 = num_keys2;
UpdateData(FALSE);
str2.Format("Img2 Keypoints computation accomplished in %f s, matching the keypoints...", difftime(tend2, tstart));
SetWindowText(str2);

// Match the ASIFT keypoints
int num_matchings;
matchingslist matchings;
tstart = time(0);
num_matchings = compute_asift_matches(num_of_tilts1, num_of_tilts2, wS1, hS1, wS2, hS2,
                                      verb, keys1, keys2, matchings, siftparameters);
tend1 = time(0);
DWORD dSpan = GetTickCount() - dstart;
cout << "Keypoints matching accomplished in " << difftime(tend1, tstart) << " seconds." << endl;
str2.Format("Keypoints matching accomplished in %f s", difftime(tend1, tstart));
SetWindowText(str2);
m_lMatches = num_matchings;
UpdateData(FALSE);
str1.Format("Total time used: %d ms", dSpan);
AfxMessageBox(str1);

cvRelease((void**)&gimg1);
cvRelease((void**)&gimg2);

Reference: http://www.ipol.im/pub/art/2011/my-asift/