# OpenCV ORB feature-matching demo; parameters should be tuned per dataset.
# ORB extraction dominates runtime (~5 ms per image, depending on resolution
# and feature count); BF matching and RANSAC fundamental-matrix estimation
# each finish in under ~3 ms.
import cv2
import time
import numpy as np

# Input image pair (KITTI-style layout: sequence 06, camera 0).
im1 = '/06/image_0/000013.png'
im2 = '/06/image_0/000030.png'
print(cv2.__version__)

psd_img_1 = cv2.imread(im1, cv2.IMREAD_COLOR)
# psd_img_1 = cv2.resize(psd_img_1, (320, 240),
# interpolation=cv2.INTER_CUBIC)
psd_img_2 = cv2.imread(im2, cv2.IMREAD_COLOR)
# imread returns None on a missing/unreadable file; fail fast with a clear
# message instead of an opaque OpenCV assertion later.
if psd_img_1 is None or psd_img_2 is None:
    raise FileNotFoundError(f'could not read input images: {im1}, {im2}')

# ORB keypoint detection + descriptor computation (~5-9 ms per image,
# depending on resolution and the number of features requested).
# sift = cv2.xfeatures2d.SIFT_create()
orb = cv2.ORB_create(500)
kp1, des1 = orb.detectAndCompute(psd_img_1, None)
kp2, des2 = orb.detectAndCompute(psd_img_2, None)
# detectAndCompute yields None descriptors when no keypoints are found.
if des1 is None or des2 is None:
    raise RuntimeError('no ORB features detected in one of the images')

# Brute-force matching; Hamming distance is the correct norm for binary
# ORB descriptors.
# flann = cv2.FlannBasedMatcher(index_params, search_params)
match = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=False)
matches = match.match(des1, des2)
matches = sorted(matches, key=lambda x: x.distance)  # ascending distance
if not matches:
    raise RuntimeError('no matches between the two images')

# Keep only matches whose distance is close to the best one.
# NOTE: a true Lowe ratio test would need knnMatch(k=2) and compare the
# nearest vs. second-nearest distance per query; this is a simpler
# best-distance-relative threshold with a floor of 30.
good = []
pts1 = []
pts2 = []
print(matches[0].distance, len(matches))
threshold = max(2 * matches[0].distance, 30)  # hoisted loop invariant
for m in matches:
    if m.distance < threshold:
        good.append([m])
        pts1.append(kp1[m.queryIdx].pt)
        pts2.append(kp2[m.trainIdx].pt)

time1 = time.time()
pts1 = np.asarray(pts1)
pts2 = np.asarray(pts2)

# Fundamental matrix with RANSAC (~1 ms); FM_RANSAC needs >= 8 point pairs.
if len(pts1) < 8:
    raise RuntimeError(f'not enough good matches for FM_RANSAC: {len(pts1)} < 8')
good_F, status = cv2.findFundamentalMat(
    pts1, pts2, method=cv2.FM_RANSAC,
    ransacReprojThreshold=3, confidence=0.99, maxIters=100)
time2 = time.time()
# status flags RANSAC inliers (1) vs outliers (0); it is None on failure,
# which previously crashed np.sum.
inliers = 0 if status is None else int(np.sum(status))
print('len: ', len(good), inliers, time2 - time1)  # inlier count

# Visualize the filtered matches side by side (good is a list of
# single-element lists, the shape drawMatchesKnn expects).
img3 = cv2.drawMatchesKnn(psd_img_1, kp1, psd_img_2, kp2, good, None, flags=2)
cv2.namedWindow('image1', cv2.WINDOW_NORMAL)
cv2.imshow("image1", img3)
cv2.waitKey(0)  # wait for a key press
cv2.destroyAllWindows()  # close all windows