导入相关的包
import cv2
import numpy as np
import random
import os
import matplotlib.pyplot as plt
from timeit import default_timer as timer
获取图片
# Gather all candidate files from the image folder.
imgDir = 'imgs/'
imgFiles = os.listdir(imgDir)
# Pick two distinct files at random (random.sample never repeats an element).
imgs = random.sample(imgFiles, 2)
img1 = cv2.imread(os.path.join(imgDir, imgs[0]))
img2 = cv2.imread(os.path.join(imgDir, imgs[1]))
# cv2.imread returns None for unreadable/non-image files; fail early with a
# clear message instead of crashing inside cvtColor below.
if img1 is None or img2 is None:
    raise IOError(f'Failed to read one of the sampled images: {imgs}')
# OpenCV loads images as BGR; matplotlib expects RGB, so convert for display.
img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2RGB)
img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2RGB)
# Grayscale versions are what the feature detector below operates on.
gray1 = cv2.cvtColor(img1, cv2.COLOR_RGB2GRAY)
gray2 = cv2.cvtColor(img2, cv2.COLOR_RGB2GRAY)
# Show the two color images (top row) and their grayscale versions (bottom row).
plt.subplot(2, 2, 1)
plt.imshow(img1)
plt.subplot(2, 2, 2)
plt.imshow(img2)
plt.subplot(2, 2, 3)
plt.imshow(gray1, cmap='gray')
plt.subplot(2, 2, 4)
plt.imshow(gray2, cmap='gray')
plt.show()
计算特征点和描述子
# Create the SIFT detector. cv2.SIFT_create() is the modern API (OpenCV >= 4.4,
# after the SIFT patent expired); fall back to the old opencv-contrib name for
# older installations.
try:
    sift = cv2.SIFT_create()
except AttributeError:
    sift = cv2.xfeatures2d_SIFT.create()
# Detect keypoints and compute their 128-dim descriptors in one pass.
kps1, des1 = sift.detectAndCompute(gray1, None)
kps2, des2 = sift.detectAndCompute(gray2, None)
# Draw keypoints onto COPIES: the original code passed img1/img2 as the output
# image, mutating them so the match visualizations later were drawn on top of
# the painted keypoints.
img1Show = cv2.drawKeypoints(gray1, kps1, img1.copy())
img2Show = cv2.drawKeypoints(gray2, kps2, img2.copy())
plt.subplot(1, 2, 1)
plt.imshow(img1Show)
plt.subplot(1, 2, 2)
plt.imshow(img2Show)
plt.show()
print(f'kps1[0]:\n{kps1[0]}')
print(f'des1[0]:\n{des1[0]}')
kps1[0]:
<KeyPoint 0x7f29c8455e70>
des1[0]:
[ 0. 5. 7. 0. 0. 0. 0. 0. 1. 0. 0. 1. 2. 0.
0. 0. 4. 0. 0. 0. 0. 0. 0. 1. 0. 0. 0. 0.
0. 0. 0. 0. 37. 124. 98. 6. 2. 0. 0. 4. 64. 20.
5. 9. 24. 12. 7. 12. 136. 53. 10. 1. 2. 2. 2. 34.
26. 28. 25. 1. 3. 5. 4. 8. 136. 60. 22. 4. 1. 1.
4. 96. 86. 10. 1. 6. 10. 53. 52. 65. 136. 136. 60. 2.
1. 10. 16. 18. 8. 136. 136. 1. 1. 1. 4. 4. 136. 2.
1. 6. 4. 0. 1. 107. 55. 4. 8. 61. 33. 8. 7. 54.
24. 12. 9. 8. 1. 14. 113. 104. 1. 24. 20. 0. 0. 34.
97. 11.]
使用BF暴力匹配
参考资料:https://docs.opencv.org/3.3.0/dc/dc3/tutorial_py_matcher.html
Basics of Brute-Force Matcher
Brute-Force Matcher将第一个集合中的每一个描述向量与第二个集合中的所有描述向量进行匹配,计算出它们之间的“距离”,最后返回距离最近的一个。
首先使用cv2.BFMatcher()创建对象。该函数接受两个参数。第一个参数确定距离的计算方式。默认情况下是cv2.NORM_L2,这一方式适用于SIFT、SURF(cv2.NORM_L1也适用)。对于基于 binary string 的方法,如ORB、BRIEF、BRISK等等,应使用cv2.NORM_HAMMING计算方式。如果 ORB 使用 WTA_K == 3 或 4,则应该使用cv2.NORM_HAMMING2。
第二个参数是一个bool值,默认为false。如果设置为True,匹配结果返回的将是(i,j),即集合A的第i个描述向量对应B中第j个描述向量。
# Brute-force matcher; the default cv2.NORM_L2 metric is correct for SIFT.
bf = cv2.BFMatcher()
# For each descriptor in des1, find its 2 nearest neighbours in des2.
matches = bf.knnMatch(des1, des2, k=2)
# Lowe's ratio test: keep a match only when the best neighbour is clearly
# closer than the second-best one.
good = []
for pair in matches:
    # knnMatch may return fewer than k neighbours when des2 is small; the
    # original `for m, n in matches` unpacking would crash in that case.
    if len(pair) == 2 and pair[0].distance < 0.75 * pair[1].distance:
        good.append([pair[0]])
imgMatch = cv2.drawMatchesKnn(img1, kps1, img2, kps2, good, None, flags=2)
plt.imshow(imgMatch)
plt.show()
基于FLANN的匹配方法
FLANN指的是Fast Library for Approximate Nearest Neighbors。在大规模数据和高维数据中速度比BF算法快。
对于SIFT,SURF等算法,参数设置为:
FLANN_INDEX_KDTREE = 1
index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
对于ORB,参数设置为:
FLANN_INDEX_LSH = 6
index_params= dict(algorithm = FLANN_INDEX_LSH,
table_number = 6, # 12
key_size = 12, # 20
multi_probe_level = 1) #2
# FLANN with a KD-tree index: the recommended setup for float descriptors
# such as SIFT/SURF.
FLANN_INDEX_KDTREE = 1
index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
search_params = dict(checks=50)  # higher -> more accurate but slower search
flann = cv2.FlannBasedMatcher(index_params,search_params)
matches = flann.knnMatch(des1,des2,k=2)
# Per-query mask selecting which of the k=2 candidates to draw.
matchesMask = [[0,0] for i in range(len(matches))]
# Lowe's ratio test; guard against entries with fewer than 2 neighbours,
# which the original `for i,(m,n) in enumerate(matches)` unpacking would
# crash on.
for i, pair in enumerate(matches):
    if len(pair) == 2 and pair[0].distance < 0.7 * pair[1].distance:
        matchesMask[i]=[1,0]
draw_params = dict(matchColor = (0,255,0),
                   singlePointColor = (255,0,0),
                   matchesMask = matchesMask,
                   flags = 0)
img3 = cv2.drawMatchesKnn(img1,kps1,img2,kps2,matches,None,**draw_params)
plt.imshow(img3)
plt.show()