运行环境:
- Python 3.6
- opencv-python 3.4.15
- opencv-contrib-python 3.4.15(SURF 属于专利算法,需要 contrib 包)
import cv2
import numpy as np
# Display helper for intermediate results.
def cvshow(name, img):
    """Show *img* in a window titled *name* and block until a key is pressed.

    The window is deliberately left open afterwards so successive results
    can be compared side by side.
    """
    cv2.imshow(name, img)
    # Block indefinitely until any key press.
    cv2.waitKey(0)
# SURF keypoint detection
def surf_kp(image):
    """Detect SURF keypoints and descriptors on a BGR image.

    Parameters
    ----------
    image : BGR uint8 image as loaded by ``cv2.imread``.

    Returns
    -------
    (kp_image, kp, des) : the grayscale image with rich keypoints drawn,
    the list of ``cv2.KeyPoint``, and the descriptor array (``None`` when
    no keypoints are found).
    """
    # Convert once to grayscale; SURF operates on single-channel data.
    gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # Higher Hessian threshold -> fewer but more stable keypoints.
    surf = cv2.xfeatures2d.SURF_create(hessianThreshold=15000)
    # Fix: detect on the grayscale image prepared above. The original
    # passed the BGR image, leaving gray_image unused for detection and
    # forcing a redundant internal color conversion.
    kp, des = surf.detectAndCompute(gray_image, None)
    # Visualize keypoints (size + orientation) on the grayscale image.
    kp_image = cv2.drawKeypoints(
        gray_image, kp, None,
        flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    return kp_image, kp, des
# Feature matching with Lowe's ratio test
def get_good_match(des1, des2):
    """Match two descriptor sets with FLANN and keep good matches.

    Parameters
    ----------
    des1, des2 : float32 descriptor arrays (query / train).

    Returns
    -------
    list[cv2.DMatch] sorted by ratio quality (best first), filtered by
    Lowe's 0.75 ratio test.
    """
    # FLANN (approximate nearest-neighbour) matcher with a KD-tree index.
    FLANN_INDEX_KDTREE = 0
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)  # 5 KD-trees
    search_params = dict(checks=50)  # recursion budget per lookup
    flann = cv2.FlannBasedMatcher(index_params, search_params)
    matches = flann.knnMatch(des1, des2, k=2)
    # Robustness fix: knnMatch may return fewer than two neighbours per
    # query when descriptors are scarce, which broke the `for m, n in ...`
    # unpacking; also skip degenerate pairs with a zero second distance,
    # which made the sort key divide by zero.
    pairs = [p for p in matches if len(p) == 2 and p[1].distance > 0]
    # Best (smallest) distance ratio first — callers slice the head.
    pairs.sort(key=lambda p: p[0].distance / p[1].distance)
    # Lowe's ratio test: keep m only if clearly better than the runner-up.
    good = [m for m, n in pairs if m.distance < 0.75 * n.distance]
    return good
# Image stitching / blending
def surfimg_rightlignment(img_right, img_left):
    """Warp *img_right* into *img_left*'s frame and paste the left image in.

    Returns the stitched panorama, or ``None`` when too few matches (or a
    degenerate configuration) prevent estimating the homography.  The
    original fell through to an implicit ``None`` on failure; this is now
    explicit and documented so callers can guard on it.
    """
    _, kp1, des1 = surf_kp(img_right)
    _, kp2, des2 = surf_kp(img_left)
    goodMatch = get_good_match(des1, des2)
    # Need a comfortable margin over the 4 correspondences a homography
    # minimally requires.
    if len(goodMatch) <= 8:
        print("not enough matches are found")
        return None
    # queryIdx indexes kp1 (right image), trainIdx indexes kp2 (left image),
    # matching the (des1, des2) order passed to get_good_match above.
    ptsA = np.float32([kp1[m.queryIdx].pt for m in goodMatch]).reshape(-1, 1, 2)
    ptsB = np.float32([kp2[m.trainIdx].pt for m in goodMatch]).reshape(-1, 1, 2)
    # RANSAC selects inlier correspondences, then estimates the 3x3 H.
    ransac_reproj_threshold = 4
    H, status = cv2.findHomography(ptsA, ptsB, cv2.RANSAC,
                                   ransac_reproj_threshold)
    # Robustness fix: findHomography returns None for degenerate point
    # sets; the original would then crash inside warpPerspective.
    if H is None:
        print("homography estimation failed")
        return None
    # Warp the right image onto a canvas wide enough to hold both images.
    result = cv2.warpPerspective(
        img_right, H,
        (img_right.shape[1] + img_left.shape[1], img_right.shape[0]),
        flags=cv2.INTER_NEAREST)
    # Show the warped right image before the left image is pasted over it.
    cvshow('result_medium', result)
    # Paste the (un-warped) left image into the top-left corner.
    result[0:img_left.shape[0], 0:img_left.shape[1]] = img_left
    return result
def main(left_img, right_img):
    """Stitch *right_img* onto *left_img* and display every stage.

    Parameters
    ----------
    left_img, right_img : str paths to the left/right source images
        (note the left/right placement matters for the stitch direction).

    Raises
    ------
    FileNotFoundError : when either image cannot be read.
    """
    img_right = cv2.imread(right_img)
    img_left = cv2.imread(left_img)
    # Robustness fix: cv2.imread returns None (no exception) on a bad
    # path; fail early with a clear message instead of a cryptic cv2
    # error inside resize.
    if img_right is None:
        raise FileNotFoundError("cannot read image: %s" % right_img)
    if img_left is None:
        raise FileNotFoundError("cannot read image: %s" % left_img)
    # Optional down-scaling knob (fx=fy=1 keeps original size).
    img_right = cv2.resize(img_right, None, fx=1, fy=1)
    # Force both images to the same size so hstack/stitching line up.
    img_left = cv2.resize(img_left, (img_right.shape[1], img_right.shape[0]))
    # Keypoint detection on both images.
    kpimg_right, kp1, des1 = surf_kp(img_right)
    kpimg_left, kp2, des2 = surf_kp(img_left)
    # Show each original next to its keypoint visualization.
    cvshow('img_left_kp', np.hstack((img_left, kpimg_left)))
    cvshow('img_right_kp', np.hstack((img_right, kpimg_right)))
    # Left image is the query here (opposite order from the stitcher,
    # which is fine since these matches are only used for drawing).
    goodMatch = get_good_match(des2, des1)
    # Draw all surviving matches, then just the best 18.
    all_goodmatch_img = cv2.drawMatches(
        img_left, kp2, img_right, kp1, goodMatch, None, flags=2)
    goodmatch_img = cv2.drawMatches(
        img_left, kp2, img_right, kp1, goodMatch[:18], None, flags=2)
    cvshow('Keypoint_Matches_ALL', all_goodmatch_img)
    cvshow('Keypoint_Matches_good', goodmatch_img)
    # Stitch and display; the stitcher returns None on failure, which
    # previously crashed cvshow.
    result = surfimg_rightlignment(img_right, img_left)
    if result is not None:
        cvshow('Result', result)


if __name__ == '__main__':
    main('images/2-1-1.png', 'images/2-2-2.png')
左边图像
右边图像