OpenCV-Python: Matching Two Images

Original images (image placeholder)

import cv2

img1 = cv2.imread('SURF_2.jpg', cv2.IMREAD_GRAYSCALE)
img1 = cv2.resize(img1,dsize=(600,400))
img2 = cv2.imread('SURF_1.jpg', cv2.IMREAD_GRAYSCALE)
img2 = cv2.resize(img2,dsize=(600,400))
image1 = img1.copy()
image2 = img2.copy()


# Create a SURF object
surf = cv2.xfeatures2d.SURF_create(25000)
# The SURF detector uses the determinant of the Hessian to find keypoints and computes a feature
# vector for the region around each keypoint. detectAndCompute returns the keypoints and their descriptors
keypoints1,descriptor1 = surf.detectAndCompute(image1,None)
keypoints2,descriptor2 = surf.detectAndCompute(image2,None)
# print('descriptor1:',descriptor1.shape,'descriptor2:',descriptor2.shape)
# Draw the keypoints on each image
image1 = cv2.drawKeypoints(image=image1,keypoints = keypoints1,outImage=image1,color=(255,0,255),flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
image2 = cv2.drawKeypoints(image=image2,keypoints = keypoints2,outImage=image2,color=(255,0,255),flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
# Show the keypoint images
cv2.imshow('surf_keypoints1',image1)
cv2.imshow('surf_keypoints2',image2)
cv2.waitKey(20)


matcher = cv2.FlannBasedMatcher()
matchePoints = matcher.match(descriptor1,descriptor2)
# print(type(matchePoints),len(matchePoints),matchePoints[0])

# Scan all matches to find the smallest (best) and largest (worst) match distances
minMatch = 1
maxMatch = 0
for i in range(len(matchePoints)):
    if minMatch > matchePoints[i].distance:
        minMatch = matchePoints[i].distance
    if maxMatch < matchePoints[i].distance:
        maxMatch = matchePoints[i].distance
print('Best match distance:',minMatch)
print('Worst match distance:',maxMatch)

# Keep only the matches whose distance is close to the best (smallest) distance
goodMatchePoints = []
for i in range(len(matchePoints)):
    if matchePoints[i].distance < minMatch + (maxMatch-minMatch)/16:
        goodMatchePoints.append(matchePoints[i])

# Draw the strongest matches
outImg = None
outImg = cv2.drawMatches(img1,keypoints1,img2,keypoints2,goodMatchePoints,outImg,
                         matchColor=(0,255,0),flags=cv2.DRAW_MATCHES_FLAGS_DEFAULT)
cv2.imshow('matches',outImg)
cv2.waitKey(0)
cv2.destroyAllWindows()

Matching result (image placeholder)
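An alternative to the (max - min)/16 distance window used above is to sort the DMatch list by distance and keep only the strongest few matches. A minimal sketch reusing matchePoints, keypoints1/keypoints2 and img1/img2 from the code above; the names sortedMatches/topMatches and the cutoff of 20 are only illustrative:

# Minimal alternative sketch: keep the 20 closest matches instead of a distance window.
# Assumes matchePoints, keypoints1, keypoints2, img1 and img2 from the code above are in scope.
sortedMatches = sorted(matchePoints, key=lambda m: m.distance)
topMatches = sortedMatches[:20]          # the 20 matches with the smallest distances

outImg2 = cv2.drawMatches(img1, keypoints1, img2, keypoints2, topMatches, None,
                          matchColor=(0, 255, 0), flags=cv2.DRAW_MATCHES_FLAGS_DEFAULT)
cv2.imshow('top 20 matches', outImg2)
cv2.waitKey(0)
cv2.destroyAllWindows()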
Original image (image placeholder)

#coding=utf-8
import cv2
from matplotlib import pyplot as plt

img=cv2.imread('xfeatures2d.SURF_create2.jpg',0)
# surf=cv2.SURF(400)   # Hessian threshold 400 (old OpenCV 2.x API)
# kp,des=surf.detectAndCompute(img,None)
# leng=len(kp)
# print(leng)
# Too many keypoints with that threshold, so use a larger one

surf=cv2.xfeatures2d.SURF_create(50000)   # Hessian threshold 50000
kp,des=surf.detectAndCompute(img,None)
leng=len(kp)
print(leng)

img2=cv2.drawKeypoints(img,kp,None,(255,0,0),4)
plt.imshow(img2)
plt.show()

# U-SURF: force all keypoints to an upright orientation, which speeds up computation
surf.setUpright(True)
kp=surf.detect(img,None)
img3=cv2.drawKeypoints(img,kp,None,(255,0,0),4)

plt.imshow(img3)
plt.show()

# Check the descriptor size after switching from 64-dim to 128-dim (extended) descriptors
surf.setExtended(True)
kp,des=surf.detectAndCompute(img,None)
dem1=surf.descriptorSize()
print(dem1)        # 128 once extended descriptors are enabled
shp1=des.shape
print(shp1)

Result (image placeholder)
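If the keypoint count printed above is still too high or too low, the Hessian threshold can be adjusted on the existing SURF object instead of creating a new one. A minimal sketch; the value 5000 is only an illustrative guess, not a value from the original post:

# Minimal sketch: tune the Hessian threshold on the same SURF object.
# Assumes surf and img from the code above are still in scope.
print(surf.getHessianThreshold())   # current threshold (50000 here)
surf.setHessianThreshold(5000)      # lower threshold -> more keypoints
kp, des = surf.detectAndCompute(img, None)
print(len(kp))                      # should now report more keypoints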

import cv2
from matplotlib import pyplot as plt

leftImage = cv2.imread('xfeatures2d.SURF_create_1.jpg')
rightImage = cv2.imread('xfeatures2d.SURF_create_2.jpg')

# Create a SIFT detector
sift = cv2.xfeatures2d.SIFT_create()
kp1, des1 = sift.detectAndCompute(leftImage, None)
kp2, des2 = sift.detectAndCompute(rightImage, None)  # returns the keypoints and their descriptors

FLANN_INDEX_KDTREE = 0
indexParams = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
searchParams = dict(checks=50)  # number of times the index trees are recursively traversed

flann = cv2.FlannBasedMatcher(indexParams, searchParams)
matches = flann.knnMatch(des1, des2, k=2)
matchesMask = [[0, 0] for i in range(len(matches))]
print("matches", matches[0])
for i, (m, n) in enumerate(matches):
    # Lowe's ratio test; 0.07 is a very strict ratio (0.7 is the commonly used value)
    if m.distance < 0.07 * n.distance:
        matchesMask[i] = [1, 0]

drawParams = dict(matchColor=(0, 255, 0), singlePointColor=None,
                  matchesMask=matchesMask, flags=2)  # flags=2 draws only matched keypoints; flags=0 draws all keypoints
resultImage = cv2.drawMatchesKnn(leftImage, kp1, rightImage, kp2, matches, None, **drawParams)
plt.imshow(resultImage)
plt.show()

Matching result (image placeholder)
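Once enough good matches are found, they can also be used to estimate the geometric relation between the two images with cv2.findHomography and RANSAC. A minimal sketch, assuming kp1, kp2 and matches from the code above are still in scope; the 0.7 ratio and the 5.0 reprojection threshold are common defaults, not values from the original post:

import numpy as np

# Minimal sketch: estimate a homography mapping leftImage points onto rightImage points.
good = [m for m, n in matches if m.distance < 0.7 * n.distance]   # assumed ratio, not from the post
if len(good) >= 4:                      # findHomography needs at least 4 point pairs
    srcPts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
    dstPts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
    H, mask = cv2.findHomography(srcPts, dstPts, cv2.RANSAC, 5.0)
    print(H)                            # 3x3 homography matrix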
