opencv实现全景拼接

import cv2 as cv
import numpy as np
from PIL import Image  # NOTE(review): imported but never used in this script

# Image paths: floor1 is warped to the right side of the panorama ("train"),
# floor2 stays on the left ("query") -- see the loads below.
path1="./images/floor1.png"# NOTE(review): original comment called this the query image, but path2 is what gets loaded as the query below
path2="./images/floor2.png"
quary_img=cv.imread(path2)# query image, panorama's left half (sic "quary")
train_img=cv.imread(path1)# train image, panorama's right half

# Leftover notebook cells: bare expressions that displayed the arrays in
# Jupyter; they have no effect in a plain script.
train_img

quary_img

图片左右顺序很重要:quary(B)是全景的左边,train(A)是全景的右边。

提取关键点和描述符

def detectAndDescribe(image, method=None):
    """Detect keypoints and compute feature descriptors for *image*.

    Parameters
    ----------
    image : ndarray
        Input image (BGR or grayscale).
    method : str
        One of 'sift', 'surf', 'brisk', 'orb'.

    Returns
    -------
    (kps, features)
        The keypoint list and the descriptor matrix from
        ``detectAndCompute``.

    Raises
    ------
    ValueError
        If *method* is not one of the supported extractor names.
    """
    if method == 'sift':
        # SIFT: 128-dim descriptor per keypoint. On OpenCV >= 4.4 the factory
        # is cv.SIFT_create; older builds only expose it via xfeatures2d.
        factory = getattr(cv, 'SIFT_create', None)
        if factory is None:
            factory = cv.xfeatures2d.SIFT_create
        descriptor = factory()
    elif method == 'surf':
        # SURF: 64-dim descriptor per keypoint (contrib/xfeatures2d only).
        descriptor = cv.xfeatures2d.SURF_create()
    elif method == 'brisk':
        descriptor = cv.BRISK_create()
    elif method == 'orb':
        descriptor = cv.ORB_create()
    else:
        # The original fell through and crashed with UnboundLocalError when
        # method was None/unknown; fail with a clear message instead.
        raise ValueError(f"unsupported feature extractor: {method!r}")

    (kps, features) = descriptor.detectAndCompute(image, None)
    return (kps, features)

# Extract SIFT keypoints/descriptors of the query image; `kps` feeds the
# keypoint visualization below (kps_q/features_q are recomputed later for the
# actual matching step).
kps,features=detectAndDescribe(quary_img,'sift')

特征点可视化

# Visualize the detected SIFT keypoints: draw them on a fresh copy of the
# query image, using its grayscale version as the source for drawKeypoints.
img_kp = np.copy(quary_img)
gray = cv.cvtColor(img_kp, cv.COLOR_BGR2GRAY)
img_kp = cv.drawKeypoints(gray, kps, img_kp)

对于SIFT和SURF,OpenCV建议使用欧几里得距离;对于ORB和BRISK等其他特征提取器,建议使用汉明距离。

交叉检查布尔参数表示这两个特征是否相互匹配才视为有效。换句话说,对于被认为有效的一对特征(f1,f2),f1需要以f2作为最接近的匹配,f2也必须以f1作为最接近的匹配。此过程可确保提供更强大的匹配特征集。

# Recompute SIFT keypoints/descriptors for both halves of the panorama.
kps_q, features_q = detectAndDescribe(quary_img, 'sift')
kps_t, features_t = detectAndDescribe(train_img, 'sift')
# findHomography wants plain (x, y) coordinates, not cv.KeyPoint objects,
# so reduce each keypoint to its .pt as float32.
kps_q = np.float32([keypoint.pt for keypoint in kps_q])
kps_t = np.float32([keypoint.pt for keypoint in kps_t])

对于要考虑多个候选匹配的情况,可以使用基于KNN的匹配过程。KNN不会返回给定特征的单个 最佳匹配,而是返回k个最佳匹配。需要注意的是,k的值必须由用户预先定义。

def matchKeypoints(kpsA, kpsB, featuresA, featuresB, ratio, reprojThresh):
    """Match descriptors via KNN + Lowe's ratio test and estimate a homography.

    kpsA/kpsB are float32 (x, y) coordinate arrays matching featuresA/featuresB.
    Returns (matches, H, status) on success, or None when fewer than five
    matches survive the ratio test.
    """
    bf = cv.BFMatcher()
    # k=2 nearest neighbours so the ratio test can compare best vs. runner-up.
    knn_pairs = bf.knnMatch(featuresA, featuresB, 2)

    # Lowe's ratio test: keep a pair only when its best match is clearly
    # closer than the second-best. Each kept entry stores the two descriptor
    # indices as (trainIdx, queryIdx).
    matches = [
        (pair[0].trainIdx, pair[0].queryIdx)
        for pair in knn_pairs
        if len(pair) == 2 and pair[0].distance < pair[1].distance * ratio
    ]

    # A homography needs at least four correspondences; require more than four.
    if len(matches) <= 4:
        return None

    # Gather the matched coordinates on each side and fit H with RANSAC.
    ptsA = np.float32([kpsA[q] for (_, q) in matches])
    ptsB = np.float32([kpsB[t] for (t, _) in matches])
    (H, status) = cv.findHomography(ptsA, ptsB, cv.RANSAC, reprojThresh)
    return (matches, H, status)

关键点匹配图

def drawMatches(imageA, imageB, kpsA, kpsB, matches, status):
    """Render imageA and imageB side by side and join inlier matches in green.

    `status` comes from cv.findHomography; only entries equal to 1 (RANSAC
    inliers) are drawn. Returns the composite visualization image.
    """
    hA, wA = imageA.shape[:2]
    hB, wB = imageB.shape[:2]

    # Canvas wide enough for both images placed side by side.
    canvas = np.zeros((max(hA, hB), wA + wB, 3), dtype="uint8")
    canvas[0:hA, 0:wA] = imageA
    canvas[0:hB, wA:] = imageB

    for (trainIdx, queryIdx), inlier in zip(matches, status):
        if inlier != 1:
            continue  # skip RANSAC outliers
        # Point in A, and the matching point in B shifted right by wA.
        ptA = (int(kpsA[trainIdx][0]), int(kpsA[trainIdx][1]))
        ptB = (int(kpsB[queryIdx][0]) + wA, int(kpsB[queryIdx][1]))
        cv.line(canvas, ptA, ptB, (0, 255, 0), 1)

    return canvas

# NOTE(review): the exported notebook lost the cell that computed `matches`,
# `H` and `status` -- the original line used them before definition and would
# raise NameError. Recreate them here. featuresA/kpsA must be the *train*
# side so the returned H maps train-image coordinates into the query image's
# frame (the panorama's left half), which is what warpPerspective below needs.
_match_result = matchKeypoints(kps_t, kps_q, features_t, features_q, 0.75, 4.0)
if _match_result is None:
    raise RuntimeError("not enough keypoint matches to estimate a homography")
(matches, H, status) = _match_result
# Inside drawMatches, kpsA is indexed by trainIdx (the query side of this
# knnMatch call) and kpsB by queryIdx (the train side), so the query image
# and its keypoints go first.
vis = drawMatches(quary_img, train_img, kps_q, kps_t, matches, status)

连接全景图

# Allocate a canvas large enough to hold both images, warp the train (right)
# image into it with homography H, then paste the query (left) image over the
# top-left corner so it occupies the panorama's left side.
# NOTE(review): `H` is never assigned anywhere in this exported script -- the
# notebook cell that called matchKeypoints() and unpacked (matches, H, status)
# appears to have been lost; confirm and restore it before running.
width=train_img.shape[1]+quary_img.shape[1]
height=train_img.shape[0]+quary_img.shape[0]
result = cv.warpPerspective(train_img, H, (width, height))
        
#result[0:train_img.shape[0], 0:train_img.shape[1]] = train_img
result[0:quary_img.shape[0], 0:quary_img.shape[1]] = quary_img# paste the query image into the far left of result

  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值