SIFT + Image Matching Algorithms

When using SIFT for the first time, you may hit the error: module 'cv2.cv2' has no attribute 'xfeatures2d'.
This happens because the SIFT algorithm was patented and cannot be called in OpenCV versions above a certain release. The fix: first install a pinned version of opencv-python, then install opencv-contrib-python; both steps are required.

pip install opencv-python==3.4.2.16
pip install opencv-contrib-python==3.4.2.16
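
As an aside, the SIFT patent expired in March 2020, so from OpenCV 4.4.0 onward SIFT is back in the main module and no longer needs the contrib package. A small version-agnostic sketch:

import cv2

# Prefer the patent-free entry point available in OpenCV >= 4.4.0;
# fall back to the contrib module on the pinned 3.4.2.16 install.
try:
    sift = cv2.SIFT_create()
except AttributeError:
    sift = cv2.xfeatures2d.SIFT_create()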

We won't belabor the theory behind SIFT here; if you are interested, see my other blog post. Below we go straight to the source code, with explanatory comments added inline.
1. SIFT + BFMatcher matching

import cv2

def sift_image_match(i, img1, img2, img1_, img2_):
    # sift
    sift = cv2.xfeatures2d.SIFT_create()  # create a SIFT object

    keypoints_1, descriptors_1 = sift.detectAndCompute(img1, None)  # find keypoints and descriptors for img1
    keypoints_2, descriptors_2 = sift.detectAndCompute(img2, None)  # find keypoints and descriptors for img2 (each descriptor is a 128-dimensional vector per keypoint)

    print(len(keypoints_1), len(keypoints_2))

    # feature matching
    bf = cv2.BFMatcher(cv2.NORM_L1, crossCheck=True)  # create a brute-force (BFMatcher) matcher for image matching

    matches = bf.match(descriptors_1, descriptors_2)  # compute match pairs from the descriptors

    matches = sorted(matches, key=lambda x: x.distance)  # sort the match pairs by similarity (distance)

    img3 = cv2.drawMatches(img1_, keypoints_1, img2_, keypoints_2, matches[:50], img2, flags=2)  # use drawMatches to draw lines between the matched points of the two images
    name = str(i) + ".png"
    cv2.imwrite(name, img3)
    return len(matches)

def image_process(img1_path, img2_path):
    img1_ = cv2.imread(img1_path)  # plane
    img2_ = cv2.imread(img2_path)  # satellite
    img1__shape = img1_.shape
    # print(img1__shape)  # (1080, 1920, 3)
    img2_ = cv2.resize(img2_, (img1__shape[1], img1__shape[0]))
    img1 = cv2.cvtColor(img1_, cv2.COLOR_BGR2GRAY)  # matching algorithms generally operate on grayscale images, so convert BGR to grayscale with cvtColor
    img2 = cv2.cvtColor(img2_, cv2.COLOR_BGR2GRAY)
    return img1, img2, img1_, img2_
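
For completeness, a minimal driver tying the two functions together (the file names below are placeholders, not from the original post):

if __name__ == "__main__":
    # Hypothetical input paths -- substitute your own image pair.
    img1, img2, img1_, img2_ = image_process("plane.png", "satellite.png")
    print("total matches:", sift_image_match(0, img1, img2, img1_, img2_))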

2. SIFT + FLANN matching

def sift_image_match(i, img1, img2, img1_, img2_):
    # sift
    sift = cv2.xfeatures2d.SIFT_create()  # create a SIFT object

    keypoints_1, descriptors_1 = sift.detectAndCompute(img1, None)  # find keypoints and descriptors for img1
    keypoints_2, descriptors_2 = sift.detectAndCompute(img2, None)  # find keypoints and descriptors for img2 (each descriptor is a 128-dimensional vector per keypoint)

    print(len(keypoints_1), len(keypoints_2))

    # feature matching

    # FLANN matcher parameters
    FLANN_INDEX_KDTREE = 0
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=50)

    # create the FLANN matcher
    flann = cv2.FlannBasedMatcher(index_params, search_params)
    matches = flann.knnMatch(descriptors_1, descriptors_2, k=2)  # k is the number of nearest neighbors to return; k=2 gives the nearest and second-nearest match

    matchesMask = [[0, 0] for _ in range(len(matches))]

    for idx, (m, n) in enumerate(matches):  # each entry of matches holds the k nearest-neighbor DMatch objects for one query keypoint; m and n are the two closest matches
        if m.distance < 0.8 * n.distance:  # m is the nearest neighbor, n the second nearest; keep the match only when it passes this ratio test
            matchesMask[idx] = [1, 0]  # set the nearest neighbor's mask entry to 1 so it gets drawn
    drawpara = dict(singlePointColor=(0, 255, 0), matchColor=(255, 0, 0), matchesMask=matchesMask, flags=2)

    img3 = cv2.drawMatchesKnn(img1_, keypoints_1, img2_, keypoints_2, matches, None, **drawpara)  # use drawMatchesKnn to draw lines between the matched points of the two images
    name = str(i) + ".png"
    cv2.imwrite(name, img3)
    return len(matches)

(image_process is identical to the version in section 1.)

Note the difference between what the BF matcher and FLANN return. Both yield cv2.DMatch objects, but bf.match() returns only the single best match per descriptor, while flann.knnMatch() returns the k nearest neighbors, which have to be filtered with an additional test, for example:

if m.distance < cof * n.distance

The larger cof is, the less sensitive the test is to nearest neighbors whose distances are almost equal, so such matches are rarely filtered out; the smaller cof is, the more of these near-tie matches get discarded. Lowe's argument is that when the nearest and second-nearest neighbors are close in distance, the match is ambiguous and likely to be a mismatch (a "bad point"). A somewhat smaller cof therefore tends to work better, but the right value should be determined experimentally; smaller is not always better.
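
To get a feel for the threshold, you can sweep cof and count how many matches survive the ratio test (a quick sketch reusing the matches list from the FLANN code above):

# Count the knn matches that survive Lowe's ratio test at each threshold.
for cof in (0.6, 0.7, 0.8, 0.9):
    kept = sum(1 for m, n in matches if m.distance < cof * n.distance)
    print("cof=%.1f: %d/%d matches kept" % (cof, kept, len(matches)))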

3. SIFT + FLANN + RANSAC
Here RANSAC is used to remove mismatched points, via the findFundamentalMat() function.

import cv2
import os
import numpy as np
import matplotlib.pyplot as plt

# read images

def sift_image_match(i1, img1, img2, img1_, img2_):
    # sift
    sift = cv2.xfeatures2d.SIFT_create()  # create a SIFT object

    keypoints_1, descriptors_1 = sift.detectAndCompute(img1, None)  # find keypoints and descriptors
    keypoints_2, descriptors_2 = sift.detectAndCompute(img2, None)

    # feature matching
    bf = cv2.BFMatcher(cv2.NORM_L1, crossCheck=True)  # brute-force matcher, used later for drawing
    bfmatches = bf.match(descriptors_1, descriptors_2)
    # FLANN matcher parameters
    FLANN_INDEX_KDTREE = 0
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=50)

    # create the FLANN matcher
    flann = cv2.FlannBasedMatcher(index_params, search_params)
    matches = flann.knnMatch(descriptors_1, descriptors_2, k=2)  # k nearest neighbors; k=2 gives the nearest and second-nearest match
    pts1 = []
    pts2 = []
    for m, n in matches:  # each entry of matches holds the k nearest-neighbor DMatch objects for one query keypoint
        if m.distance < 0.8 * n.distance:  # m is the nearest neighbor, n the second nearest; keep the match only when it passes the ratio test
            pts1.append(keypoints_1[m.queryIdx].pt)  # queryIdx is the index of the query point in the first image; .pt extracts the coordinates from the KeyPoint object
            pts2.append(keypoints_2[m.trainIdx].pt)  # trainIdx is the index of the matched point in the second image

    pts1 = np.int32(pts1)
    pts2 = np.int32(pts2)
    # estimate the fundamental matrix from the match pairs; with cv2.FM_RANSAC the
    # function uses RANSAC to reject mismatched points (the original post passed
    # cv2.FM_LMEDS, a similar robust estimator)
    F, mask = cv2.findFundamentalMat(pts1, pts2, cv2.FM_RANSAC)

    # keep the inliers, i.e. the matches that survived the filtering
    pts1 = pts1[mask.ravel() == 1]
    pts2 = pts2[mask.ravel() == 1]

    assert len(pts1) == len(pts2)

    print(len(keypoints_1), len(pts2))
    bfmatches = sorted(bfmatches, key=lambda x: x.distance)  # sort the match pairs by similarity (distance)

    for bfm, pt1, pt2 in zip(bfmatches, pts1, pts2):  # bfm is a cv2.DMatch with queryIdx, trainIdx, imgIdx and distance attributes
        keypoints_1[bfm.queryIdx].pt = tuple(pt1)  # overwrite the keypoint coordinates with the filtered points, so drawMatches only shows points that survived the filtering
        keypoints_2[bfm.trainIdx].pt = tuple(pt2)

    img3 = cv2.drawMatches(img1_, keypoints_1, img2_, keypoints_2, bfmatches[:5], img2, flags=2)
    name = str(i1) + "_1.png"

    cv2.imwrite(name, img3)

    return len(matches)
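
An arguably cleaner alternative to overwriting keypoint coordinates is to keep the DMatch objects themselves: collect the ratio-test survivors, apply the RANSAC inlier mask to that list, and draw the result directly. A sketch under the same variable names (good and inliers are names introduced here, not from the original):

# Keep the DMatch objects that pass the ratio test, in the same order
# as pts1/pts2, then select the ones the mask marks as inliers.
good = [m for m, n in matches if m.distance < 0.8 * n.distance]
inliers = [m for m, keep in zip(good, mask.ravel()) if keep == 1]
img3 = cv2.drawMatches(img1_, keypoints_1, img2_, keypoints_2, inliers, None, flags=2)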

Related links:
https://blog.csdn.net/qq_36622009/article/details/104919996
https://blog.csdn.net/weixin_44072651/article/details/89262277
