OpenCV笔记03

SIFT(Scale Invariant Feature Transform)尺度不变特征变换算法原理

特点

  1. 对旋转、尺度缩放、亮度变化保持不变性;对视角变化、仿射变换、噪声也保持一定程度的稳定性。
  2. 适用于在海量特征数据库中进行快速、准确的匹配。
  3. 即使少数的几个物体也可以产生大量的SIFT特征向量。
  4. 经优化的SIFT匹配算法甚至可以达到实时的要求。
  5. 可以很方便的与其他形式的特征向量进行联合。

解决的问题

  1. 目标的旋转、缩放、平移

  2. 图像仿射/投影变换

  3. 光照影响

  4. 目标遮挡

  5. 杂物场景

  6. 噪声

步骤

  1. 尺度空间极值检测
  2. 关键点定位
  3. 方向确定
  4. 关键点描述

概念

  1. 二维高斯函数
  2. 图像的二维高斯模糊及其分离
  3. 尺度空间
  4. 高斯金字塔
  5. 高斯差分金字塔
  6. 边缘响应的消除
  7. 有限差分法求导

缺点

  1. 实时性不高。

  2. 有时特征点较少。

  3. 对边缘光滑的目标无法准确提取特征点。

速度

速度上:ORB 约为 SURF 的 10 倍、SIFT 的 100 倍(即 ORB ≈ 10×SURF ≈ 100×SIFT)

BF(Brute-Force)暴力匹配

ORB(Oriented FAST and Rotated BRIEF)特征检测器

1

# -*- coding: utf-8 -*-
import cv2
from matplotlib import pyplot as plt

# Load both images as grayscale (flag 0) for feature detection.
template = cv2.imread("sources/gk1.jpg", 0)
target = cv2.imread("sources/gk2.jpg", 0)
orb = cv2.ORB_create()  # ORB feature detector / descriptor extractor
kp1, des1 = orb.detectAndCompute(template, None)  # keypoints + descriptors of template
kp2, des2 = orb.detectAndCompute(target, None)
# ORB descriptors are binary, so Hamming distance is the right norm;
# crossCheck=True keeps only mutually-best matches.
bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
matches = bf.match(des1, des2)  # match descriptors
matches = sorted(matches, key=lambda x: x.distance)  # best (smallest distance) first
# Draw the 40 best matches. OpenCV draws in BGR order; convert to RGB
# so matplotlib displays the colors correctly.
result = cv2.drawMatches(template, kp1, target, kp2, matches[:40], None, flags=2)
plt.imshow(cv2.cvtColor(result, cv2.COLOR_BGR2RGB))
plt.show()

在这里插入图片描述

2

import cv2

imgname1 = 'sources/gk1.jpg'
imgname2 = 'sources/gk2.jpg'
orb = cv2.ORB_create()
img1 = cv2.imread(imgname1)
kp1, des1 = orb.detectAndCompute(img1, None)
img2 = cv2.imread(imgname2)
kp2, des2 = orb.detectAndCompute(img2, None)
# BUG FIX: the default BFMatcher() uses NORM_L2, which is intended for
# float descriptors (SIFT/SURF). ORB produces binary descriptors, so
# Hamming distance must be used for meaningful matching.
bf = cv2.BFMatcher(cv2.NORM_HAMMING)
matches = bf.knnMatch(des1, des2, k=2)
# Lowe's ratio test: keep a match only when it is clearly better than
# the second-best candidate.
good = []
for m, n in matches:
    if m.distance < 0.75 * n.distance:
        good.append([m])
img3 = cv2.drawMatchesKnn(img1, kp1, img2, kp2, good, None, flags=2)
cv2.imshow("ORB", img3)
cv2.waitKey(0)
cv2.destroyAllWindows()

在这里插入图片描述

SIFT

import cv2

imgname1 = 'sources/gk1.jpg'
imgname2 = 'sources/gk2.jpg'
# SIFT moved from xfeatures2d (contrib) into the main module in
# OpenCV 4.4 after the patent expired; support both builds.
if hasattr(cv2, "SIFT_create"):
    sift = cv2.SIFT_create()
else:
    sift = cv2.xfeatures2d.SIFT_create()
img1 = cv2.imread(imgname1)
kp1, des1 = sift.detectAndCompute(img1, None)
img2 = cv2.imread(imgname2)
kp2, des2 = sift.detectAndCompute(img2, None)
# Default BFMatcher (NORM_L2) is correct for SIFT's float descriptors.
bf = cv2.BFMatcher()
matches = bf.knnMatch(des1, des2, k=2)
# Lowe's ratio test to filter ambiguous matches.
good = []
for m, n in matches:
    if m.distance < 0.75 * n.distance:
        good.append([m])
img3 = cv2.drawMatchesKnn(img1, kp1, img2, kp2, good, None, flags=2)
cv2.imshow("BFmatch", img3)
cv2.waitKey(0)
cv2.destroyAllWindows()

在这里插入图片描述

FLANN(Fast Library for Approximate Nearest Neighbors)快速近似(逼近)最近邻特征匹配

SIFT

1

# -*- coding: utf-8 -*-
import cv2
from matplotlib import pyplot as plt

queryImage = cv2.imread("sources/gk1.jpg", 0)
trainingImage = cv2.imread("sources/gk2.jpg", 0)
# SIFT moved into the main module in OpenCV 4.4; support both builds.
if hasattr(cv2, "SIFT_create"):
    sift = cv2.SIFT_create()
else:
    sift = cv2.xfeatures2d.SIFT_create()
kp1, des1 = sift.detectAndCompute(queryImage, None)
kp2, des2 = sift.detectAndCompute(trainingImage, None)
# FLANN parameters: KD-tree index (suitable for SIFT float descriptors)
# with 5 trees; 'checks' bounds the number of leafs visited per query.
FLANN_INDEX_KDTREE = 0
indexParams = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
searchParams = dict(checks=50)
flann = cv2.FlannBasedMatcher(indexParams, searchParams)
matches = flann.knnMatch(des1, des2, k=2)
matchesMask = [[0, 0] for i in range(len(matches))]  # mask: draw nothing by default
for i, (m, n) in enumerate(matches):
    # Lowe's ratio test: mark a match for drawing only when the best
    # candidate is at most half the distance of the second best.
    if m.distance < 0.5 * n.distance:
        matchesMask[i] = [1, 0]
# Colors are BGR here: match lines red, single points blue.
drawParams = dict(matchColor=(0, 0, 255), singlePointColor=(255, 0, 0), matchesMask=matchesMask, flags=0)
resultImage = cv2.drawMatchesKnn(queryImage, kp1, trainingImage, kp2, matches, None, **drawParams)
# Convert BGR -> RGB so matplotlib shows the drawing colors correctly.
plt.imshow(cv2.cvtColor(resultImage, cv2.COLOR_BGR2RGB))
plt.show()

在这里插入图片描述

2

# -*- coding: utf-8 -*-
import numpy as np
import cv2
from matplotlib import pyplot as plt

MIN_MATCH_COUNT = 10  # minimum number of good matches required for homography
template = cv2.imread('sources/gk1.jpg', 0)
target = cv2.imread('sources/gk2.jpg', 0)
# SIFT moved into the main module in OpenCV 4.4; support both builds.
if hasattr(cv2, "SIFT_create"):
    sift = cv2.SIFT_create()
else:
    sift = cv2.xfeatures2d.SIFT_create()
kp1, des1 = sift.detectAndCompute(template, None)
kp2, des2 = sift.detectAndCompute(target, None)
FLANN_INDEX_KDTREE = 0
index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
search_params = dict(checks=50)
flann = cv2.FlannBasedMatcher(index_params, search_params)
matches = flann.knnMatch(des1, des2, k=2)

# Lowe's ratio test (threshold 0.7) to keep unambiguous matches only.
good = []
for m, n in matches:
    if m.distance < 0.7 * n.distance:
        good.append(m)

matchesMask = None
if len(good) > MIN_MATCH_COUNT:
    # Collect matched keypoint coordinates in both images.
    src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
    dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
    # RANSAC with a 5-pixel reprojection threshold.
    M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
    # BUG FIX: findHomography can return M=None on degenerate input;
    # calling perspectiveTransform with None would crash.
    if M is not None:
        matchesMask = mask.ravel().tolist()
        h, w = template.shape
        # Project the template's four corners into the target image and
        # outline the detected region.
        pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]).reshape(-1, 1, 2)
        dst = cv2.perspectiveTransform(pts, M)
        cv2.polylines(target, [np.int32(dst)], True, 0, 2, cv2.LINE_AA)
    else:
        print("Homography estimation failed")
else:
    print("Not enough matches are found - %d/%d" % (len(good), MIN_MATCH_COUNT))
draw_params = dict(matchColor=(0, 255, 0),
                   singlePointColor=None,
                   matchesMask=matchesMask,
                   flags=2)
result = cv2.drawMatches(template, kp1, target, kp2, good, None, **draw_params)
plt.imshow(result, 'gray')
plt.show()

在这里插入图片描述

3

import cv2

imgname1 = 'sources/gk1.jpg'
imgname2 = 'sources/gk2.jpg'
# SIFT moved into the main module in OpenCV 4.4; support both builds.
if hasattr(cv2, "SIFT_create"):
    sift = cv2.SIFT_create()
else:
    sift = cv2.xfeatures2d.SIFT_create()
FLANN_INDEX_KDTREE = 0
# BUG FIX: the keyword is 'trees', not 'tree' — the typo meant the
# KD-tree count was silently ignored and FLANN fell back to its default.
index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
search_params = dict(checks=50)
flann = cv2.FlannBasedMatcher(index_params, search_params)
img1 = cv2.imread(imgname1)
kp1, des1 = sift.detectAndCompute(img1, None)
img2 = cv2.imread(imgname2)
kp2, des2 = sift.detectAndCompute(img2, None)
matches = flann.knnMatch(des1, des2, k=2)
# Lowe's ratio test to filter ambiguous matches.
good = []
for m, n in matches:
    if m.distance < 0.7 * n.distance:
        good.append([m])
img3 = cv2.drawMatchesKnn(img1, kp1, img2, kp2, good, None, flags=2)
cv2.imshow("FLANN", img3)
cv2.waitKey(0)
cv2.destroyAllWindows()

在这里插入图片描述

SURF

import cv2

imgname1 = 'sources/gk1.jpg'
imgname2 = 'sources/gk2.jpg'
# NOTE: SURF is patented and only available in opencv-contrib builds
# compiled with OPENCV_ENABLE_NONFREE; it has no main-module equivalent.
surf = cv2.xfeatures2d.SURF_create()
FLANN_INDEX_KDTREE = 0
# BUG FIX: the keyword is 'trees', not 'tree' — the typo meant the
# KD-tree count was silently ignored and FLANN fell back to its default.
index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
search_params = dict(checks=50)
flann = cv2.FlannBasedMatcher(index_params, search_params)
img1 = cv2.imread(imgname1)
kp1, des1 = surf.detectAndCompute(img1, None)
img2 = cv2.imread(imgname2)
kp2, des2 = surf.detectAndCompute(img2, None)
matches = flann.knnMatch(des1, des2, k=2)
# Lowe's ratio test to filter ambiguous matches.
good = []
for m, n in matches:
    if m.distance < 0.7 * n.distance:
        good.append([m])
img3 = cv2.drawMatchesKnn(img1, kp1, img2, kp2, good, None, flags=2)
cv2.imshow("SURF", img3)
cv2.waitKey(0)
cv2.destroyAllWindows()

在这里插入图片描述

  • 0
    点赞
  • 1
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值