SIFT算法好像专利到期了,根据opencv官方文档
Class for extracting keypoints and computing descriptors using the Scale Invariant Feature Transform ( SIFT) algorithm by D. Lowe [145] . [145]David G. Lowe. Distinctive image features from scale-invariant keypoints. Int. J. Comput. Vision, 60(2):91–110, November 2004.
刚把python代码debug完,差点怀疑人生,感觉看懂和自己写出来还是两个不同的事情。文章介绍后期再更新,先贴几个感觉写得比较好的链接
SIFT算法原理详解 - Alliswell_WP - 博客园www.cnblogs.com![v2-a172df196132ef3a04944cf515d14c99_180x120.jpg](http://img-02.proxy.5ce.com/view/image?&type=2&guid=144eb023-502f-eb11-8da9-e4434bdf6706&url=https://pic2.zhimg.com/v2-a172df196132ef3a04944cf515d14c99_180x120.jpg)
有作者用C++和MATLAB实现SIFT,链接如下
https://github.com/Granvallen/SIFTgithub.com先放结果图:
主函数提取的特征
![v2-4e77182059ff51d3ffc76b789ff0b4bd_b.jpg](http://img-01.proxy.5ce.com/view/image?&type=2&guid=144eb023-502f-eb11-8da9-e4434bdf6706&url=https://pic2.zhimg.com/v2-4e77182059ff51d3ffc76b789ff0b4bd_b.jpg)
test_match匹配结果,暴力匹配:
![v2-4fd6762fc32dd1d8a9a9130759e457d2_b.jpg](http://img-03.proxy.5ce.com/view/image?&type=2&guid=144eb023-502f-eb11-8da9-e4434bdf6706&url=https://pic3.zhimg.com/v2-4fd6762fc32dd1d8a9a9130759e457d2_b.jpg)
# -*- coding: utf-8 -*-
match代码:
# -*- coding: utf-8 -*-
"""
@author: 73766
"""
import cv2
from test_sift import *
import random
def matcher(desc1, desc2, thr):
    """Brute-force match two lists of SIFT descriptors.

    Every descriptor in desc1 is compared against every descriptor in
    desc2 by the Euclidean distance between their 'hist' vectors; every
    pair whose distance falls below thr is kept (a single descriptor may
    therefore appear in multiple matches).

    Args:
        desc1: list of dicts, each holding a 'hist' numpy vector
               (plus 'x'/'y' coordinates used by the caller).
        desc2: same structure as desc1.
        thr:   distance threshold; pairs with distance < thr are kept.

    Returns:
        List of [i, j] index pairs into desc1 / desc2.
    """
    matchs = []
    # Smallest distance seen, printed for threshold tuning. Start at +inf:
    # the original hard-coded 100 would under-report whenever every true
    # distance exceeded 100.
    minm = np.inf
    for i in range(len(desc1)):
        for j in range(len(desc2)):
            dst = np.linalg.norm(desc1[i]['hist'] - desc2[j]['hist'])
            minm = min(minm, dst)
            if dst < thr:
                matchs.append([i, j])
    print(minm)  # diagnostic: smallest observed descriptor distance
    return matchs
# --- Image 1: detect SIFT keypoints and visualize location/scale/orientation ---
image_path = 'lena1.jpg'            # avoid shadowing the builtin `input`
img = cv2.imread(image_path, 0)     # grayscale copy used for detection
img1 = cv2.imread(image_path)       # color copy used for drawing
start = time.time()
s = SIFT(nfeatures=0,
         edgeThreshold=10,
         contrastThreshold=0.04)
keypoints1, descriptors1, boxes = s.run_sift_feaures(img)
end = time.time()
print(end - start, 's')
for kp in keypoints1:
    x, y = int(kp.x), int(kp.y)
    # Green circle marks the keypoint position at its detected scale.
    cv2.circle(img1, (x, y), int(kp.size), (0, 255, 0), 1)
    # Red 20-px arrow shows the dominant orientation; y is subtracted
    # because image rows grow downward. np.pi replaces the 3.14
    # approximation, which visibly skewed arrow angles.
    theta = kp.angle / 180.0 * np.pi
    ptStart = (x, y)
    ptEnd = (x + int(20 * np.cos(theta)),
             y - int(20 * np.sin(theta)))
    cv2.line(img1, ptStart, ptEnd, (0, 0, 255), 2, 4)
# --- Image 2: same SIFT detection + visualization as for image 1 ---
image_path = 'lena2.jpg'            # avoid shadowing the builtin `input`
img = cv2.imread(image_path, 0)     # grayscale copy used for detection
img2 = cv2.imread(image_path)       # color copy used for drawing
start = time.time()
s = SIFT(nfeatures=0,
         edgeThreshold=10,
         contrastThreshold=0.04)
keypoints2, descriptors2, boxes = s.run_sift_feaures(img)
end = time.time()
for kp in keypoints2:
    x, y = int(kp.x), int(kp.y)
    # Green circle marks the keypoint position at its detected scale.
    cv2.circle(img2, (x, y), int(kp.size), (0, 255, 0), 1)
    # Red 20-px arrow shows the dominant orientation; y is subtracted
    # because image rows grow downward. np.pi replaces the 3.14
    # approximation, which visibly skewed arrow angles.
    theta = kp.angle / 180.0 * np.pi
    ptStart = (x, y)
    ptEnd = (x + int(20 * np.cos(theta)),
             y - int(20 * np.sin(theta)))
    cv2.line(img2, ptStart, ptEnd, (0, 0, 255), 1, 4)
print(end - start, 's')
# --- Brute-force matching and side-by-side visualization ---
matchs = matcher(descriptors1, descriptors2, 0.25)
print(len(matchs))

# Canvas: as tall as the taller image, wide enough for both side by side.
# (Renamed from the original col/row, whose meanings were swapped:
# shape[0] is the row count / height, shape[1] the column count / width.)
rows = max(img1.shape[0], img2.shape[0])
cols = img1.shape[1] + img2.shape[1]
image_match = np.zeros((rows, cols, 3), dtype=np.uint8)
image_match[:img1.shape[0], :img1.shape[1], :] = img1
image_match[:img2.shape[0], img1.shape[1]:, :] = img2

for i, j in matchs:
    # Random BGR color per match so individual correspondences stand out.
    b = random.randint(0, 255)
    g = random.randint(0, 255)
    r = random.randint(0, 255)
    point_color = (b, g, r)
    ptStart = (int(descriptors1[i]['x']), int(descriptors1[i]['y']))
    # BUG FIX: the x offset for points in the right-hand image must be
    # img1's WIDTH (shape[1]), not its height (shape[0]) — img2 is pasted
    # starting at column img1.shape[1] above.
    ptEnd = (int(descriptors2[j]['x']) + img1.shape[1],
             int(descriptors2[j]['y']))
    cv2.line(image_match, ptStart, ptEnd, point_color, 1, 4)

cv2.imshow("image", image_match)
cv2.waitKey(0)