import numpy as np
import cv2
import matplotlib.pyplot as plt
# Shi-Tomasi corner detection demo: find strong corners and mark them in red.
img = cv2.imread("C:/bridge.jpg")
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # detector works on a single channel
gray = np.float32(gray)  # goodFeaturesToTrack expects a float32 image
# Up to 10 corners, quality level 0.1, minimum 10 px between corners.
corners = cv2.goodFeaturesToTrack(gray, 10, 0.1, 10)
# BUGFIX: np.int0 was removed in NumPy 2.0; np.intp is the supported alias.
corners = np.intp(corners)
for corner in corners:
    x, y = corner.ravel()
    # Mark each corner with a filled red dot (BGR color order).
    cv2.circle(img, (int(x), int(y)), 4, (0, 0, 255), -1)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # matplotlib expects RGB
plt.imshow(img)
plt.axis('off')
plt.show()  # display the detection result
import cv2
import numpy as np
import matplotlib.pyplot as plt
# ORB feature matching with a brute-force matcher: locate img1 inside img2
# via a RANSAC homography and draw the inlier matches.
img1 = cv2.imread("C:/printer1.jpg", cv2.IMREAD_GRAYSCALE)
img2 = cv2.imread("C:/printer2.jpg", cv2.IMREAD_GRAYSCALE)
orb = cv2.ORB_create()
kp1, des1 = orb.detectAndCompute(img1, None)  # keypoints + binary descriptors
kp2, des2 = orb.detectAndCompute(img2, None)
# Hamming norm suits ORB's binary descriptors; crossCheck keeps mutual-best pairs.
bf = cv2.BFMatcher_create(cv2.NORM_HAMMING, crossCheck=True)
ms = bf.match(des1, des2)
ms = sorted(ms, key=lambda m: m.distance)  # best (smallest distance) first
matchesMask = None
if len(ms) > 10:
    # Coordinates of the matched keypoints in the query / train images.
    querypts = np.float32([kp1[m.queryIdx].pt for m in ms]).reshape(-1, 1, 2)
    trainpts = np.float32([kp2[m.trainIdx].pt for m in ms]).reshape(-1, 1, 2)
    # Robustly estimate the homography mapping query -> train.
    retv, mask = cv2.findHomography(querypts, trainpts, cv2.RANSAC)
    # BUGFIX: findHomography may fail and return None; the original crashed
    # on mask.ravel() / perspectiveTransform in that case. Guard before use.
    if retv is not None:
        matchesMask = mask.ravel().tolist()  # inlier mask for drawMatches
        h, w = img1.shape
        pts = np.float32([[0, 0], [0, h-1], [w-1, h-1], [w-1, 0]]).reshape(-1, 1, 2)
        # Project the query image's outline into the train image and draw it.
        dst = cv2.perspectiveTransform(pts, retv)
        img2 = cv2.polylines(img2, [np.int32(dst)], True, (255, 255, 255), 5)
img3 = cv2.drawMatches(img1, kp1, img2, kp2, ms, None,
                       matchColor=(0, 255, 0),  # draw match lines in green
                       singlePointColor=None,
                       matchesMask=matchesMask,
                       flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)
plt.imshow(img3)
plt.axis('off')
plt.show()  # display the matching result