# FLANN-based matcher: locate a template image inside a target image.
# NOTE: cv2.xfeatures2d.SIFT_create() is only available in
# opencv-contrib-python <= 3.4.2.16 (SIFT was patent-restricted and removed
# from later contrib builds) -- confirm against the installed OpenCV version.
import numpy as np
import cv2
from matplotlib import pyplot as plt

# Minimum number of good feature matches. If more than this many survive the
# ratio test, the match is accepted and the template's location is outlined
# on the target image.
MIN_MATCH_COUNT = 7

# Template image (what we look for) and target image (where we look),
# both loaded as grayscale (flag 0).
template = cv2.imread('C:/Users/simao.wang.HIRAIN/Desktop/Demotemp.jpg', 0)
target = cv2.imread('C:/Users/simao.wang.HIRAIN/Desktop/Demo.jpg', 0)

# Create the SIFT (scale-invariant feature transform) detector and extract
# keypoints and descriptors from both images.
sift = cv2.xfeatures2d.SIFT_create()
kp1, des1 = sift.detectAndCompute(template, None)
kp2, des2 = sift.detectAndCompute(target, None)

# FLANN (fast library for approximate nearest neighbours) matcher using a
# KD-tree index with 5 trees; 'checks' is how many times the index trees are
# traversed during a search (more checks = better accuracy, slower search).
FLANN_INDEX_KDTREE = 0
index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
search_params = dict(checks=50)
flann = cv2.FlannBasedMatcher(index_params, search_params)
# For each template descriptor, find its 2 nearest neighbours in the target.
matches = flann.knnMatch(des1, des2, k=2)

# Lowe's ratio test: keep a match only when the best neighbour is clearly
# closer than the second-best one.
good = []
for m, n in matches:
    if m.distance < 0.7 * n.distance:
        good.append(m)

if len(good) > MIN_MATCH_COUNT:
    # Pixel coordinates of the matched keypoints in template and target.
    src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
    dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
    # Homography mapping template -> target, with RANSAC outlier rejection
    # (reprojection threshold 5.0); mask marks the inlier matches.
    M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
    matchesMask = mask.ravel().tolist()
    # Project the template's four corners through the homography and outline
    # the detected region on the target image.
    h, w = template.shape
    pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]).reshape(-1, 1, 2)
    dst = cv2.perspectiveTransform(pts, M)
    cv2.polylines(target, [np.int32(dst)], True, 0, 2, cv2.LINE_AA)
else:
    print("Not enough matches are found - %d/%d" % (len(good), MIN_MATCH_COUNT))
    matchesMask = None

draw_params = dict(matchColor=(0, 255, 0),   # draw accepted matches in green
                   singlePointColor=None,
                   matchesMask=matchesMask,  # draw only the RANSAC inliers
                   flags=2)
result = cv2.drawMatches(template, kp1, target, kp2, good, None, **draw_params)
plt.imshow(result, 'gray')
plt.show()

# SIFT outline (translated from the original notes):
# 1. Build a scale space by filtering the image with Gaussians of varying
#    scale: small scales capture detail, large scales capture the overall shape.
# 2. Build a Gaussian pyramid and a difference-of-Gaussians (DoG) pyramid
#    (each DoG level is simply the difference of adjacent Gaussian levels);
#    DoG responses highlight intensity changes, i.e. candidate features.
# 3. Local extrema detection: each sample is compared against its 8 neighbours
#    at the same scale and 9 neighbours in each adjacent scale (26 in total)
#    so extrema are found in both scale and image space.
# 4. Discard edge responses and refine keypoint localisation.
# 5. Assign each keypoint one or more dominant orientations from local image
#    gradients; subsequent operations are performed relative to the keypoint's
#    orientation, scale and position, giving rotation invariance.
# 6. Describe each keypoint with a feature vector; every keypoint carries
#    position, scale and orientation.
# 7. Match template/target keypoints by Euclidean descriptor distance (smaller
#    distance = higher similarity), using a k-d tree to search for the nearest
#    and second-nearest neighbours of each target keypoint.
# Gauge pointer reading
import cv2
import numpy as np
def avg_circles(circles, b):
    """Average the first ``b`` circles of a ``cv2.HoughCircles`` result.

    HoughCircles can report several circles when a gauge sits at a slight
    angle; averaging them gives a single, more stable centre and radius.

    Parameters
    ----------
    circles : sequence
        HoughCircles-style result where ``circles[0][i]`` is ``(x, y, r)``.
    b : int
        Number of circles (from the front of ``circles[0]``) to average.

    Returns
    -------
    tuple[int, int, int]
        ``(avg_x, avg_y, avg_r)``, each truncated to int.
    """
    avg_x = 0
    avg_y = 0
    avg_r = 0
    # optional - average for multiple circles (can happen when a gauge is at
    # a slight angle)
    for i in range(b):
        avg_x += circles[0][i][0]
        avg_y += circles[0][i][1]
        avg_r += circles[0][i][2]
    avg_x = int(avg_x / b)
    avg_y = int(avg_y / b)
    avg_r = int(avg_r / b)
    return avg_x, avg_y, avg_r
def dist_2_pts(x1, y1, x2, y2):
    """Return the Euclidean distance between points (x1, y1) and (x2, y2)."""
    return np.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)

def calibrate_gauge(gauge_number, file_type, img):
    '''
    This function should be run using a test image in order to calibrate the
    range available to the dial as well as the units. It works by first
    finding the center point and radius of the gauge. Then it draws lines at
    hard coded intervals (separation) in degrees. It then prompts the user to
    enter position in degrees of the lowest possible value of the gauge, as
    well as the starting value (which is probably zero in most cases but it
    won't assume that). It will then ask for the position in degrees of the
    largest possible value of the gauge. Finally, it will ask for the units.
    This assumes that the gauge is linear (as most probably are).
    It will return the min value with angle in degrees (as a tuple), the max
    value with angle in degrees (as a tuple), and the units (as a string).

    Calibrates the dial and its usable range from a test image: it uses the
    previously found centre point and radius, draws the tick marks, and asks
    the user for the dial's minimum/maximum angles, minimum/maximum values
    and units (min_angle, max_angle, min_value, max_value, units).
    '''
height, width = img.shape[:2]# 将图片转为灰度图片
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)# convert to gray# 霍夫圆环检测# image:8位,单通道图像# method:定义检测图像中圆的方法。目前唯一实现的方法cv2.HOUGH_GRADIENT。# dp:累加器分辨率与图像分辨率的反比。dp获取越大,累加器数组越小。# minDist:检测到的圆的中心,(x,y)坐标之间的最小距离。如果minDist太小,则可能导致检测到多个相邻的圆。如果minDist太大,则可能导致很多圆检测不到。# param1:用于处理边缘检测的梯度值方法。# param2:cv2.HOUGH_GRADIENT方法的累加器阈值。阈值越小,检测到的圈子越多。# minRadius:半径的最小大小(以像素为单位)。# maxRadius:半径的最大大小(以像素为单位)。
circles = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT,1,20, np.array([]),100,50,int(height *0.4),int(height *0.5))
a, b, c = circles.shape
# 获取圆的坐标x,y和半径r
x, y, r = avg_circles(circles, b)
cv2.circle(img,(x, y), r,(0,0,255),3, cv2.LINE_AA)# 用红色画圆
cv2.circle(img,(x, y),2,(0,255,0),3, cv2.LINE_AA)# 画圆心# for testing, output circles on image# cv2.imwrite('gauge-%s-circles.%s' % (gauge_number, file_type), img)# for calibration, plot lines from center going out at every 10 degrees and add marker# for i from 0 to 36 (every 10 deg)'''
goes through the motion of a circle and sets x and y values based on the set separation spacing. Also adds text to each
line. These lines and text labels serve as the reference point for the user to enter
NOTE: by default this approach sets 0/360 to be the +x axis (if the image has a cartesian grid in the middle), the addition
(i+9) in the text offset rotates the labels by 90 degrees so 0/360 is at the bottom (-y in cartesian). So this assumes the
gauge is aligned in the image, but it can be adjusted by changing the value of 9 to something else.
根据画出的刻度值,给定x,y的值,并在此位置添加文本信息。
这些刻度和文本标签用作用户输入的参考点
'''
separation =5.0# 每格刻度的度数值
interval =int(360/ separation)
p1 = np.zeros((interval,2))# set empty arrays
p2 = np.zeros((interval,2))
p_text = np.zeros((interval,2))for i inran