import cv2
import numpy as np
import time
def scale_image(image, percent, maxwh):
    """Resize image by `percent`, clamped so the result fits inside maxwh (h, w)."""
    max_height, max_width = maxwh[0], maxwh[1]
    # Largest percentage that still fits in each dimension
    max_percent_width = max_width / image.shape[1] * 100
    max_percent_height = max_height / image.shape[0] * 100
    max_percent = min(max_percent_width, max_percent_height)
    if percent > max_percent:
        percent = max_percent
    width = int(image.shape[1] * percent / 100)
    height = int(image.shape[0] * percent / 100)
    result = cv2.resize(image, (width, height), interpolation=cv2.INTER_AREA)
    return result, percent
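# A quick sanity check (a minimal sketch, not part of the original listing):
# asking for 200% of a 100x100 template against a 150x150 search area should
# clamp the scale to 150%.
def _demo_scale_image():
    img = np.zeros((100, 100, 3), dtype=np.uint8)
    scaled, pct = scale_image(img, 200, (150, 150))
    print(scaled.shape[:2], pct)  # expected: (150, 150) 150.0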
def rotate_image(image, angle, expandImage=True):
    """Rotate image by `angle` degrees; optionally expand the canvas so no pixels are clipped."""
    if expandImage:
        height, width = image.shape[:2]
        center = ((width - 1) / 2, (height - 1) / 2)
        # Negate so a positive `angle` rotates clockwise (getRotationMatrix2D is
        # counter-clockwise-positive)
        rotate_matrix = cv2.getRotationMatrix2D(center=center, angle=-angle, scale=1)
        # Bounding box of the rotated image: |cos| and |sin| come from the matrix
        bound_w = height * np.abs(rotate_matrix[0, 1]) + width * np.abs(rotate_matrix[0, 0])
        bound_h = height * np.abs(rotate_matrix[0, 0]) + width * np.abs(rotate_matrix[0, 1])
        bound_w = int(round(bound_w, 10))
        bound_h = int(round(bound_h, 10))
        # Shift so the rotated content is centered in the expanded canvas
        rotate_matrix[0, 2] += (bound_w - 1) / 2 - center[0]
        rotate_matrix[1, 2] += (bound_h - 1) / 2 - center[1]
        result = cv2.warpAffine(src=image, M=rotate_matrix, dsize=(bound_w, bound_h),
                                borderMode=cv2.BORDER_CONSTANT, borderValue=(0, 0, 0))
    else:
        # Rotate about the image center without resizing the canvas (corners get clipped)
        image_center = tuple(np.array(image.shape[1::-1]) / 2)
        rot_mat = cv2.getRotationMatrix2D(image_center, -angle, 1.0)
        result = cv2.warpAffine(image, rot_mat, image.shape[1::-1], flags=cv2.INTER_LINEAR)
    return result
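# A quick sanity check (a minimal sketch, not part of the original listing):
# rotating a 50x100 image by 90 degrees with canvas expansion should swap
# the output width and height.
def _demo_rotate_image():
    img = np.zeros((50, 100, 3), dtype=np.uint8)  # 50 rows x 100 columns
    rotated = rotate_image(img, 90, expandImage=True)
    print(rotated.shape[:2])  # expected: (100, 50)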
def MatchTemplate(rgbimage, rgbtemplate, method, matched_thresh, rot_range, rot_interval,
                  scale_range, scale_interval, rm_redundant, limit, numLevels, expandImage):
    points_list = []
    image_maxwh = rgbimage.shape
    height, width, numchannel = rgbtemplate.shape
    # Fallback entry so points_list[0] exists even if nothing passes the threshold
    all_points = [[(-1, -1), -1, -1, -1, -1]]
    # Build an image pyramid: matching on downsampled images is much faster
    for i in range(numLevels):
        rgbimage = cv2.pyrDown(rgbimage)
        rgbtemplate = cv2.pyrDown(rgbtemplate)
    for next_angle in range(rot_range[0], rot_range[1], rot_interval):
        for next_scale in range(scale_range[0], scale_range[1], scale_interval):
            # Scale the template for this iteration
            scaled_template, actual_scale = scale_image(rgbtemplate, next_scale, image_maxwh)
            if next_angle == 0:
                rotated_template = scaled_template
            else:
                # Rotate the scaled template
                rotated_template = rotate_image(scaled_template, next_angle, expandImage)
            if method == "TM_CCOEFF":
                matched_points = cv2.matchTemplate(rgbimage, rotated_template, cv2.TM_CCOEFF)
                min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(matched_points)
                if max_val >= matched_thresh:
                    all_points.append([max_loc, next_angle, actual_scale, max_val])
            elif method == "TM_CCOEFF_NORMED":
                matched_points = cv2.matchTemplate(rgbimage, rotated_template, cv2.TM_CCOEFF_NORMED)
                min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(matched_points)
                results = matched_points
                cv2.normalize(results, results, 0, 1, cv2.NORM_MINMAX, -1)
                _, score, _, _ = cv2.minMaxLoc(results)
                print('angle:', next_angle, 'score:', max_val, 'template shape:', rotated_template.shape)
                # Skip matches hugging the top-left border, then map the location
                # back to the original resolution (each pyrDown halves the image)
                if max_loc[0] >= limit and max_loc[1] >= limit:
                    all_points.append([(max_loc[0] * (2 ** numLevels), max_loc[1] * (2 ** numLevels)),
                                       next_angle, actual_scale, max_val, score])
            elif method == "TM_CCORR":
                matched_points = cv2.matchTemplate(rgbimage, rotated_template, cv2.TM_CCORR)
                min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(matched_points)
                if max_val >= matched_thresh:
                    all_points.append([max_loc, next_angle, actual_scale, max_val])
            elif method == "TM_CCORR_NORMED":
                # Run the match
                matched_points = cv2.matchTemplate(rgbimage, rotated_template, cv2.TM_CCORR_NORMED)
                min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(matched_points)
                # if max_val >= matched_thresh:
                # Record match location, angle, scale, and similarity score
                print('angle:', next_angle, 'score:', max_val)
                all_points.append([(max_loc[0] * (2 ** numLevels), max_loc[1] * (2 ** numLevels)),
                                   next_angle, actual_scale, max_val])
            elif method == "TM_SQDIFF":
                matched_points = cv2.matchTemplate(rgbimage, rotated_template, cv2.TM_SQDIFF)
                min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(matched_points)
                if min_val <= matched_thresh:
                    all_points.append([min_loc, next_angle, actual_scale, min_val])
            elif method == "TM_SQDIFF_NORMED":
                matched_points = cv2.matchTemplate(rgbimage, rotated_template, cv2.TM_SQDIFF_NORMED)
                min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(matched_points)
                if min_val <= matched_thresh:
                    all_points.append([min_loc, next_angle, actual_scale, min_val])
            else:
                raise ValueError("There's no such comparison method for template matching.")
    # Sort by score: ascending for SQDIFF (smaller is better), descending otherwise
    if method in ("TM_SQDIFF", "TM_SQDIFF_NORMED"):
        all_points = sorted(all_points, key=lambda x: x[3])
    else:
        all_points = sorted(all_points, key=lambda x: -x[3])
    # Remove redundant matches: keep only the best one
    if rm_redundant:
        points_list.append(all_points[0])
    else:
        points_list = all_points
    return points_list
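# Hypothetical helper (a sketch, not part of the original matcher): unpack the
# best entry returned by MatchTemplate with method="TM_CCOEFF_NORMED". Entry
# layout: [(x, y), angle_deg, scale_percent, max_val, normalized_score], where
# (x, y) is already mapped back to the original resolution by 2 ** numLevels.
def _describe_best_match(points_list):
    (x, y), angle, scale_pct, max_val = points_list[0][:4]
    print(f"top-left=({x}, {y}), angle={angle} deg, scale={scale_pct}%, score={max_val:.3f}")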
# Corner coordinates of a rectangle after rotating it by the given angle
# about the image center.
def GetRotatePoints(img, inRect, angle):
    rect = inRect  # (x, y, w, h)
    pts = np.zeros((4, 2), dtype=np.int32)
    center = (img.shape[1] / 2, img.shape[0] / 2)
    M = cv2.getRotationMatrix2D(center, angle, 1.0)
    # Rectangle corners in homogeneous coordinates (columns: TL, TR, BR, BL)
    ptMat = np.ones((3, 4), dtype=np.float32)
    ptMat[0, :] = [0, rect[2] - 1, rect[2] - 1, 0]
    ptMat[1, :] = [0, 0, rect[3] - 1, rect[3] - 1]
    M = M.astype(np.float32)
    result = np.dot(M, ptMat)  # 2x4 matrix of rotated corner coordinates
    for k in range(4):
        pts[k] = (int(result[0, k]), int(result[1, k]))
    return pts
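# Hypothetical usage (a sketch, not part of the original listing): corners of
# a full 226x226 template rotated 30 degrees about the template image's center.
def _demo_get_rotate_points():
    template = np.zeros((226, 226, 3), dtype=np.uint8)
    corners = GetRotatePoints(template, (0, 0, 226, 226), 30)
    print(corners)  # 4x2 int array of rotated corner coordinates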
if __name__ == '__main__':
    # Read the template image: shape = [226, 226], width = 226, height = 226
    temp = cv2.imdecode(np.fromfile(r'D:\BaiduNetdiskDownload\Match_With_Angle\Match_With_Angle\MatchTemplate_With_Angle\bin\Debug\0.bmp', dtype=np.uint8), 1)
    # Read the search image: shape = [2039, 2066], width = 2066, height = 2039
    src = cv2.imdecode(np.fromfile(r'D:\BaiduNetdiskDownload\Match_With_Angle\Match_With_Angle\MatchTemplate_With_Angle\bin\Debug\7.bmp', dtype=np.uint8), 1)
    t1 = time.time()
    # ResultPoint = CircleMatchNcc(src, temp, -5, 10, 1, 3, 0.7, 5)
    points_list = MatchTemplate(src, temp, "TM_CCOEFF_NORMED", 0.7, [0, 360], 1, [100, 150], 150, True, 1, 2, False)
    t2 = time.time()
    print('points_list:', points_list)
    print('ct:', t2 - t1)
    # Matched [X, Y, W, H]
    X = points_list[0][0][0]
    Y = points_list[0][0][1]
    W = temp.shape[1]
    H = temp.shape[0]
    print(X, Y, W, H)
    # The four corner coordinates are then (TL, TR, BL, BR):
    pts = [[X, Y], [X + W, Y], [X, Y + H], [X + W, Y + H]]
    pt = (X + W / 2, Y + H / 2)
    # Rotate the box by the negated matched angle, mirroring the rotation
    # applied to the template (the original hard-coded -140 here)
    r = cv2.getRotationMatrix2D(pt, -points_list[0][1], 1.0)
    # Apply the affine transform to all four corners at once
    src_pts = np.array(pts, dtype=np.float32).reshape(-1, 1, 2)
    new_pts = cv2.transform(src_pts, r).reshape(-1, 2)
    print('new_pts:', new_pts)
    # Draw the rotated rectangle
    p0, p1, p2, p3 = [tuple(map(int, p)) for p in new_pts]
    cv2.line(src, p0, p1, (0, 0, 255), 2)
    cv2.line(src, p1, p3, (0, 0, 255), 2)
    cv2.line(src, p3, p2, (0, 0, 255), 2)
    cv2.line(src, p2, p0, (0, 0, 255), 2)
    cv2.imwrite('./model.jpg', src)
    exit()
    # Get the corner coordinates. Unreachable below exit(): leftover from the
    # commented-out CircleMatchNcc path above (`rect` and `ResultPoint` are
    # not defined in this script)
    pts = GetRotatePoints(temp, rect, ResultPoint[0][2] + 360)
    print(pts)
    print('pts[0]:', pts[0])
    print('pts[1]:', pts[1])
    print(ResultPoint[0][0])
    print(ResultPoint[0][1])
    print('label_StartPoint:', pts[0] + ResultPoint[0][0])
    print('label_EndPoint:', pts[1] + ResultPoint[0][1])
    cv2.line(src, (pts[0][0] + ResultPoint[0][0], pts[0][1] + ResultPoint[0][1]),
             (pts[1][0] + ResultPoint[0][0], pts[1][1] + ResultPoint[0][1]), (0, 0, 255), 2)
    cv2.line(src, (pts[1][0] + ResultPoint[0][0], pts[1][1] + ResultPoint[0][1]),
             (pts[2][0] + ResultPoint[0][0], pts[2][1] + ResultPoint[0][1]), (0, 0, 255), 2)
    cv2.line(src, (pts[2][0] + ResultPoint[0][0], pts[2][1] + ResultPoint[0][1]),
             (pts[3][0] + ResultPoint[0][0], pts[3][1] + ResultPoint[0][1]), (0, 0, 255), 2)
    cv2.line(src, (pts[3][0] + ResultPoint[0][0], pts[3][1] + ResultPoint[0][1]),
             (pts[0][0] + ResultPoint[0][0], pts[0][1] + ResultPoint[0][1]), (0, 0, 255), 2)
    cv2.imwrite('./model.jpg', src)
    print('pts:', pts)