A Detailed Guide to numpy.count_nonzero

This post takes a close look at NumPy's count_nonzero function, which counts the number of nonzero elements in an array. Several examples show how to call it, and the roles of the axis and keepdims parameters are explained to help you understand and apply the function.

numpy.count_nonzero counts the number of nonzero elements in an array.

Signature: numpy.count_nonzero(a, axis=None, *, keepdims=False)

a: the array whose nonzero elements are to be counted

axis: the axis or axes along which to count. With axis=0 the count runs down each column; with axis=1 it runs across each row. axis may also be a tuple of axes, in which case the count is taken over all of them at once.

The examples below should make this clearer 👇

An example:

import numpy as np

a = np.zeros((3,4))
print(np.count_nonzero(a))  
'''
0
'''
b = np.eye(4)
print(np.count_nonzero(b))  
'''
4
'''
c = np.array([[1,2,0],[0,1,0]])
print(c)
'''
[[1 2 0]
 [0 1 0]]
'''
print(np.count_nonzero(c,axis=0)) 
'''
[1 2 0]
'''
print(np.count_nonzero(c,axis=1)) 
'''
[2 1]
'''
d = np.array([[[1,2,1],[0,1,2],[0,2,3],[0,2,3]],[[1,2,1],[0,1,2],[0,2,3],[0,2,3]]])
print(d)
'''
[[[1 2 1]
  [0 1 2]
  [0 2 3]
  [0 2 3]]

 [[1 2 1]
  [0 1 2]
  [0 2 3]
  [0 2 3]]]
'''
print(np.count_nonzero(d,axis=0)) 
'''
[[2 2 2]
 [0 2 2]
 [0 2 2]
 [0 2 2]]
'''
print(np.count_nonzero(d,axis=1))
'''
[[1 4 4]
 [1 4 4]]
'''
print(np.count_nonzero(d,axis=(0,1))) #[2 8 8]
print(np.count_nonzero(d,axis=(0,1,2))) #18

I hope these examples make the behavior easier to follow. The signature also has a keepdims parameter: when keepdims=True, the axes that were counted over are kept in the result as dimensions of size one, so the output still broadcasts against the original array. For full details, see the official documentation.
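
A minimal sketch of keepdims, reusing the 2x3 array c from the example above (assuming the same session, and a NumPy version recent enough to support keepdims for count_nonzero, which was added around 1.19):

print(np.count_nonzero(c, axis=0, keepdims=True))  # result has shape (1, 3) instead of (3,)
'''
[[1 2 0]]
'''
print(np.count_nonzero(c, axis=1, keepdims=True))  # result has shape (2, 1) instead of (2,)
'''
[[2]
 [1]]
'''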

Official documentation: numpy.count_nonzero
