minMaxLoc, rowRange, colRange

void minMaxLoc(InputArray src, double* minVal, double* maxVal=0, Point* minLoc=0, Point* maxLoc=0, InputArray mask=noArray())

src: the input array; it must be single-channel (the function does not work on multi-channel arrays).

minVal: receives the minimum value; pass NULL if you don't need it.

maxVal: receives the maximum value; pass NULL if you don't need it.

minLoc: receives the position of the minimum (Point type); pass NULL if you don't need it.

maxLoc: receives the position of the maximum (Point type); pass NULL if you don't need it.

mask: optional mask restricting the region that is searched.

Mat mat = (Mat_<float>(4, 4) << 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);

double min = 0;
double max = 0;
Point minpt, maxpt;
minMaxLoc(mat, &min, &max, &minpt, &maxpt);   // no mask: search the whole matrix
cout << "min: " << min << "; minpt: " << minpt << endl;
cout << "max: " << max << "; maxpt: " << maxpt << endl;

min: 0; minpt: [0, 0]
max: 15; maxpt: [3, 3]
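
When a mask is supplied, the search is restricted to the pixels where the mask is non-zero. A minimal sketch continuing the example above (the 2x2 region is an arbitrary choice for illustration; the mask must be an 8-bit array of the same size as src):

Mat mask = Mat::zeros(mat.size(), CV_8U);
mask(Rect(0, 0, 2, 2)).setTo(255);                // only the top-left 2x2 block is searched
minMaxLoc(mat, &min, &max, &minpt, &maxpt, mask);
cout << "masked max: " << max << "; maxpt: " << maxpt << endl;  // prints 5 at [1, 1]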

rowRange creates a new matrix header for the specified row span, which lets you take the elements of a range of rows.
colRange does the same for a specified column span.
rowRange(int x, int y) starts at row x and takes the following y - x rows; the interval is half-open, so y may be at most the row count (for a matrix with 5 rows, y can be up to 5).
rowRange(0, 3) therefore means: start at row 0 and take 3 rows, i.e. rows 0, 1 and 2. colRange(int x, int y) works the same way for columns.
Mat::rowRange(int x, int y) and Mat::rowRange(Range(x, y)) give identical results: y - x rows are taken, including the left boundary of the range but excluding the right one.
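
One detail worth stressing: both functions only create a new header over the same data, so no elements are copied, and writes through the sub-matrix change the original; call clone() when an independent copy is needed. A quick sketch:

Mat m = Mat::zeros(3, 3, CV_32F);
m.rowRange(0, 1).setTo(7);              // writes through the header into m itself
cout << m.at<float>(0, 0) << endl;      // prints 7
Mat indep = m.rowRange(0, 1).clone();   // clone() detaches the data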


#include <opencv2/opencv.hpp>
#include <iostream>
using namespace cv;
using namespace std;

int main()
{
	// Initialize a 3x3 identity matrix
	Mat examples = (Mat_<float>(3, 3) << 1, 0, 0, 0, 1, 0, 0, 0, 1);
	// Take sub-matrices over specific row/column ranges of examples
	Mat row1 = examples.rowRange(Range(1, 2));  // row 1 only, via a Range object
	Mat row2 = examples.rowRange(1, 2);         // row 1 only, via (start, end)
	Mat row3 = examples.rowRange(0, 2);         // rows 0 and 1
	Mat col = examples.colRange(1, 3);          // columns 1 and 2
	// Counters n, p, m, q, z control the line breaks in the output
	int n = 0;
	int p = 0;
	int m = 0;
	int q = 0;
	int z = 0;
	// Print the original matrix
	cout << "examples:" << endl;
	for (int i = 0; i<examples.rows; i++){
		for (int j = 0; j<examples.cols; j++)
		{
			cout << examples.at<float>(i, j);
			p++;
			if (p % 3 == 0)cout << endl;
		}
	}
	cout << endl;

	// Test rowRange(Range(int x, int y))
	cout << "row1:" << endl;
	for (int i = 0; i<row1.rows; i++)
	{
		for (int j = 0; j<row1.cols; j++){
			cout << row1.at<float>(i, j);
			n++;
			if (n % 3 == 0)
				cout << endl;
		}
	}
	cout << endl;

	// Test rowRange(int x, int y)
	cout << "row2:" << endl;
	for (int i = 0; i<row2.rows; i++)
	{
		for (int j = 0; j<row2.cols; j++){
			cout << row2.at<float>(i, j);
			m++;
			if (m % 3 == 0)
				cout << endl;
		}
	}
	cout << endl;

	cout << "row3:" << endl;
	for (int i = 0; i < row3.rows; i++)
	{
		for (int j = 0; j < row3.cols; j++)
		{
			cout << row3.at<float>(i, j);
			z++;
			if (z % 3 == 0)
				cout << endl;
		}
	}
	cout << endl;

	// Test colRange(int x, int y)
	cout << "col:" << endl;
	for (int i = 0; i<col.rows; i++)
	{
		for (int j = 0; j < col.cols; j++){
			cout << col.at<float>(i, j);
			q++;
			if (q % 2 == 0)
				cout << endl;
		}
	}
	return 0;
}
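
For the 3x3 identity matrix above, the program prints (elements are written back to back, so each printed line is one matrix row):

examples:
100
010
001

row1:
010

row2:
010

row3:
100
010

col:
00
10
01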
