OpenCV4函数合集开放

第二章 数据载入、显示与保存

2.1 图像存储容器

2.1.1 Mat类介绍

Mat分为矩阵头和指向存储数据的矩阵指针两部分。

代码清单2-1 创建Mat类

cv::Mat a; //创建一个名为a的矩阵头
a = cv::imread("test.jpd"); //向a中赋值图像数据,矩阵指针指向像素数据
cv::Mat b = a; //复制矩阵头,并命名为b

代码清单2-2 声明一个指定类型的Mat类

cv::Mat A = Mat_<double>(3,3); //创建一个3*3的矩阵用于存放double类型数据

代码清单2-3 通过OpenCV数据类型创建Mat类

cv::Mat a(640,480,CV_8UC3); //创建一个640*480的3通道矩阵用于存放彩色图像
cv::Mat a(3,3,CV_8UC1); //创建一个3*3的8位无符号整数的单通道矩阵
cv::Mat a(3,3,CV_8U); //创建单通道矩阵,C1标识可以省略

2.1.2 Mat类构造与赋值

1.Mat类的构造

代码清单2-4 默认构造函数使用方式

cv::Mat::Mat();

代码清单2-5 利用矩阵尺寸和类型参数构造Mat类

cv::Mat::Mat(int rows,
            int cols,
            int type
            )
  • rows:构造矩阵的行数
  • cols:矩阵的列数
  • type:矩阵中存储的数据类型

代码清单2-6 用Size()结构构造Mat

cv::Mat::Mat(Size size,
       int type
       )
  • size:二维数组变量尺寸,通过Size(cols,rows)进行赋值
  • type:与代码清单2-5中的参数一致

代码清单2-7 用Size()结构构造Mat示例

cv::Mat a(Size(480,640),CV_8UC1); //构造一个行为640、列为480的单通道矩阵
cv::Mat b(Size(480,640),CV_32FC3); //构造一个行为640、列为480的3通道矩阵

代码清单2-8 利用已有矩阵构造Mat类

cv::Mat::Mat(const Mat & m);
  • m:是已经构建完成的Mat类矩阵数据

提示:如果希望得到一个与a内容相同但彼此互不影响的Mat类,那么可以使用b = a.clone()实现
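
下面用一个简单的对比示例说明直接赋值与clone()的区别(非书中的代码清单,图像文件名"lena.png"为假设):

cv::Mat a = cv::imread("lena.png");  //读取图像,文件名为假设
cv::Mat b = a;  //仅复制矩阵头,b与a共享同一块像素数据
cv::Mat c = a.clone();  //完整复制一份像素数据,之后修改c不会影响a和b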

代码清单2-9 构造已有Mat类的子类

cv::Mat::Mat(const Mat & m,
            const Range & rowRange,
            const Range & rowRange = Rang::all()
            )
  • m:是已经构建完成的Mat类矩阵数据
  • rowRange:在已有矩阵中需要截取的行数范围,是一个Range变量,例如从第2行到第5行可以表示为Range(2,5)
  • colRange:在已有矩阵中需要截取的列数范围,是一个Range变量,例如从第2列到第5列可以表示为Range(2,5),默认截取所有列。

代码清单2-10 在原Mat中截取子Mat类

cv::Mat b(a, Range(2,5), Range(2,5)); //从a中截取部分数据构造b
cv::Mat c(a, Range(2,5)); //列范围参数取默认值,截取所有列构造c

2.2 图像的读取与显示

2.2.1 图像读取函数 imread

imread()

empty()
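
下面给出一个读取图像的最小示例(非书中的代码清单,图像文件名"lena.png"为假设),演示用imread()读取图像并用empty()判断是否读取成功:

#include <opencv2/opencv.hpp>
#include <iostream>

using namespace std;
using namespace cv;

int main()
{
	Mat img = imread("lena.png", IMREAD_COLOR);  //以彩色模式读取图像,文件名为假设
	if (img.empty())  //判断图像数据是否读取成功
	{
		cout << "请确认图像文件名称是否正确" << endl;
		return -1;
	}
	cout << "图像宽度=" << img.cols << "  图像高度=" << img.rows << endl;
	return 0;
}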

2.2.2 图像窗口函数 namedWindow

namedWindow()

2.2.3 图像显示函数 imshow

imshow()

cv::waitKey()
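
下面是一个组合使用namedWindow()、imshow()和waitKey()的简单示例(非书中的代码清单,图像文件名"lena.png"为假设):

#include <opencv2/opencv.hpp>
#include <iostream>

using namespace std;
using namespace cv;

int main()
{
	Mat img = imread("lena.png");  //文件名为假设
	if (img.empty())
	{
		cout << "请确认图像文件名称是否正确" << endl;
		return -1;
	}
	namedWindow("图像窗口", WINDOW_AUTOSIZE);  //创建一个尺寸随图像自动调整的窗口
	imshow("图像窗口", img);  //在窗口中显示图像
	waitKey(0);  //参数为0表示一直等待按键
	return 0;
}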

2.3 视频加载与摄像头调用

2.3.1 视频数据的读取

VideoCapture类构造函数

isOpened()

“>>”

empty()

get()

代码清单2-28 读取视频文件

#include <opencv2\opencv.hpp>
#include <iostream>

using namespace std;
using namespace cv;

int main()
{
	system("color F0");  //更改输出界面颜色
	VideoCapture video("cup.mp4");
	if (video.isOpened())
	{
		cout << "视频中图像的宽度=" << video.get(CAP_PROP_FRAME_WIDTH) << endl;
		cout << "视频中图像的高度=" << video.get(CAP_PROP_FRAME_HEIGHT) << endl;
		cout << "视频帧率=" << video.get(CAP_PROP_FPS) << endl;
		cout << "视频的总帧数=" << video.get(CAP_PROP_FRAME_COUNT);
	}
	else
	{
		cout << "请确认视频文件名称是否正确" << endl;
		return -1;
	}
	while (1)
	{
		Mat frame;
		video >> frame;
		if (frame.empty())
		{
			break;
		}
		imshow("video", frame);
		waitKey(1000 / video.get(CAP_PROP_FPS));
	}
	waitKey();
	return 0;
}

2.3.2 摄像头的直接调用

VideoCapture类还可以调用摄像头
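
下面是一个调用摄像头并实时显示图像的简单示例(非书中的代码清单,摄像头编号0为假设,通常表示系统默认摄像头):

#include <opencv2/opencv.hpp>
#include <iostream>

using namespace std;
using namespace cv;

int main()
{
	VideoCapture video(0);  //打开编号为0的摄像头,编号为假设
	if (!video.isOpened())  //判断摄像头是否成功打开
	{
		cout << "打开摄像头失败,请确认摄像头是否安装成功" << endl;
		return -1;
	}
	while (true)
	{
		Mat frame;
		video >> frame;  //从摄像头中读取一帧图像
		if (frame.empty())
		{
			break;
		}
		imshow("camera", frame);
		if (waitKey(30) == 27)  //按ESC按键退出
		{
			break;
		}
	}
	return 0;
}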

2.4 数据保存

2.4.1 图像的保存

imwrite()
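
在给出完整示例之前,先看一个最简单的保存用法(非书中的代码清单,文件名均为假设):

cv::Mat img = cv::imread("lena.png");  //读取图像,文件名为假设
if (!img.empty())
{
	cv::imwrite("lena_copy.jpg", img);  //将图像以JPEG格式保存到当前路径
}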

代码清单2-32 保存图像

#include <iostream>
#include <opencv2\opencv.hpp>

using	namespace std;
using	namespace cv;

void AlphaMat(Mat &mat)
{
	CV_Assert(mat.channels() == 4);
	for (int i = 0; i < mat.rows; ++i)
		{
			for (int j = 0; j < mat.cols; ++j)
			{
				Vec4b& bgra = mat.at<Vec4b>(i, j);
				bgra[0] = UCHAR_MAX;  // 蓝色通道
				bgra[1] = saturate_cast<uchar>((float(mat.cols - j)) / ((float)mat.cols) * UCHAR_MAX);  // 绿色通道
				bgra[2] = saturate_cast<uchar>((float(mat.rows - i)) / ((float)mat.rows) * UCHAR_MAX);  // 红色通道
				bgra[3] = saturate_cast<uchar>(0.5 * (bgra[1] + bgra[2]));  // Alpha通道
			}
		}
}
int main(int argc, char** argv)
{
	// Create mat with alpha channel
	Mat mat(480, 640, CV_8UC4);
	AlphaMat(mat);
	vector<int> compression_params;
	compression_params.push_back(IMWRITE_PNG_COMPRESSION);  //PNG格式图像压缩标志
	compression_params.push_back(9);  //设置最高压缩质量		
	bool result = imwrite("alpha.png", mat, compression_params);
	if (!result)
	{
		cout << "保存成PNG格式图像失败" << endl;
		return -1;
	}
	cout << "保存成功" << endl;
	return 0;
}

2.4.2 视频的保存

VideoWriter类构造函数

isOpened()

get()

“<<” 或者 write()

release()

#include <opencv2\opencv.hpp>
#include <iostream>

using namespace cv;
using namespace std;

int main()
{
	Mat img;
	VideoCapture video(0);  //使用某个摄像头

	//读取视频
	//VideoCapture video;
	//video.open("cup.mp4");  

	if (!video.isOpened())  // 判断是否调用成功
	{
		cout << "打开摄像头失败,请确实摄像头是否安装成功";
		return -1;
	}

	video >> img;  //获取图像
	//检测是否成功获取图像
	if (img.empty())   //判断有没有读取图像成功
	{
		cout << "没有获取到图像" << endl;
		return -1;
	}
	bool isColor = (img.type() == CV_8UC3);  //判断相机(视频)类型是否为彩色

	VideoWriter writer;
	int codec = VideoWriter::fourcc('M', 'J', 'P', 'G');  // 选择编码格式
	//OpenCV 4.0版本设置编码格式
	//int codec = CV_FOURCC('M', 'J', 'P', 'G'); 

	double fps = 25.0;  //设置视频帧率 
	string filename = "live.avi";  //保存的视频文件名称
	writer.open(filename, codec, fps, img.size(), isColor);  //创建保存视频文件的视频流

	if (!writer.isOpened())   //判断视频流是否创建成功
	{
		cout << "打开视频文件失败,请确实是否为合法输入" << endl;
		return -1;
	}

	while (1)
	{
		//检测是否执行完毕
		if (!video.read(img))   //判断能否继续从摄像头或者视频文件中读出一帧图像
		{
			cout << "摄像头断开连接或者视频读取完成" << endl;
			break;
		}
		writer.write(img);  //把图像写入视频流
		//writer << img;
		imshow("Live", img);  //显示图像
		char c = waitKey(50);
		if (c == 27)  //按ESC按键退出视频保存
		{
			break;
		}
	}
	// 退出程序时刻自动关闭视频流
	//video.release();
	//writer.release();	
	return 0;
}

2.4.3 保存和读取XML和YAML文件

FileStorage类构造函数

isOpened()

open()

“<<” 、 “>>”

#include <opencv2/opencv.hpp>
#include <iostream>
#include <string>

using namespace std;
using namespace cv;

int main(int argc, char** argv)
{
	system("color F0");  //修改运行程序背景和文字颜色
	//string fileName = "datas.xml";  //文件的名称
	string fileName = "datas.yaml";  //文件的名称
	//以写入的模式打开文件
	cv::FileStorage fwrite(fileName, cv::FileStorage::WRITE);
	
	//存入矩阵Mat类型的数据
	Mat mat = Mat::eye(3, 3, CV_8U);
	fwrite.write("mat", mat);  //使用write()函数写入数据
	//存入浮点型数据,节点名称为x
	float x = 100;
	fwrite << "x" << x;
	//存入字符串型数据,节点名称为str
	String str = "Learn OpenCV 4";
	fwrite << "str" << str;
	//存入数组,节点名称为number_array
	fwrite << "number_array" << "[" <<4<<5<<6<< "]";
	//存入多node节点数据,主名称为multi_nodes
	fwrite << "multi_nodes" << "{" << "month" << 8 << "day" << 28 << "year"
		<< 2019 << "time" << "[" << 0 << 1 << 2 << 3 << "]" << "}";

	//关闭文件
	fwrite.release();

	//以读取的模式打开文件
	cv::FileStorage fread(fileName, cv::FileStorage::READ);
	//判断是否成功打开文件
	if (!fread.isOpened())
	{
		cout << "打开文件失败,请确认文件名称是否正确!" << endl;
		return -1;
	}

	//读取文件中的数据
	float xRead;
	fread["x"] >> xRead;  //读取浮点型数据
	cout << "x=" << xRead << endl;

	//读取字符串数据
	string strRead;
	fread["str"] >> strRead;
	cout << "str=" << strRead << endl;

	//读取含多个数据的number_array节点
	FileNode fileNode = fread["number_array"];
	cout << "number_array=[";
	//循环遍历每个数据
	for (FileNodeIterator i = fileNode.begin(); i != fileNode.end(); i++)
	{
		float a;
		*i >> a;
		cout << a<<" ";
	}
	cout << "]" << endl;

	//读取Mat类型数据
	Mat matRead;
	fread["mat="] >> matRead;
	cout << "mat=" << mat << endl;

	//读取含有多个子节点的节点数据,不使用FileNode和迭代器进行读取
	FileNode fileNode1 = fread["multi_nodes"];
	int month = (int)fileNode1["month"];
	int day = (int)fileNode1["day"];
	int year = (int)fileNode1["year"];
	cout << "multi_nodes:" << endl 
		<< "  month=" << month << "  day=" << day << "  year=" << year;
	cout << "  time=[";
	for (int i = 0; i < 4; i++)
	{
		int a = (int)fileNode1["time"][i];
		cout << a << " ";
	}
	cout << "]" << endl;
	
	//关闭文件
	fread.release();
	return 0;
}

第三章 图像基本操作

3.1 图像颜色空间

3.1.1 颜色模型与转换

  1. RGB颜色模型
  2. YUV颜色模型
  3. HSV颜色模型
  4. Lab颜色模型
  5. GRAY颜色模型

6.不同颜色模型间的互相转换

cvtColor()函数用于将图像从一个颜色模型转换为另一个颜色模型

void cv::cvtColor(InputArray src,
                 OutputArray dst,
                 int code,
                 int dstCn = 0
                 )
  • src:待转换颜色模型原始图像
  • dst:转换颜色模型后的目标图像
  • code:颜色空间转换的标志,如由RGB空间到HSV空间
  • dstCn:目标图像中的通道数

代码清单3-2 图像颜色模型相互转换

#include <opencv2\opencv.hpp>
#include <iostream>
#include <vector>

using namespace std;
using namespace cv;

int main()
{
    Mat img = imread("E:/BaiduNetdiskDownload/data/lena.png");
    if (img.empty())
    {
        cout << "请确认图像文件名称师范正确" << endl;
        return -1;
    }
    Mat gray, HSV, YUV, Lab, img32;
    img.convertTo(img32, CV_32F, 1.0 / 255); //将CV_8U类型转换成CV_32F类型
    //img32.convertTo(img,CV_8U,255); //将CV_32F类型转换成CV_8U类型
    cvtColor(img32, HSV, COLOR_BGR2HSV);
    cvtColor(img32, YUV, COLOR_BGR2YUV);
    cvtColor(img32, Lab, COLOR_BGR2Lab);
    cvtColor(img32, gray, COLOR_BGR2GRAY);
    imshow("原图", img32);
    imshow("HSV", HSV);
    imshow("YUV", YUV);
    imshow("Lab", Lab);
    imshow("gray", gray);
    waitKey(0);
    return 0;
}

convertTo()

3.1.2 多通道分离与合并

split()

merge()

代码清单3-6 实现图像分离与合并

#include <opencv2\opencv.hpp>
#include <iostream>
#include <vector>

using namespace std;
using namespace cv;

int main()
{
	Mat img = imread("lena.png");
	if (img.empty())
	{
		cout << "请确认图像文件名称是否正确" << endl;
		return -1;
	}
	Mat HSV;
	cvtColor(img, HSV, COLOR_BGR2HSV);
	Mat imgs0, imgs1, imgs2;  //用于存放数组类型的结果
	Mat imgv0, imgv1, imgv2;  //用于存放vector类型的结果
	Mat result0, result1, result2;  //多通道合并的结果

									//输入数组参数的多通道分离与合并
	Mat imgs[3];
	split(img, imgs);
	imgs0 = imgs[0];
	imgs1 = imgs[1];
	imgs2 = imgs[2];
	imshow("RGB-B通道", imgs0);  //显示分离后B通道的像素值
	imshow("RGB-G通道", imgs1);  //显示分离后G通道的像素值
	imshow("RGB-R通道", imgs2);  //显示分离后R通道的像素值
	imgs[2] = img;  //将数组中的图像通道数变成不统一
	merge(imgs, 3, result0);  //合并图像
							  //imshow("result0", result0);  //imshow最多显示4个通道,因此结果在Image Watch中查看
	Mat zero = cv::Mat::zeros(img.rows, img.cols, CV_8UC1);
	imgs[0] = zero;
	imgs[2] = zero;
	merge(imgs, 3, result1);  //用于还原G通道的真实情况,合并结果为绿色
	imshow("result1", result1);  //显示合并结果

								 //输入vector参数的多通道分离与合并
	vector<Mat> imgv;
	split(HSV, imgv);
	imgv0 = imgv.at(0);
	imgv1 = imgv.at(1);
	imgv2 = imgv.at(2);
	imshow("HSV-H通道", imgv0);  //显示分离后H通道的像素值
	imshow("HSV-S通道", imgv1);  //显示分离后S通道的像素值
	imshow("HSV-V通道", imgv2);  //显示分离后V通道的像素值
	imgv.push_back(HSV);  //将vector中的图像通道数变成不统一
	merge(imgv, result2);  //合并图像
						   //imshow("result2", result2);  /imshow最多显示4个通道,因此结果在Image Watch中查看
	waitKey(0);
	return 0;
}

3.2 图像像素操作处理

3.2.1 图像像素统计

1.寻找图像像素最大值与最小值

minMaxLoc()

数据类型 Point

cv::Mat::reshape()

代码清单3-9 寻找矩阵中的最值

#include <opencv2\opencv.hpp>
#include <iostream>
#include <vector>

using namespace std;
using namespace cv;

int main()
{
	system("color F0");  //更改输出界面颜色
	float a[12] = { 1, 2, 3, 4, 5, 10, 6, 7, 8, 9, 10, 0 };
	Mat img = Mat(3, 4, CV_32FC1, a);  //单通道矩阵
	Mat imgs = Mat(2, 3, CV_32FC2, a);  //多通道矩阵
	double minVal, maxVal;  //用于存放矩阵中的最大值和最小值
	Point minIdx, maxIdx;  //用于存放矩阵中的最大值和最小值在矩阵中的位置

	/*寻找单通道矩阵中的最值*/
	minMaxLoc(img, &minVal, &maxVal, &minIdx, &maxIdx);
	cout << "img中最大值是:" << maxVal << "  " << "在矩阵中的位置:" << maxIdx << endl;
	cout << "img中最小值是:" << minVal << "  " << "在矩阵中的位置:" << minIdx << endl;

	/*寻找多通道矩阵中的最值*/
	Mat imgs_re = imgs.reshape(1, 4);  //将多通道矩阵变成单通道矩阵
	minMaxLoc(imgs_re, &minVal, &maxVal, &minIdx, &maxIdx);
	cout << "imgs中最大值是:" << maxVal << "  " << "在矩阵中的位置:" << maxIdx << endl;
	cout << "imgs中最小值是:" << minVal << "  " << "在矩阵中的位置:" << minIdx << endl;
	return 0;
}

2.计算图像的平均值和标准差

meanStdDev() 函数用于同时计算平均值和标准差

mean() 计算平均值

cv::Scalar 类型

代码清单3-12 计算矩阵平均值和标准差

#include <opencv2\opencv.hpp>
#include <iostream>
#include <vector>

using namespace std;
using namespace cv;

int main()
{
	system("color F0");  //更改输出界面颜色
	float a[12] = { 1, 2, 3, 4, 5, 10, 6, 7, 8, 9, 10, 0 };
	Mat img = Mat(3, 4, CV_32FC1, a);  //单通道矩阵
	Mat imgs = Mat(2, 3, CV_32FC2, a);  //多通道矩阵

	cout << "/* 用meanStdDev同时求取图像的均值和标准差 */" << endl;
	Scalar myMean;
	myMean = mean(imgs);
	cout << "imgs均值=" << myMean << endl;
	cout << "imgs第一个通道的均值=" << myMean[0] << "    "
		<< "imgs第二个通道的均值=" << myMean[1] << endl << endl;

	cout << "/* 用meanStdDev同时求取图像的均值和标准差 */" << endl;
	Mat myMeanMat, myStddevMat;

	meanStdDev(img, myMeanMat, myStddevMat);
	cout << "img均值=" << myMeanMat << "    " << endl;
	cout << "img标准差=" << myStddevMat << endl << endl;
	meanStdDev(imgs, myMeanMat, myStddevMat);
	cout << "imgs均值=" << myMeanMat << "    " << endl << endl;
	cout << "imgs标准差=" << myStddevMat << endl;
	return 0;
}

3.2.2 两图像间的像素操作

1.两幅图像的比较运算

max()

min()

代码清单3-14 两个矩阵或图像进行比较运算

#include <opencv2\opencv.hpp>
#include <iostream>
#include <vector>

using namespace std;
using namespace cv;

int main()
{
	float a[12] = { 1, 2, 3.3, 4, 5, 9, 5, 7, 8.2, 9, 10, 2 };
	float b[12] = { 1, 2.2, 3, 1, 3, 10, 6, 7, 8, 9.3, 10, 1 };
	Mat imga = Mat(3, 4, CV_32FC1, a);
	Mat imgb = Mat(3, 4, CV_32FC1, b);
	Mat imgas = Mat(2, 3, CV_32FC2, a);
	Mat imgbs = Mat(2, 3, CV_32FC2, b);

	//对两个单通道矩阵进行比较运算
	Mat myMax, myMin;
	max(imga, imgb, myMax);
	min(imga, imgb, myMin);

	//对两个多通道矩阵进行比较运算
	Mat myMaxs, myMins;
	max(imgas, imgbs, myMaxs);
	min(imgas, imgbs, myMins);

	//对两张彩色图像进行比较运算
	Mat img0 = imread("len.png");
	Mat img1 = imread("noobcv.jpg");

	if (img0.empty() || img1.empty())
	{
		cout << "请确认图像文件名称是否正确" << endl;
		return -1;
	}
	Mat comMin, comMax;
	max(img0, img1, comMax);
	min(img0, img1, comMin);
	imshow("comMin", comMin);
	imshow("comMax", comMax);

	//与掩模进行比较运算
	Mat src1 = Mat::zeros(Size(512, 512), CV_8UC3);
	Rect rect(100, 100, 300, 300);
	src1(rect) = Scalar(255, 255, 255);  //生成一个低通300*300的掩模
	Mat comsrc1, comsrc2;
	min(img0, src1, comsrc1);
	imshow("comsrc1", comsrc1);

	Mat src2 = Mat(512, 512, CV_8UC3, Scalar(0, 0, 255));  //生成一个显示红色通道的低通掩模
	min(img0, src2, comsrc2);
	imshow("comsrc2", comsrc2);

	//对两张灰度图像进行比较运算
	Mat img0G, img1G, comMinG, comMaxG;
	cvtColor(img0, img0G, COLOR_BGR2GRAY);
	cvtColor(img1, img1G, COLOR_BGR2GRAY);
	max(img0G, img1G, comMaxG);
	min(img0G, img1G, comMinG);
	imshow("comMinG", comMinG);
	imshow("comMaxG", comMaxG);
	waitKey(0);
	return 0;
}

2.两幅图像的逻辑运算

bitwise_and()

bitwise_or()

bitwise_xor()

bitwise_not()

代码清单3-16 两个黑白图像像素逻辑运算

#include <opencv2\opencv.hpp>
#include <iostream>
#include <vector>

using namespace std;
using namespace cv;

int main()
{
	Mat img = imread("lena.png");
	if (img.empty())
	{
		cout << "请确认图像文件名称是否正确" << endl;
		return -1;
	}
	//创建两个黑白图像
	Mat img0 = Mat::zeros(200, 200, CV_8UC1);
	Mat img1 = Mat::zeros(200, 200, CV_8UC1);
	Rect rect0(50, 50, 100, 100);
	img0(rect0) = Scalar(255);
	Rect rect1(100, 100, 100, 100);
	img1(rect1) = Scalar(255);
	imshow("img0", img0);
	imshow("img1", img1);

	//进行逻辑运算
	Mat myAnd, myOr, myXor, myNot, imgNot;
	bitwise_not(img0, myNot);
	bitwise_and(img0, img1, myAnd);
	bitwise_or(img0, img1, myOr);
	bitwise_xor(img0, img1, myXor);
	bitwise_not(img, imgNot);
	imshow("myAnd", myAnd);
	imshow("myOr", myOr);
	imshow("myXor", myXor);
	imshow("myNot", myNot);
	imshow("img", img);
	imshow("imgNot", imgNot);
	waitKey(0);
	return 0;
}

3.2.3 图像二值化

threshold()

adaptiveThreshold()

代码清单3-19 图像二值化

#include <opencv2\opencv.hpp>
#include <iostream>
#include <vector>

using namespace std;
using namespace cv;

int main()
{
	Mat img = imread("lena.png");
	if (img.empty())
	{
		cout << "请确认图像文件名称是否正确" << endl;
		return -1;
	}

	Mat gray;
	cvtColor(img, gray, COLOR_BGR2GRAY);
	Mat img_B, img_B_V, gray_B, gray_B_V, gray_T, gray_T_V, gray_TRUNC;

	//彩色图像二值化
	threshold(img, img_B, 125, 255, THRESH_BINARY);
	threshold(img, img_B_V, 125, 255, THRESH_BINARY_INV);
	imshow("img_B", img_B);
	imshow("img_B_V", img_B_V);

	//灰度图BINARY二值化
	threshold(gray, gray_B, 125, 255, THRESH_BINARY);
	threshold(gray, gray_B_V, 125, 255, THRESH_BINARY_INV);
	imshow("gray_B", gray_B);
	imshow("gray_B_V", gray_B_V);

	//灰度图像TOZERO变换
	threshold(gray, gray_T, 125, 255, THRESH_TOZERO);
	threshold(gray, gray_T_V, 125, 255, THRESH_TOZERO_INV);
	imshow("gray_T", gray_T);
	imshow("gray_T_V", gray_T_V);

	//灰度图像TRUNC变换
	threshold(gray, gray_TRUNC, 125, 255, THRESH_TRUNC);
	imshow("gray_TRUNC", gray_TRUNC);

	//灰度图像大津法和三角形法二值化
	Mat img_Thr = imread("threshold.png", IMREAD_GRAYSCALE);
	Mat img_Thr_O, img_Thr_T;
	threshold(img_Thr, img_Thr_O, 100, 255, THRESH_BINARY | THRESH_OTSU);
	threshold(img_Thr, img_Thr_T, 125, 255, THRESH_BINARY | THRESH_TRIANGLE);
	imshow("img_Thr", img_Thr);
	imshow("img_Thr_O", img_Thr_O);
	imshow("img_Thr_T", img_Thr_T);

	//灰度图像自适应二值化
	Mat adaptive_mean, adaptive_gauss;
	adaptiveThreshold(img_Thr, adaptive_mean, 255, ADAPTIVE_THRESH_MEAN_C, THRESH_BINARY, 55, 0);
	adaptiveThreshold(img_Thr, adaptive_gauss, 255, ADAPTIVE_THRESH_GAUSSIAN_C, THRESH_BINARY, 55, 0);

	imshow("adaptive_mean", adaptive_mean);
	imshow("adaptive_gauss", adaptive_gauss);
	waitKey(0);
	return 0;
}

3.2.4 LUT

LUT()

代码清单3-21 对图像进行查找表映射

#include <opencv2\opencv.hpp>
#include <iostream>

using namespace std;
using namespace cv;

int main()
{
	//LUT查找表第一层
	uchar lutFirst[256];
	for (int i = 0; i<256; i++)
	{
		if (i <= 100)
			lutFirst[i] = 0;
		if (i > 100 && i <= 200)
			lutFirst[i] = 100;
		if (i > 200)
			lutFirst[i] = 255;
	}
	Mat lutOne(1, 256, CV_8UC1, lutFirst);

	//LUT查找表第二层
	uchar lutSecond[256];
	for (int i = 0; i<256; i++)
	{
		if (i <= 100)
			lutSecond[i] = 0;
		if (i > 100 && i <= 150)
			lutSecond[i] = 100;
		if (i > 150 && i <= 200)
			lutSecond[i] = 150;
		if (i > 200)
			lutSecond[i] = 255;
	}
	Mat lutTwo(1, 256, CV_8UC1, lutSecond);

	//LUT查找表第三层
	uchar lutThird[256];
	for (int i = 0; i<256; i++)
	{
		if (i <= 100)
			lutThird[i] = 100;
		if (i > 100 && i <= 200)
			lutThird[i] = 200;
		if (i > 200)
			lutThird[i] = 255;
	}
	Mat lutThree(1, 256, CV_8UC1, lutThird);

	//拥有三通道的LUT查找表矩阵
	vector<Mat> mergeMats;
	mergeMats.push_back(lutOne);
	mergeMats.push_back(lutTwo);
	mergeMats.push_back(lutThree);
	Mat LutTree;
	merge(mergeMats, LutTree);

	//计算图像的查找表
	Mat img = imread("lena.png");
	if (img.empty())
	{
		cout << "请确认图像文件名称是否正确" << endl;
		return -1;
	}

	Mat gray, out0, out1, out2;
	cvtColor(img, gray, COLOR_BGR2GRAY);
	LUT(gray, lutOne, out0);
	LUT(img, lutOne, out1);
	LUT(img, LutTree, out2);
	imshow("out0", out0);
	imshow("out1", out1);
	imshow("out2", out2);
	waitKey(0);
	return 0;
}

3.3 图像变换

3.3.1 图像连接

vconcat()

hconcat()

代码清单3-26 图像拼接

#include <opencv2\opencv.hpp>
#include <iostream>

using namespace std;
using namespace cv;

int main()
{
	//矩阵数组的横竖连接
	Mat matArray[] = { Mat(1, 2, CV_32FC1, cv::Scalar(1)),
		Mat(1, 2, CV_32FC1, cv::Scalar(2)) };
	Mat vout, hout;
	vconcat(matArray, 2, vout);
	cout << "图像数组竖向连接:" << endl << vout << endl;
	hconcat(matArray, 2, hout);
	cout << "图像数组横向连接:" << endl << hout << endl;

	//矩阵的横竖拼接
	Mat A = (cv::Mat_<float>(2, 2) << 1, 7, 2, 8);
	Mat B = (cv::Mat_<float>(2, 2) << 4, 10, 5, 11);
	Mat vC, hC;
	vconcat(A, B, vC);
	cout << "多个图像竖向连接:" << endl << vC << endl;
	hconcat(A, B, hC);
	cout << "多个图像横向连接:" << endl << hC << endl;

	//读取4个子图像,00表示左上角、01表示右上角、10表示左下角、11表示右下角
	Mat img00 = imread("lena00.png");
	Mat img01 = imread("lena01.png");
	Mat img10 = imread("lena10.png");
	Mat img11 = imread("lena11.png");
	if (img00.empty() || img01.empty() || img10.empty() || img11.empty())
	{
		cout << "请确认图像文件名称是否正确" << endl;
		return -1;
	}
	//显示4个子图像
	imshow("img00", img00);
	imshow("img01", img01);
	imshow("img10", img10);
	imshow("img11", img11);

	//图像连接
	Mat img, img0, img1;
	//图像横向连接
	hconcat(img00, img01, img0);
	hconcat(img10, img11, img1);
	//横向连接结果再进行竖向连接
	vconcat(img0, img1, img);

	//显示连接图像的结果
	imshow("img0", img0);
	imshow("img1", img1);
	imshow("img", img);
	waitKey(0);
	return 0;
}

3.3.2 图像尺寸变换

resize()

代码清单3-28 图像缩放

#include <opencv2\opencv.hpp>
#include <iostream>

using namespace std;
using namespace cv;

int main()
{
	Mat gray = imread("lena.png", IMREAD_GRAYSCALE);
	if (gray.empty())
	{
		cout << "请确认图像文件名称是否正确" << endl;
		return -1;
	}

	Mat smallImg, bigImg0, bigImg1, bigImg2;
	resize(gray, smallImg, Size(15, 15), 0, 0, INTER_AREA);  //先将图像缩小
	resize(smallImg, bigImg0, Size(30, 30), 0, 0, INTER_NEAREST);  //最近邻插值
	resize(smallImg, bigImg1, Size(30, 30), 0, 0, INTER_LINEAR);  //双线性插值
	resize(smallImg, bigImg2, Size(30, 30), 0, 0, INTER_CUBIC);  //双三次插值
	namedWindow("smallImg", WINDOW_NORMAL);  //图像尺寸太小,一定要设置可以调节窗口大小标志
	imshow("smallImg", smallImg);
	namedWindow("bigImg0", WINDOW_NORMAL);
	imshow("bigImg0", bigImg0);
	namedWindow("bigImg1", WINDOW_NORMAL);
	imshow("bigImg1", bigImg1);
	namedWindow("bigImg2", WINDOW_NORMAL);
	imshow("bigImg2", bigImg2);
	waitKey(0);
	return 0;
}

3.3.3 图像翻转变换

flip()

代码清单3-29 图像翻转

#include <opencv2\opencv.hpp>
#include <iostream>

using namespace std;
using namespace cv;

int main()
{
	Mat img = imread("lena.png");
	if (img.empty())
	{
		cout << "请确认图像文件名称是否正确" << endl;
		return -1;
	}

	Mat img_x, img_y, img_xy;
	flip(img, img_x, 0);  //沿x轴对称
	flip(img, img_y, 1);  //沿y轴对称
	flip(img, img_xy, -1);  //先x轴对称,再y轴对称
	imshow("img", img);
	imshow("img_x", img_x);
	imshow("img_y", img_y);
	imshow("img_xy", img_xy);
	waitKey(0);
	return 0;
}

3.3.4 图像仿射变换

getRotationMatrix2D()

warpAffine()

getAffineTransform()

代码清单3-34 图像旋转与仿射变换

#include <opencv2\opencv.hpp>
#include <iostream>
#include <vector>

using namespace std;
using namespace cv;

int main()
{
	Mat img = imread("lena.png");
	if (img.empty())
	{
		cout << "请确认图像文件名称是否正确" << endl;
		return -1;
	}

	Mat rotation0, rotation1, img_warp0, img_warp1;
	double angle = 30;  //设置图像旋转的角度
	Size dst_size(img.rows, img.cols);  //设置输出图像的尺寸
	Point2f center(img.rows / 2.0, img.cols / 2.0);  //设置图像的旋转中心
	rotation0 = getRotationMatrix2D(center, angle, 1);  //计算仿射变换矩阵
	warpAffine(img, img_warp0, rotation0, dst_size);  //进行仿射变换
	imshow("img_warp0", img_warp0);
	//根据定义的三个点进行仿射变换
	Point2f src_points[3];
	Point2f dst_points[3];
	src_points[0] = Point2f(0, 0);  //原始图像中的三个点
	src_points[1] = Point2f(0, (float)(img.cols - 1));
	src_points[2] = Point2f((float)(img.rows - 1), (float)(img.cols - 1));
	dst_points[0] = Point2f((float)(img.rows)*0.11, (float)(img.cols)*0.20);  //仿射变换后图像中的三个点
	dst_points[1] = Point2f((float)(img.rows)*0.15, (float)(img.cols)*0.70);
	dst_points[2] = Point2f((float)(img.rows)*0.81, (float)(img.cols)*0.85);
	rotation1 = getAffineTransform(src_points, dst_points);  //根据对应点求取仿射变换矩阵
	warpAffine(img, img_warp1, rotation1, dst_size);  //进行仿射变换
	imshow("img_warp1", img_warp1);
	waitKey(0);
	return 0;
}

3.3.5 图像透视变换

getPerspectiveTransform()

warpPerspective()

代码清单3-37 二维码图像透视变换

#include <opencv2\opencv.hpp>
#include <iostream>

using namespace cv;
using namespace std;

int main()
{
	Mat img = imread("noobcvqr.png");
	if (img.empty())
	{
		cout << "请确认图像文件名称是否正确" << endl;
		return -1;
	}

	Point2f src_points[4];
	Point2f dst_points[4];
	//通过Image Watch查看的二维码四个角点坐标
	src_points[0] = Point2f(94.0, 374.0);
	src_points[1] = Point2f(507.0, 380.0);
	src_points[2] = Point2f(1.0, 623.0);
	src_points[3] = Point2f(627.0, 627.0);
	//期望透视变换后二维码四个角点的坐标
	dst_points[0] = Point2f(0.0, 0.0);
	dst_points[1] = Point2f(627.0, 0.0);
	dst_points[2] = Point2f(0.0, 627.0);
	dst_points[3] = Point2f(627.0, 627.0);
	Mat rotation, img_warp;
	rotation = getPerspectiveTransform(src_points, dst_points);  //计算透视变换矩阵
	warpPerspective(img, img_warp, rotation, img.size());  //透视变换投影
	imshow("img", img);
	imshow("img_warp", img_warp);
	waitKey(0);
	return 0;
}

3.3.6 极坐标变换

warpPolar()

代码清单3-39 图像极坐标变换

#include <opencv2\opencv.hpp>
#include <iostream>

using namespace std;
using namespace cv;

int main()
{
	Mat img = imread("dial.png");
	if (!img.data)
	{
		cout << "请检查图像文件名称是否输入正确" << endl;
		return -1;
	}

	Mat img1, img2;
	Point2f center = Point2f(img.cols / 2, img.rows/2);  //极坐标在图像中的原点
	//正极坐标变换
	warpPolar(img, img1, Size(300,600), center, center.x, INTER_LINEAR + WARP_POLAR_LINEAR);
	//逆极坐标变换
	warpPolar(img1, img2, Size(img.rows,img.cols), center, center.x, INTER_LINEAR + WARP_POLAR_LINEAR + WARP_INVERSE_MAP);

	imshow("原表盘图", img);
	imshow("表盘极坐标变换结果", img1); 
	imshow("逆变换结果", img2);
	waitKey(0);
	return 0;
}

3.4 在图像上绘制几何图形

3.4.1 绘制圆形

circle()

3.4.2 绘制直线

line()

3.4.3 绘制椭圆

ellipse()

ellipse2Poly()

3.4.4 绘制多边形

rectangle()

fillPoly()

3.4.5 文字生成

putText()

代码清单3-47 绘制基本几何图形

#include <opencv2\opencv.hpp>
#include <iostream>

using namespace cv;
using namespace std;

int main()
{
	Mat img = Mat::zeros(Size(512, 512), CV_8UC3);  //生成一个黑色图像用于绘制几何图形
	//绘制圆形
	circle(img, Point(50, 50), 25, Scalar(255, 255, 255), -1);  //绘制一个实心圆
	circle(img, Point(100, 50), 20, Scalar(255, 255, 255), 4);  //绘制一个空心圆
	//绘制直线
	line(img, Point(100, 100), Point(200, 100), Scalar(255, 255, 255), 2, LINE_4,0);  //绘制一条直线
	//绘制椭圆
	ellipse(img, Point(300, 255), Size(100, 70), 0, 0, 100, Scalar(255, 255, 255), -1);  //绘制实心椭圆的一部分
	ellipse(img, RotatedRect(Point2f(150, 100), Size2f(30, 20), 0), Scalar(0, 0, 255), 2);  //绘制一个空心椭圆
	vector<Point> points;
	ellipse2Poly(Point(200, 400), Size(100, 70),0,0,360,2,points);  //用一些点来近似一个椭圆
	for (int i = 0; i < points.size(); i++)  //用直线把这个椭圆画出来
	{
		if (i == points.size() - 1)
		{
			line(img, points[i], points[0], Scalar(255, 255, 255), 2);  //椭圆中最后一个点与第一个点连线
			break;
		}
		line(img, points[i], points[i + 1], Scalar(255, 255, 255), 2);  //当前点与后一个点连线
	}
	//绘制矩形
	rectangle(img, Point(50, 400), Point(100, 450), Scalar(125, 125, 125), -1);  
	rectangle(img, Rect(400,450,60,50), Scalar(0, 125, 125), 2);
	//绘制多边形
	Point pp[2][6];
	pp[0][0] = Point(72, 200);
	pp[0][1] = Point(142, 204);
	pp[0][2] = Point(226, 263);
	pp[0][3] = Point(172, 310);
	pp[0][4] = Point(117, 319);
	pp[0][5] = Point(15, 260);
	pp[1][0] = Point(359, 339);
	pp[1][1] = Point(447, 351);
	pp[1][2] = Point(504, 349);
	pp[1][3] = Point(484, 433);
	pp[1][4] = Point(418, 449);
	pp[1][5] = Point(354, 402);
	Point pp2[5];
	pp2[0] = Point(350, 83);
	pp2[1] = Point(463, 90);
	pp2[2] = Point(500, 171);
	pp2[3] = Point(421, 194);
	pp2[4] = Point(338, 141);
	const Point* pts[3] = { pp[0],pp[1],pp2 };  //pts变量的生成
	int npts[] = { 6,6,5 };  //顶点个数数组的生成
	fillPoly(img, pts, npts, 3, Scalar(125, 125, 125),8);  //绘制3个多边形
	//生成文字
	putText(img, "Learn OpenCV 4",Point(100, 400), 2, 1, Scalar(255, 255, 255));
	imshow("", img);
	waitKey(0);
	return 0;
}

3.5 感兴趣区域

Rect数据结构和Range数据结构

copyTo()

代码清单3-50 截图、深浅拷贝验证程序

#include <opencv2\opencv.hpp>
#include <iostream>

using namespace cv;
using namespace std;

int main()
{
	Mat img = imread("lena.png");
	Mat noobcv = imread("noobcv.jpg");
	if (img.empty() || noobcv.empty())
	{
		cout << "请确认图像文件名称是否正确" << endl;
		return -1;
	}
	Mat ROI1, ROI2, ROI2_copy, mask, img2, img_copy, img_copy2;
	resize(noobcv, mask, Size(200, 200));
	img2 = img;  //浅拷贝
				 //深拷贝的两种方式
	img.copyTo(img_copy2);
	copyTo(img, img_copy, img);
	//两种在图中截取ROI区域的方式
	Rect rect(206, 206, 200, 200);  //定义ROI区域
	ROI1 = img(rect);  //截图
	ROI2 = img(Range(300, 500), Range(300, 500));  //第二种截图方式
	img(Range(300, 500), Range(300, 500)).copyTo(ROI2_copy);  //深拷贝
	mask.copyTo(ROI1);  //在图像中加入部分图像
	imshow("加入noobcv后图像", img);
	imshow("ROI对ROI2的影响", ROI2);
	imshow("深拷贝的ROI2_copy", ROI2_copy);
	circle(img, Point(300, 300), 20, Scalar(0, 0, 255), -1);  //绘制一个圆形
	imshow("浅拷贝的img2", img2);
	imshow("深拷贝的img_copy", img_copy);
	imshow("深拷贝的img_copy2", img_copy2);
	imshow("画圆对ROI1的影响", ROI1);
	waitKey(0);
	return 0;
}

3.6 图像“金字塔”

3.6.1 高斯“金字塔”

pyrDown()

3.6.2 拉普拉斯“金字塔”

pyrUp()

代码清单3-53 构建高斯“金字塔”和拉普拉斯“金字塔”

#include <opencv2\opencv.hpp>
#include <iostream>

using namespace cv;
using namespace std;

int main()
{
	Mat img = imread("lena.png");
	if (img.empty())
	{
		cout << "请确认图像文件名称是否正确" << endl;
		return -1;
	}

	vector<Mat> Gauss, Lap;  //高斯金字塔和拉普拉斯金字塔
	int level = 3;  //高斯金字塔下采样次数
	Gauss.push_back(img);  //将原图作为高斯金字塔的第0层
						   //构建高斯金字塔
	for (int i = 0; i < level; i++)
	{
		Mat gauss;
		pyrDown(Gauss[i], gauss);  //下采样
		Gauss.push_back(gauss);
	}
	//构建拉普拉斯金字塔
	for (int i = Gauss.size() - 1; i > 0; i--)
	{
		Mat lap, upGauss;
		if (i == Gauss.size() - 1)  //如果是高斯金字塔中的最上面一层图像
		{
			Mat down;
			pyrDown(Gauss[i], down);  //下采样
			pyrUp(down, upGauss);  //上采样
			lap = Gauss[i] - upGauss;
			Lap.push_back(lap);
		}
		pyrUp(Gauss[i], upGauss);
		lap = Gauss[i - 1] - upGauss;
		Lap.push_back(lap);
	}
	//查看两个金字塔中的图像
	for (int i = 0; i < Gauss.size(); i++)
	{
		string name = to_string(i);
		imshow("G" + name, Gauss[i]);
		imshow("L" + name, Lap[i]);
	}
	waitKey(0);
	return 0;
}

3.7 窗口交互操作

3.7.1 图像窗口滑动条

createTrackbar()

代码清单3-55 在图像中创建滑动条改变图像亮度

#include <opencv2/opencv.hpp>
#include <iostream>

using namespace std;
using namespace cv;

//为了能在被调函数中使用,所以设置成全局的
int value;
void callBack(int, void*);  //滑动条回调函数
Mat img1, img2;

int main()
{
	img1 = imread("lena.png");
	if (!img1.data)
	{
		cout << "请确认是否输入正确的图像文件" << endl;
		return -1;
	}
	namedWindow("滑动条改变图像亮度");
	imshow("滑动条改变图像亮度", img1);
	value = 100;  //滑动条创建时的初值
				  //创建滑动条
	createTrackbar("亮度值百分比", "滑动条改变图像亮度", &value, 600, callBack, 0);
	waitKey();
}

static void callBack(int, void*)
{
	float a = value / 100.0;
	img2 = img1 * a;
	imshow("滑动条改变图像亮度", img2);
}

3.7.2 鼠标响应

setMouseCallback()

MouseCallback类型

代码清单3-58 绘制鼠标移动轨迹

#include <opencv2/opencv.hpp>
#include <iostream>

using namespace std;
using namespace cv;

Mat img, imgPoint; //全局的图像
Point prePoint; //前一时刻鼠标的坐标,用于绘制直线
void mouse(int event, int x, int y, int flags, void*);

int main()
{
	img = imread("lena.png");
	if (!img.data)
		{
			cout << "请确认输入图像名称是否正确! " << endl;
			return -1;
		}
	img.copyTo(imgPoint);
	imshow("图像窗口 1", img);
	imshow("图像窗口 2", imgPoint);
	setMouseCallback("图像窗口 1", mouse, 0); //鼠标影响
	waitKey(0);
	return 0;
	}

void mouse(int event, int x, int y, int flags, void*)
{
	if (event == EVENT_RBUTTONDOWN) //单击右键
	{
		cout << "点击鼠标左键才可以绘制轨迹" << endl;
	}
	if (event == EVENT_LBUTTONDOWN) //单击左键,输出坐标
	{
		prePoint = Point(x, y);
		cout << "轨迹起始坐标" << prePoint << endl;
	}
	if (event == EVENT_MOUSEMOVE && (flags & EVENT_FLAG_LBUTTON)) //鼠标按住左键移动
	{
	 //通过改变图像像素显示鼠标移动轨迹
		imgPoint.at<Vec3b>(y, x) = Vec3b(0, 0, 255);
		imgPoint.at<Vec3b>(y, x - 1) = Vec3b(0, 0, 255);
		imgPoint.at<Vec3b>(y, x + 1) = Vec3b(0, 0, 255);
		imgPoint.at<Vec3b>(y + 1, x) = Vec3b(0, 0, 255);
		imgPoint.at<Vec3b>(y - 1, x) = Vec3b(0, 0, 255);
		imshow("图像窗口 2", imgPoint);
		
	    //通过绘制直线显示鼠标移动轨迹
		Point pt(x, y);
		line(img, prePoint, pt, Scalar(0, 0, 255), 2, 5, 0);
		prePoint = pt;
		imshow("图像窗口 1", img);
	}
}

第四章 图像直方图与模板匹配

4.1 图像直方图的绘制

calcHist()

cvRound()

代码清单4-2 绘制图像直方图

#include <opencv2\opencv.hpp>
#include <iostream>

using namespace cv;
using namespace std;

int main()
{
	Mat img = imread("apple.jpg");
	if (img.empty())
	{
		cout << "请确认图像文件名称是否正确" << endl;
		return -1;
	}
	Mat gray;
	cvtColor(img, gray, COLOR_BGR2GRAY);
	//设置提取直方图的相关变量
	Mat hist;  //用于存放直方图计算结果
	const int channels[1] = { 0 };  //通道索引
	float inRanges[2] = { 0,255 };
	const float* ranges[1] = { inRanges };  //像素灰度值范围
	const int bins[1] = { 256 };  //直方图的区间个数,即灰度值的可能取值个数
	calcHist(&gray, 1, channels, Mat(), hist, 1, bins, ranges);  //计算图像直方图
																//准备绘制直方图
	int hist_w = 512;
	int hist_h = 400;
	int width = 2;
	Mat histImage = Mat::zeros(hist_h, hist_w, CV_8UC3);
	for (int i = 1; i <= hist.rows; i++)
	{
		rectangle(histImage, Point(width*(i - 1), hist_h - 1),
			Point(width*i - 1, hist_h - cvRound(hist.at<float>(i - 1) / 15)),
			Scalar(255, 255, 255), -1);
	}
	namedWindow("histImage", WINDOW_AUTOSIZE);
	imshow("histImage", histImage);
	imshow("gray", gray);
	waitKey(0);
	return 0;
}

4.2 直方图操作

4.2.1 直方图归一化

normalize()

代码清单4-4 直方图归一化操作

#include <opencv2\opencv.hpp>
#include <iostream>

using namespace cv;
using namespace std;

int main()
{
	system("color F0");  //更改输出界面颜色
	vector<double> positiveData = { 2.0, 8.0, 10.0 };
	vector<double> normalized_L1, normalized_L2, normalized_Inf, normalized_L2SQR;
	//测试不同归一化方法
	normalize(positiveData, normalized_L1, 1.0, 0.0, NORM_L1);  //绝对值求和归一化
	cout << "normalized_L1=[" << normalized_L1[0] << ", "
		<< normalized_L1[1] << ", " << normalized_L1[2] << "]" << endl;
	normalize(positiveData, normalized_L2, 1.0, 0.0, NORM_L2);  //模长归一化
	cout << "normalized_L2=[" << normalized_L2[0] << ", "
		<< normalized_L2[1] << ", " << normalized_L2[2] << "]" << endl;
	normalize(positiveData, normalized_Inf, 1.0, 0.0, NORM_INF);  //最大值归一化
	cout << "normalized_Inf=[" << normalized_Inf[0] << ", "
		<< normalized_Inf[1] << ", " << normalized_Inf[2] << "]" << endl;
	normalize(positiveData, normalized_L2SQR, 1.0, 0.0, NORM_MINMAX);  //偏移归一化
	cout << "normalized_MINMAX=[" << normalized_L2SQR[0] << ", "
		<< normalized_L2SQR[1] << ", " << normalized_L2SQR[2] << "]" << endl;
	//将图像直方图归一化
	Mat img = imread("apple.jpg");
	if (img.empty())
	{
		cout << "请确认图像文件名称是否正确" << endl;
		return -1;
	}
	Mat gray, hist;
	cvtColor(img, gray, COLOR_BGR2GRAY);
	const int channels[1] = { 0 };
	float inRanges[2] = { 0,255 };
	const float* ranges[1] = { inRanges };
	const int bins[1] = { 256 };
	calcHist(&gray, 1, channels, Mat(), hist, 1, bins, ranges);
	int hist_w = 512;
	int hist_h = 400;
	int width = 2;
	Mat histImage_L1 = Mat::zeros(hist_h, hist_w, CV_8UC3);
	Mat histImage_Inf = Mat::zeros(hist_h, hist_w, CV_8UC3);
	Mat hist_L1, hist_Inf;
	normalize(hist, hist_L1, 1, 0, NORM_L1, -1, Mat());
	for (int i = 1; i <= hist_L1.rows; i++)
	{
		rectangle(histImage_L1, Point(width*(i - 1), hist_h - 1),
			Point(width*i - 1, hist_h - cvRound(30 * hist_h*hist_L1.at<float>(i - 1)) - 1),
			Scalar(255, 255, 255), -1);
	}
	normalize(hist, hist_Inf, 1, 0, NORM_INF, -1, Mat());
	for (int i = 1; i <= hist_Inf.rows; i++)
	{
		rectangle(histImage_Inf, Point(width*(i - 1), hist_h - 1),
			Point(width*i - 1, hist_h - cvRound(hist_h*hist_Inf.at<float>(i - 1)) - 1),
			Scalar(255, 255, 255), -1);
	}
	imshow("histImage_L1", histImage_L1);
	imshow("histImage_Inf", histImage_Inf);
	waitKey(0);
	return 0;
}

4.2.2 直方图比较

compareHist()

代码清单4-6 比较两个直方图的相似性

#include <opencv2\opencv.hpp>
#include <iostream>

using namespace cv;
using namespace std;

void drawHist(Mat &hist, int type, string name)  //归一化并绘制直方图函数
{
	int hist_w = 512;
	int hist_h = 400;
	int width = 2;
	Mat histImage = Mat::zeros(hist_h, hist_w, CV_8UC3);
	normalize(hist, hist, 1, 0, type, -1, Mat());
	for (int i = 1; i <= hist.rows; i++)
	{
		rectangle(histImage, Point(width*(i - 1), hist_h - 1),
			Point(width*i - 1, hist_h - cvRound(hist_h*hist.at<float>(i - 1)) - 1),
			Scalar(255, 255, 255), -1);
	}
	imshow(name, histImage);
}
//主函数
int main()
{
	system("color F0");  //更改输出界面颜色
	Mat img = imread("apple.jpg");
	if (img.empty())
	{
		cout << "请确认图像文件名称是否正确" << endl;
		return -1;
	}
	Mat gray, hist, gray2, hist2, gray3, hist3;
	cvtColor(img, gray, COLOR_BGR2GRAY);
	resize(gray, gray2, Size(), 0.5, 0.5);
	gray3 = imread("lena.png", IMREAD_GRAYSCALE);
	const int channels[1] = { 0 };
	float inRanges[2] = { 0,255 };
	const float* ranges[1] = { inRanges };
	const int bins[1] = { 256 };
	calcHist(&gray, 1, channels, Mat(), hist, 1, bins, ranges);
	calcHist(&gray2, 1, channels, Mat(), hist2, 1, bins, ranges);
	calcHist(&gray3, 1, channels, Mat(), hist3, 1, bins, ranges);
	drawHist(hist, NORM_INF, "hist");
	drawHist(hist2, NORM_INF, "hist2");
	drawHist(hist3, NORM_INF, "hist3");
	//原图直方图与原图直方图的相关系数
	double hist_hist = compareHist(hist, hist, HISTCMP_CORREL);
	cout << "apple_apple=" << hist_hist << endl;
	//原图直方图与缩小原图直方图的相关系数
	double hist_hist2 = compareHist(hist, hist2, HISTCMP_CORREL);
	cout << "apple_apple256=" << hist_hist2 << endl;
	//两张不同图像直方图相关系数
	double hist_hist3 = compareHist(hist, hist3, HISTCMP_CORREL);
	cout << "apple_lena=" << hist_hist3 << endl;
	waitKey(0);
	return 0;
}

4.3 直方图应用

4.3.1 直方图均衡化

equalizeHist()

代码清单4-8 直方图均衡化实现

#include <opencv2\opencv.hpp>
#include <iostream>

using namespace cv;
using namespace std;

void drawHist(Mat &hist, int type, string name)  //归一化并绘制直方图函数
{
	int hist_w = 512;
	int hist_h = 400;
	int width = 2;
	Mat histImage = Mat::zeros(hist_h, hist_w, CV_8UC3);
	normalize(hist, hist, 1, 0, type, -1, Mat());
	for (int i = 1; i <= hist.rows; i++)
	{
		rectangle(histImage, Point(width*(i - 1), hist_h - 1),
			Point(width*i - 1, hist_h - cvRound(hist_h*hist.at<float>(i - 1)) - 1),
			Scalar(255, 255, 255), -1);
	}
	imshow(name, histImage);
}
//主函数
int main()
{
	Mat img = imread("gearwheel.jpg");
	if (img.empty())
	{
		cout << "请确认图像文件名称是否正确" << endl;
		return -1;
	}
	Mat gray, hist, hist2;
	cvtColor(img, gray, COLOR_BGR2GRAY);
	Mat equalImg;
	equalizeHist(gray, equalImg);  //将图像直方图均衡化
	const int channels[1] = { 0 };
	float inRanges[2] = { 0,255 };
	const float* ranges[1] = { inRanges };
	const int bins[1] = { 256 };
	calcHist(&gray, 1, channels, Mat(), hist, 1, bins, ranges);
	calcHist(&equalImg, 1, channels, Mat(), hist2, 1, bins, ranges);
	drawHist(hist, NORM_INF, "hist");
	drawHist(hist2, NORM_INF, "hist2");
	imshow("原图", gray);
	imshow("均衡化后的图像", equalImg);
	waitKey(0);
	return 0;
}

4.3.2 直方图匹配

代码清单4-9 图像直方图匹配

#include <opencv2\opencv.hpp>
#include <iostream>

using namespace cv;
using namespace std;

void drawHist(Mat &hist, int type, string name)  //归一化并绘制直方图函数
{
	int hist_w = 512;
	int hist_h = 400;
	int width = 2;
	Mat histImage = Mat::zeros(hist_h, hist_w, CV_8UC3);
	normalize(hist, hist, 1, 0, type, -1, Mat());
	for (int i = 1; i <= hist.rows; i++)
	{
		rectangle(histImage, Point(width*(i - 1), hist_h - 1),
			Point(width*i - 1, hist_h - cvRound(20 * hist_h*hist.at<float>(i - 1)) - 1),
			Scalar(255, 255, 255), -1);
	}
	imshow(name, histImage);
}
//主函数
int main()
{
	Mat img1 = imread("histMatch.png");
	Mat img2 = imread("equalLena.png");
	if (img1.empty() || img2.empty())
	{
		cout << "请确认图像文件名称是否正确" << endl;
		return -1;
	}
	Mat hist1, hist2;
	//计算两张图像直方图
	const int channels[1] = { 0 };
	float inRanges[2] = { 0,255 };
	const float* ranges[1] = { inRanges };
	const int bins[1] = { 256 };
	calcHist(&img1, 1, channels, Mat(), hist1, 1, bins, ranges);
	calcHist(&img2, 1, channels, Mat(), hist2, 1, bins, ranges);
	//归一化两张图像的直方图
	drawHist(hist1, NORM_L1, "hist1");
	drawHist(hist2, NORM_L1, "hist2");
	//计算两张图像直方图的累积概率
	float hist1_cdf[256] = { hist1.at<float>(0) };
	float hist2_cdf[256] = { hist2.at<float>(0) };
	for (int i = 1; i < 256; i++)
	{
		hist1_cdf[i] = hist1_cdf[i - 1] + hist1.at<float>(i);
		hist2_cdf[i] = hist2_cdf[i - 1] + hist2.at<float>(i);

	}
	//构建累积概率误差矩阵
	float diff_cdf[256][256];
	for (int i = 0; i < 256; i++)
	{
		for (int j = 0; j < 256; j++)
		{
			diff_cdf[i][j] = fabs(hist1_cdf[i] - hist2_cdf[j]);
		}
	}

	//生成LUT映射表
	Mat lut(1, 256, CV_8U);
	for (int i = 0; i < 256; i++)
	{
		// 查找源灰度级为i的映射灰度
		// 和i的累积概率差值最小的规定化灰度
		float min = diff_cdf[i][0];
		int index = 0;
		//寻找累积概率误差矩阵中每一行中的最小值
		for (int j = 1; j < 256; j++)
		{
			if (min > diff_cdf[i][j])
			{
				min = diff_cdf[i][j];
				index = j;
			}
		}
		lut.at<uchar>(i) = (uchar)index;
	}
	Mat result, hist3;
	LUT(img1, lut, result);
	imshow("待匹配图像", img1);
	imshow("匹配的模板图像", img2);
	imshow("直方图匹配结果", result);
	calcHist(&result, 1, channels, Mat(), hist3, 1, bins, ranges);
	drawHist(hist3, NORM_L1, "hist3");  //绘制匹配后的图像直方图
	waitKey(0);
	return 0;
}

4.3.3 直方图反向投影

calcBackProject()

代码清单4-11 图像直方图反向投影

#include <opencv2\opencv.hpp>
#include <iostream>

using namespace cv;
using namespace std;

void drawHist(Mat &hist, int type, string name)  //归一化并绘制直方图函数
{
	int hist_w = 512;
	int hist_h = 400;
	int width = 2;
	Mat histImage = Mat::zeros(hist_h, hist_w, CV_8UC3);
	normalize(hist, hist, 255, 0, type, -1, Mat());
	namedWindow(name, WINDOW_NORMAL);
	imshow(name, hist);
}
//主函数
int main()
{
	Mat img = imread("apple.jpg");
	Mat sub_img = imread("sub_apple.jpg");
	Mat img_HSV, sub_HSV, hist, hist2;
	if (img.empty() || sub_img.empty())
	{
		cout << "请确认图像文件名称是否正确" << endl;
		return -1;
	}

	imshow("img", img);
	imshow("sub_img", sub_img);
	//转成HSV空间,提取H、S两个通道
	cvtColor(img, img_HSV, COLOR_BGR2HSV);
	cvtColor(sub_img, sub_HSV, COLOR_BGR2HSV);
	int h_bins = 32; int s_bins = 32;
	int histSize[] = { h_bins, s_bins };
	//H通道值的范围由0到179
	float h_ranges[] = { 0, 180 };
	//S通道值的范围由0到255
	float s_ranges[] = { 0, 256 };
	const float* ranges[] = { h_ranges, s_ranges };  //每个通道的范围
	int channels[] = { 0, 1 };  //统计的通道索引
								//绘制H-S二维直方图
	calcHist(&sub_HSV, 1, channels, Mat(), hist, 2, histSize, ranges, true, false);
	drawHist(hist, NORM_INF, "hist");  //直方图归一化并绘制直方图
	Mat backproj;
	calcBackProject(&img_HSV, 1, channels, hist, backproj, ranges, 1.0);  //直方图反向投影
	imshow("反向投影后结果", backproj);
	waitKey(0);
	return 0;
}

4.4 图像的模板匹配

matchTemplate()

代码清单4-13 图像的模板匹配

#include <opencv2\opencv.hpp>
#include <iostream>

using namespace cv;
using namespace std;

int main()
{
	Mat img = imread("lena.png");
	Mat temp = imread("lena_face.png");
	if (img.empty() || temp.empty())
	{
		cout << "请确认图像文件名称是否正确" << endl;
		return -1;
	}
	Mat result;
	matchTemplate(img, temp, result, TM_CCOEFF_NORMED);//模板匹配
	double maxVal, minVal;
	Point minLoc, maxLoc;
	//寻找匹配结果中的最大值和最小值以及坐标位置
	minMaxLoc(result, &minVal, &maxVal, &minLoc, &maxLoc);
	//绘制最佳匹配区域
	rectangle(img, cv::Rect(maxLoc.x, maxLoc.y, temp.cols, temp.rows), Scalar(0, 0, 255), 2);
	imshow("原图", img);
	imshow("模板图像", temp);
	imshow("result", result);
	waitKey(0);
	return 0;
}

第五章 图像滤波

5.1 图像卷积

filter2D()

代码清单5-2 图像卷积

#include <opencv2\opencv.hpp>
#include <iostream>

using namespace cv;
using namespace std;

int main()
{
	//待卷积矩阵
	uchar points[25] = { 1,2,3,4,5,
		6,7,8,9,10,
		11,12,13,14,15,
		16,17,18,19,20,
		21,22,23,24,25 };
	Mat img(5, 5, CV_8UC1, points);
	//卷积模板
	Mat kernel = (Mat_<float>(3, 3) << 1, 2, 1,
		2, 0, 2,
		1, 2, 1);
	Mat kernel_norm = kernel / 12;  //卷积模板归一化
									//未归一化卷积结果和归一化卷积结果
	Mat result, result_norm;
	filter2D(img, result, CV_32F, kernel, Point(-1, -1), 2, BORDER_CONSTANT);
	filter2D(img, result_norm, CV_32F, kernel_norm, Point(-1, -1), 2, BORDER_CONSTANT);
	cout << "result:" << endl << result << endl;
	cout << "result_norm:" << endl << result_norm << endl;
	//图像卷积
	Mat lena = imread("lena.png");
	if (lena.empty())
	{
		cout << "请确认图像文件名称是否正确" << endl;
		return -1;
	}
	Mat lena_fillter;
	filter2D(lena, lena_fillter, -1, kernel_norm, Point(-1, -1), 2, BORDER_CONSTANT);
	imshow("lena_fillter", lena_fillter);
	imshow("lena", lena);
	waitKey(0);
	return 0;
}

5.2 噪声的种类与生成

5.2.1 椒盐噪声

rand()

rand_double() 、 rand_int()

cvflann类

stdlib.h头文件

代码清单5-4 图像中添加椒盐噪声

#include <opencv2\opencv.hpp>
#include <iostream>

using namespace cv;
using namespace std;

//盐噪声函数
void saltAndPepper(cv::Mat image, int n)
{
	for (int k = 0; k<n / 2; k++)
	{
		//随机确定图像中位置
		int i, j;
		i = std::rand() % image.cols;  //取余数运算,保证在图像的列数内 
		j = std::rand() % image.rows;  //取余数运算,保证在图像的行数内 
		int write_black = std::rand() % 2;  //判定为白色噪声还是黑色噪声的变量
		if (write_black == 0)  //添加白色噪声
		{
			if (image.type() == CV_8UC1)  //处理灰度图像
			{
				image.at<uchar>(j, i) = 255;  //白色噪声
			}
			else if (image.type() == CV_8UC3)  //处理彩色图像
			{
				image.at<cv::Vec3b>(j, i)[0] = 255; //cv::Vec3b为opencv定义的一个3个值的向量类型  
				image.at<cv::Vec3b>(j, i)[1] = 255; //[]指定通道,B:0,G:1,R:2  
				image.at<cv::Vec3b>(j, i)[2] = 255;
			}
		}
		else  //添加黑色噪声
		{
			if (image.type() == CV_8UC1)
			{
				image.at<uchar>(j, i) = 0;
			}
			else if (image.type() == CV_8UC3)
			{
				image.at<cv::Vec3b>(j, i)[0] = 0; //cv::Vec3b为opencv定义的一个3个值的向量类型  
				image.at<cv::Vec3b>(j, i)[1] = 0; //[]指定通道,B:0,G:1,R:2  
				image.at<cv::Vec3b>(j, i)[2] = 0;
			}
		}

	}
}

int main()
{
	Mat lena = imread("lena.png");
	Mat equalLena = imread("equalLena.png", IMREAD_ANYDEPTH);
	if (lena.empty() || equalLena.empty())
	{
		cout << "请确认图像文件名称是否正确" << endl;
		return -1;
	}
	imshow("lena原图", lena);
	imshow("equalLena原图", equalLena);
	saltAndPepper(lena, 10000);  //彩色图像添加椒盐噪声
	saltAndPepper(equalLena, 10000);  //灰度图像添加椒盐噪声
	imshow("lena添加噪声", lena);
	imshow("equalLena添加噪声", equalLena);
	waitKey(0);
	return 0;
}

5.2.2 高斯噪声

fill()

RNG::fill()

代码清单5-7 图像中添加高斯噪声

#include <opencv2\opencv.hpp>
#include <iostream>

using namespace cv;
using namespace std;

int main()
{
	Mat lena = imread("lena.png");
	Mat equalLena = imread("equalLena.png", IMREAD_ANYDEPTH);
	if (lena.empty() || equalLena.empty())
	{
		cout << "请确认图像文件名称是否正确" << endl;
		return -1;
	}
	//生成与原图像同尺寸、数据类型和通道数的矩阵
	Mat lena_noise = Mat::zeros(lena.rows, lena.cols, lena.type());
	Mat equalLena_noise = Mat::zeros(lena.rows, lena.cols, equalLena.type());
	imshow("lena原图", lena);
	imshow("equalLena原图", equalLena);
	RNG rng;  //创建一个RNG类
	rng.fill(lena_noise, RNG::NORMAL, 10, 20);  //生成三通道的高斯分布随机数
	rng.fill(equalLena_noise, RNG::NORMAL, 15, 30);  //生成单通道的高斯分布随机数
	imshow("三通道高斯噪声", lena_noise);
	imshow("单通道高斯噪声", equalLena_noise);
	lena = lena + lena_noise;  //在彩色图像中添加高斯噪声
	equalLena = equalLena + equalLena_noise;  //在灰度图像中添加高斯噪声
											  //显示添加高斯噪声后的图像
	imshow("lena添加噪声", lena);
	imshow("equalLena添加噪声", equalLena);
	waitKey(0);
	return 0;
}

5.3 线性滤波

5.3.1 均值滤波

blur()

代码清单5-9 图像均值滤波

#include <opencv2\opencv.hpp>
#include <iostream>

using namespace cv;
using namespace std;

int main()
{
	Mat equalLena = imread("equalLena.png", IMREAD_ANYDEPTH);
	Mat equalLena_gauss = imread("equalLena_gauss.png", IMREAD_ANYDEPTH);
	Mat equalLena_salt = imread("equalLena_salt.png", IMREAD_ANYDEPTH);
	if (equalLena.empty() || equalLena_gauss.empty() || equalLena_salt.empty())
	{
		cout << "请确认图像文件名称是否正确" << endl;
		return -1;
	}
	Mat result_3, result_9;  //存放不含噪声滤波结果,后面数字代表滤波器尺寸
	Mat result_3gauss, result_9gauss;  //存放含有高斯噪声滤波结果,后面数字代表滤波器尺寸
	Mat result_3salt, result_9salt;  //存放含有椒盐噪声滤波结果,后面数字代表滤波器尺寸
	//调用均值滤波函数blur()进行滤波
	blur(equalLena, result_3, Size(3, 3));
	blur(equalLena, result_9, Size(9, 9));
	blur(equalLena_gauss, result_3gauss, Size(3, 3));
	blur(equalLena_gauss, result_9gauss, Size(9, 9));
	blur(equalLena_salt, result_3salt, Size(3, 3));
	blur(equalLena_salt, result_9salt, Size(9, 9));
	//显示不含噪声图像
	imshow("equalLena ", equalLena);
	imshow("result_3", result_3);
	imshow("result_9", result_9);
	//显示含有高斯噪声图像
	imshow("equalLena_gauss", equalLena_gauss);
	imshow("result_3gauss", result_3gauss);
	imshow("result_9gauss", result_9gauss);
	//显示含有椒盐噪声图像
	imshow("equalLena_salt", equalLena_salt);
	imshow("result_3salt", result_3salt);
	imshow("result_9salt", result_9salt);
	waitKey(0);
	return 0;
}

5.3.2 方框滤波

boxFilter()

sqrBoxFilter()

代码清单5-12 图像方框滤波

#include <opencv2\opencv.hpp>
#include <iostream>

using namespace cv;
using namespace std;

int main()
{
	Mat equalLena = imread("equalLena.png", IMREAD_ANYDEPTH);  //用于方框滤波的图像
	if (equalLena.empty())
	{
		cout << "请确认图像文件名称是否正确" << endl;
		return -1;
	}
	//验证方框滤波算法的数据矩阵
	float points[25] = { 1,2,3,4,5,
		6,7,8,9,10,
		11,12,13,14,15,
		16,17,18,19,20,
		21,22,23,24,25 };
	Mat data(5, 5, CV_32FC1, points);
	//将CV_8U类型转换成CV_32F类型
	Mat equalLena_32F;
	equalLena.convertTo(equalLena_32F, CV_32F, 1.0 / 255);
	Mat resultNorm, result, dataSqrNorm, dataSqr, equalLena_32FSqr;
	//方框滤波boxFilter()和sqrBoxFilter()
	boxFilter(equalLena, resultNorm, -1, Size(3, 3), Point(-1, -1), true);  //进行归一化
	boxFilter(equalLena, result, -1, Size(3, 3), Point(-1, -1), false);  //不进行归一化
	sqrBoxFilter(data, dataSqrNorm, -1, Size(3, 3), Point(-1, -1),
		true, BORDER_CONSTANT);  //进行归一化
	sqrBoxFilter(data, dataSqr, -1, Size(3, 3), Point(-1, -1),
		false, BORDER_CONSTANT);  //不进行归一化
	sqrBoxFilter(equalLena_32F, equalLena_32FSqr, -1, Size(3, 3), Point(-1, -1),
		true, BORDER_CONSTANT);
	//显示处理结果
	imshow("resultNorm", resultNorm);
	imshow("result", result);
	imshow("equalLena_32FSqr", equalLena_32FSqr);
	waitKey(0);
	return 0;
}

5.3.3 高斯滤波

GaussianBlur()

getGaussianKernel()

代码清单5-15 图像高斯滤波

#include <opencv2\opencv.hpp>
#include <iostream>

using namespace cv;
using namespace std;

int main()
{
	Mat equalLena = imread("equalLena.png", IMREAD_ANYDEPTH);
	Mat equalLena_gauss = imread("equalLena_gauss.png", IMREAD_ANYDEPTH);
	Mat equalLena_salt = imread("equalLena_salt.png", IMREAD_ANYDEPTH);
	if (equalLena.empty() || equalLena_gauss.empty() || equalLena_salt.empty())
	{
		cout << "请确认图像文件名称是否正确" << endl;
		return -1;
	}
	Mat result_5, result_9;  //存放不含噪声滤波结果,后面数字代表滤波器尺寸
	Mat result_5gauss, result_9gauss;  //存放含有高斯噪声滤波结果,后面数字代表滤波器尺寸
	Mat result_5salt, result_9salt;  //存放含有椒盐噪声滤波结果,后面数字代表滤波器尺寸
	//调用高斯滤波函数GaussianBlur()进行滤波
	GaussianBlur(equalLena, result_5, Size(5, 5), 10, 20);
	GaussianBlur(equalLena, result_9, Size(9, 9), 10, 20);
	GaussianBlur(equalLena_gauss, result_5gauss, Size(5, 5), 10, 20);
	GaussianBlur(equalLena_gauss, result_9gauss, Size(9, 9), 10, 20);
	GaussianBlur(equalLena_salt, result_5salt, Size(5, 5), 10, 20);
	GaussianBlur(equalLena_salt, result_9salt, Size(9, 9), 10, 20);
	//显示不含噪声图像
	imshow("equalLena ", equalLena);
	imshow("result_5", result_5);
	imshow("result_9", result_9);
	//显示含有高斯噪声图像
	imshow("equalLena_gauss", equalLena_gauss);
	imshow("result_5gauss", result_5gauss);
	imshow("result_9gauss", result_9gauss);
	//显示含有椒盐噪声图像
	imshow("equalLena_salt", equalLena_salt);
	imshow("result_5salt", result_5salt);
	imshow("result_9salt", result_9salt);
	waitKey(0);
	return 0;
}

5.3.4 可分离滤波

sepFilter2D()

filter2D()

代码清单5-17 可分离图像滤波

#include <opencv2\opencv.hpp>
#include <iostream>

using namespace cv;
using namespace std;

int main()
{
	system("color F0");  //更改输出界面颜色
	float points[25] = { 1,2,3,4,5,
		6,7,8,9,10,
		11,12,13,14,15,
		16,17,18,19,20,
		21,22,23,24,25 };
	Mat data(5, 5, CV_32FC1, points);
	//X方向、Y方向和联合滤波器的构建
	Mat a = (Mat_<float>(3, 1) << -1, 3, -1);
	Mat b = a.reshape(1, 1);
	Mat ab = a*b;
	//验证高斯滤波的可分离性
	Mat gaussX = getGaussianKernel(3, 1);
	Mat gaussData, gaussDataXY;
	GaussianBlur(data, gaussData, Size(3, 3), 1, 1, BORDER_CONSTANT);
	sepFilter2D(data, gaussDataXY, -1, gaussX, gaussX, Point(-1, -1), 0, BORDER_CONSTANT);
	//输入两种高斯滤波的计算结果
	cout << "gaussData=" << endl
		<< gaussData << endl;
	cout << "gaussDataXY=" << endl
		<< gaussDataXY << endl;
	//线性滤波的可分离性
	Mat dataYX, dataY, dataXY, dataXY_sep;
	filter2D(data, dataY, -1, a, Point(-1, -1), 0, BORDER_CONSTANT);
	filter2D(dataY, dataYX, -1, b, Point(-1, -1), 0, BORDER_CONSTANT);
	filter2D(data, dataXY, -1, ab, Point(-1, -1), 0, BORDER_CONSTANT);
	sepFilter2D(data, dataXY_sep, -1, b, b, Point(-1, -1), 0, BORDER_CONSTANT);
	//输出分离滤波和联合滤波的计算结果
	cout << "dataY=" << endl
		<< dataY << endl;
	cout << "dataYX=" << endl
		<< dataYX << endl;
	cout << "dataXY=" << endl
		<< dataXY << endl;
	cout << "dataXY_sep=" << endl
		<< dataXY_sep << endl;
	//对图像的分离操作
	Mat img = imread("lena.png");
	if (img.empty())
	{
		cout << "请确认图像文件名称是否正确" << endl;
		return -1;
	}
	Mat imgYX, imgY, imgXY;
	filter2D(img, imgY, -1, a, Point(-1, -1), 0, BORDER_CONSTANT);
	filter2D(imgY, imgYX, -1, b, Point(-1, -1), 0, BORDER_CONSTANT);
	filter2D(img, imgXY, -1, ab, Point(-1, -1), 0, BORDER_CONSTANT);
	imshow("img", img);
	imshow("imgY", imgY);
	imshow("imgYX", imgYX);
	imshow("imgXY", imgXY);
	waitKey(0);
	return 0;
}

5.4 非线性滤波

5.4.1 中值滤波

medianBlur()

代码清单5-19 中值滤波

#include <opencv2\opencv.hpp>
#include <iostream>

using namespace cv;
using namespace std;

int main()
{
	Mat gray = imread("equalLena_salt.png", IMREAD_ANYCOLOR);
	Mat img = imread("lena_salt.png", IMREAD_ANYCOLOR);
	if (gray.empty() || img.empty())
	{
		cout << "请确认图像文件名称是否正确" << endl;
		return -1;
	}
	Mat imgResult3, grayResult3, imgResult9, grayResult9;
	//分别对含有椒盐噪声的彩色和灰度图像进行滤波,滤波模板为3×3
	medianBlur(img, imgResult3, 3);
	medianBlur(gray, grayResult3, 3);
	//加大滤波模板,图像滤波结果会变模糊
	medianBlur(img, imgResult9, 9);
	medianBlur(gray, grayResult9, 9);
	//显示滤波处理结果
	imshow("img", img);
	imshow("gray", gray);
	imshow("imgResult3", imgResult3);
	imshow("grayResult3", grayResult3);
	imshow("imgResult9", imgResult9);
	imshow("grayResult9", grayResult9);
	waitKey(0);
	return 0;
}

5.4.2 双边滤波

bilateralFilter()

代码清单5-21 人脸图像双边滤波

#include <opencv2\opencv.hpp>
#include <iostream>

using namespace cv;
using namespace std;

int main()
{
	//读取两张含有人脸的图像
	Mat img1 = imread("img1.png", IMREAD_ANYCOLOR);
	Mat img2 = imread("img2.png", IMREAD_ANYCOLOR);
	if (img1.empty() || img2.empty())
	{
		cout << "请确认图像文件名称是否正确" << endl;
		return -1;
	}
	Mat result1, result2, result3, result4;
	//验证不同滤波器直径的滤波效果
	bilateralFilter(img1, result1, 9, 50, 25 / 2);
	bilateralFilter(img1, result2, 25, 50, 25 / 2);
	//验证不同标准差值的滤波效果
	bilateralFilter(img2, result3, 9, 9, 9);
	bilateralFilter(img2, result4, 9, 200, 200);
	//显示原图
	imshow("img1", img1);
	imshow("img2", img2);
	//不同直径滤波结果
	imshow("result1", result1);
	imshow("result2", result2);
	//不同标准差值滤波结果
	imshow("result3 ", result3);
	imshow("result4", result4);
	waitKey(0);
	return 0;
}

5.5 图像的边缘检测

5.5.1 边缘检测原理

convertScaleAbs()

代码清单5-23 图像边缘检测

#include <opencv2\opencv.hpp>
#include <iostream>

using namespace cv;
using namespace std;

int main()
{
	//创建边缘检测滤波器
	Mat kernel1 = (Mat_<float>(1, 2) << 1, -1);  //X方向边缘检测滤波器
	Mat kernel2 = (Mat_<float>(1, 3) << 1, 0, -1);  //X方向边缘检测滤波器
	Mat kernel3 = (Mat_<float>(3, 1) << 1, 0, -1);  //Y方向边缘检测滤波器
	Mat kernelXY = (Mat_<float>(2, 2) << 1, 0, 0, -1);  //由左上到右下方向边缘检测滤波器
	Mat kernelYX = (Mat_<float>(2, 2) << 0, -1, 1, 0);  //由右上到左下方向边缘检测滤波器

														//读取图像,黑白图像边缘检测结果较为明显
	Mat img = imread("equalLena.png", IMREAD_ANYCOLOR);
	if (img.empty())
	{
		cout << "请确认图像文件名称是否正确" << endl;
		return -1;
	}
	Mat result1, result2, result3, result4, result5, result6;

	//检测图像边缘
	//以[1 -1]检测水平方向边缘
	filter2D(img, result1, CV_16S, kernel1);
	convertScaleAbs(result1, result1);

	//以[1 0 -1]检测水平方向边缘
	filter2D(img, result2, CV_16S, kernel2);
	convertScaleAbs(result2, result2);

	//以[1 0 -1]'检测垂直方向边缘
	filter2D(img, result3, CV_16S, kernel3);
	convertScaleAbs(result3, result3);

	//整幅图像的边缘
	result6 = result2 + result3;
	//检测由左上到右下方向边缘
	filter2D(img, result4, CV_16S, kernelXY);
	convertScaleAbs(result4, result4);

	//检测由右上到左下方向边缘
	filter2D(img, result5, CV_16S, kernelYX);
	convertScaleAbs(result5, result5);

	//显示边缘检测结果
	imshow("result1", result1);
	imshow("result2", result2);
	imshow("result3", result3);
	imshow("result4", result4);
	imshow("result5", result5);
	imshow("result6", result6);
	waitKey(0);
	return 0;
}

5.5.2 Sobel算子

Sobel()

代码清单5-25 图像Sobel边缘提取

#include <opencv2\opencv.hpp>
#include <iostream>

using namespace cv;
using namespace std;

int main()
{
	//读取图像,黑白图像边缘检测结果较为明显
	Mat img = imread("equalLena.png", IMREAD_ANYCOLOR);
	if (img.empty())
	{
		cout << "请确认图像文件名称是否正确" << endl;
		return -1;
	}
	Mat resultX, resultY, resultXY;

	//X方向一阶边缘
	Sobel(img, resultX, CV_16S, 1, 0, 3);
	convertScaleAbs(resultX, resultX);

	//Y方向一阶边缘
	Sobel(img, resultY, CV_16S, 0, 1, 3);
	convertScaleAbs(resultY, resultY);

	//整幅图像的一阶边缘
	resultXY = resultX + resultY;

	//显示图像
	imshow("resultX", resultX);
	imshow("resultY", resultY);
	imshow("resultXY", resultXY);
	waitKey(0);
	return 0;
}

5.5.3 Scharr算子

Scharr()

代码清单5-27 图像Scharr边缘提取

#include <opencv2\opencv.hpp>
#include <iostream>

using namespace cv;
using namespace std;

int main()
{
	//读取图像,黑白图像边缘检测结果较为明显
	Mat img = imread("equalLena.png", IMREAD_ANYDEPTH);
	if (img.empty())
	{
		cout << "请确认图像文件名称是否正确" << endl;
		return -1;
	}
	Mat resultX, resultY, resultXY;

	//X方向一阶边缘
	Scharr(img, resultX, CV_16S, 1, 0);
	convertScaleAbs(resultX, resultX);

	//Y方向一阶边缘
	Scharr(img, resultY, CV_16S, 0, 1);
	convertScaleAbs(resultY, resultY);

	//整幅图像的一阶边缘
	resultXY = resultX + resultY;

	//显示图像
	imshow("resultX", resultX);
	imshow("resultY", resultY);
	imshow("resultXY", resultXY);
	waitKey(0);
	return 0;
}

5.5.4 生成边缘检测滤波器

getDerivKernels()

代码清单5-29 计算Sobel算子和Scharr算子

#include <opencv2\opencv.hpp>
#include <iostream>

using namespace cv;
using namespace std;

int main()
{
	system("color F0");  //更改输出界面颜色
	Mat sobel_x1, sobel_y1, sobel_x2, sobel_y2, sobel_x3, sobel_y3;  //存放分离的Sobel算子
	Mat scharr_x, scharr_y;  //存放分离的Scharr算子
	Mat sobelX1, sobelX2, sobelX3, scharrX;  //存放最终算子

											 //一阶X方向Sobel算子
	getDerivKernels(sobel_x1, sobel_y1, 1, 0, 3);
	sobel_x1 = sobel_x1.reshape(CV_8U, 1);
	sobelX1 = sobel_y1*sobel_x1;  //计算滤波器

								  //二阶X方向Sobel算子
	getDerivKernels(sobel_x2, sobel_y2, 2, 0, 5);
	sobel_x2 = sobel_x2.reshape(CV_8U, 1);
	sobelX2 = sobel_y2*sobel_x2;  //计算滤波器

								  //三阶X方向Sobel算子
	getDerivKernels(sobel_x3, sobel_y3, 3, 0, 7);
	sobel_x3 = sobel_x3.reshape(CV_8U, 1);
	sobelX3 = sobel_y3*sobel_x3;  //计算滤波器

								  //X方向Scharr算子
	getDerivKernels(scharr_x, scharr_y, 1, 0, FILTER_SCHARR);
	scharr_x = scharr_x.reshape(CV_8U, 1);
	scharrX = scharr_y*scharr_x;  //计算滤波器

								  //输出结果
	cout << "X方向一阶Sobel算子:" << endl << sobelX1 << endl;
	cout << "X方向二阶Sobel算子:" << endl << sobelX2 << endl;
	cout << "X方向三阶Sobel算子:" << endl << sobelX3 << endl;
	cout << "X方向Scharr算子:" << endl << scharrX << endl;
	waitKey(0);
	return 0;
}

5.5.5 Laplacian算子

Laplacian()

代码清单5-31 利用Laplacian算子检测图像边缘

#include <opencv2\opencv.hpp>
#include <iostream>

using namespace cv;
using namespace std;

int main()
{
	//读取图像,黑白图像边缘检测结果较为明显
	Mat img = imread("equalLena.png", IMREAD_ANYDEPTH);
	if (img.empty())
	{
		cout << "请确认图像文件名称是否正确" << endl;
		return -1;
	}
	Mat result, result_g, result_G;

	//未滤波提取边缘
	Laplacian(img, result, CV_16S, 3, 1, 0);
	convertScaleAbs(result, result);

	//滤波后提取Laplacian边缘
	GaussianBlur(img, result_g, Size(3, 3), 5, 0);  //高斯滤波
	Laplacian(result_g, result_G, CV_16S, 3, 1, 0);
	convertScaleAbs(result_G, result_G);

	//显示图像
	imshow("result", result);
	imshow("result_G", result_G);
	waitKey(0);
	return 0;
}

5.5.6 Canny算法

Canny()

代码清单5-3 利用Canny算法提取图像边缘

#include <opencv2\opencv.hpp>
#include <iostream>

using namespace cv;
using namespace std;

int main()
{
	//读取图像,黑白图像边缘检测结果较为明显
	Mat img = imread("equalLena.png", IMREAD_ANYDEPTH);
	if (img.empty())
	{
		cout << "请确认图像文件名称是否正确" << endl;
		return -1;
	}
	Mat resultHigh, resultLow, resultG;

	//大阈值检测图像边缘
	Canny(img, resultHigh, 100, 200, 3);

	//小阈值检测图像边缘
	Canny(img, resultLow, 20, 40, 3);

	//高斯模糊后检测图像边缘
	GaussianBlur(img, resultG, Size(3, 3), 5);
	Canny(resultG, resultG, 100, 200, 3);

	//显示图像
	imshow("resultHigh", resultHigh);
	imshow("resultLow", resultLow);
	imshow("resultG", resultG);
	waitKey(0);
	return 0;
}

第六章 图像形态学操作

6.1 像素距离与连通域

6.1.1 图像像素距离变换

distanceTransform()

代码清单6-3 图像距离变换

#include <opencv2\opencv.hpp>
#include <iostream>

using namespace cv;
using namespace std;

int main()
{
	//构建简易矩阵,用于求取像素之间的距离
	Mat a = (Mat_<uchar>(5, 5) << 1, 1, 1, 1, 1,
		1, 1, 1, 1, 1,
		1, 1, 0, 1, 1,
		1, 1, 1, 1, 1,
		1, 1, 1, 1, 1);
	Mat dist_L1, dist_L2, dist_C, dist_L12;

	//计算街区距离
	distanceTransform(a, dist_L1, 1, 3, CV_8U);
	cout << "街区距离:" << endl << dist_L1 << endl;

	//计算欧式距离(输出类型CV_8U只支持街区距离,因此这里使用CV_32F)
	distanceTransform(a, dist_L2, 2, 5, CV_32F);
	cout << "欧式距离:" << endl << dist_L2 << endl;

	//计算棋盘距离
	distanceTransform(a, dist_C, 3, 5, CV_32F);
	cout << "棋盘距离:" << endl << dist_C << endl;

	//对图像进行距离变换
	Mat rice = imread("rice.png", IMREAD_GRAYSCALE);
	if (rice.empty())
	{
		cout << "请确认图像文件名称是否正确" << endl;
		return -1;
	}
	Mat riceBW, riceBW_INV;

	//将图像转成二值图像,同时把黑白区域颜色互换
	threshold(rice, riceBW, 50, 255, THRESH_BINARY);
	threshold(rice, riceBW_INV, 50, 255, THRESH_BINARY_INV);

	//距离变换
	Mat dist, dist_INV;
	distanceTransform(riceBW, dist, 1, 3, CV_32F);  //为了显示清晰,将数据类型变成CV_32F
	distanceTransform(riceBW_INV, dist_INV, 1, 3, CV_8U);

	//显示变换结果
	imshow("riceBW", riceBW);
	imshow("dist", dist);
	imshow("riceBW_INV", riceBW_INV);
	imshow("dist_INV", dist_INV);

	waitKey(0);
	return 0;
}

6.1.2 图像连通域分析

connectedComponents()

代码清单6-8 图像连通域计算

#include <opencv2\opencv.hpp>
#include <iostream>
#include <vector>

using namespace cv;
using namespace std;

int main()
{
	//对图像进行距离变换
	Mat img = imread("rice.png");
	if (img.empty())
	{
		cout << "请确认图像文件名称是否正确" << endl;
		return -1;
	}
	Mat rice, riceBW;

	//将图像转成二值图像,用于统计连通域
	cvtColor(img, rice, COLOR_BGR2GRAY);
	threshold(rice, riceBW, 50, 255, THRESH_BINARY);

	//生成随机颜色,用于区分不同连通域
	RNG rng(10086);
	Mat out;
	int number = connectedComponents(riceBW, out, 8, CV_16U);  //统计图像中连通域的个数
	vector<Vec3b> colors;
	for (int i = 0; i < number; i++)
	{
		//使用均匀分布的随机数确定颜色
		Vec3b vec3 = Vec3b(rng.uniform(0, 256), rng.uniform(0, 256), rng.uniform(0, 256));
		colors.push_back(vec3);
	}

	//以不同颜色标记出不同的连通域
	Mat result = Mat::zeros(rice.size(), img.type());
	int w = result.cols;
	int h = result.rows;
	for (int row = 0; row < h; row++)
	{
		for (int col = 0; col < w; col++)
		{
			int label = out.at<uint16_t>(row, col);
			if (label == 0)  //背景的黑色不改变
			{
				continue;
			}
			result.at<Vec3b>(row, col) = colors[label];
		}
	}

	//显示结果
	imshow("原图", img);
	imshow("标记后的图像", result);

	waitKey(0);
	return 0;
}

connectedComponentsWithStats()

代码清单6-9 连通域信息统计

#include <opencv2\opencv.hpp>
#include <iostream>
#include <vector>

using namespace cv;
using namespace std;

int main()
{
	system("color F0");  //更改输出界面颜色
	//对图像进行距离变换
	Mat img = imread("rice.png");
	if (img.empty())
	{
		cout << "请确认图像文件名称是否正确" << endl;
		return -1;
	}
	imshow("原图", img);
	Mat rice, riceBW;

	//将图像转成二值图像,用于统计连通域
	cvtColor(img, rice, COLOR_BGR2GRAY);
	threshold(rice, riceBW, 50, 255, THRESH_BINARY);

	//生成随机颜色,用于区分不同连通域
	RNG rng(10086);
	Mat out, stats, centroids;
	//统计图像中连通域的个数
	int number = connectedComponentsWithStats(riceBW, out, stats, centroids, 8, CV_16U);
	vector<Vec3b> colors;
	for (int i = 0; i < number; i++)
	{
		//使用均匀分布的随机数确定颜色
		Vec3b vec3 = Vec3b(rng.uniform(0, 256), rng.uniform(0, 256), rng.uniform(0, 256));
		colors.push_back(vec3);
	}

	//以不同颜色标记出不同的连通域
	Mat result = Mat::zeros(rice.size(), img.type());
	int w = result.cols;
	int h = result.rows;
	for (int i = 1; i < number; i++)
	{
		// 中心位置
		int center_x = centroids.at<double>(i, 0);
		int center_y = centroids.at<double>(i, 1);
		//矩形边框
		int x = stats.at<int>(i, CC_STAT_LEFT);
		int y = stats.at<int>(i, CC_STAT_TOP);
		int w = stats.at<int>(i, CC_STAT_WIDTH);
		int h = stats.at<int>(i, CC_STAT_HEIGHT);
		int area = stats.at<int>(i, CC_STAT_AREA);

		// 中心位置绘制
		circle(img, Point(center_x, center_y), 2, Scalar(0, 255, 0), 2, 8, 0);
		// 外接矩形
		Rect rect(x, y, w, h);
		rectangle(img, rect, colors[i], 1, 8, 0);
		putText(img, format("%d", i), Point(center_x, center_y),
			FONT_HERSHEY_SIMPLEX, 0.5, Scalar(0, 0, 255), 1);
		cout << "number: " << i << ",area: " << area << endl;
	}
	//显示结果
	imshow("标记后的图像", img);

	waitKey(0);
	return 0;
}

6.2 腐蚀与膨胀

6.2.1 图像腐蚀

erode()

getStructuringElement()

代码清单6-12 图像腐蚀

#include <opencv2\opencv.hpp>
#include <iostream>
#include <vector>

using namespace cv;
using namespace std;
//绘制包含区域函数
void drawState(Mat &img, int number, Mat centroids, Mat stats, String str) {
	RNG rng(10086);
	vector<Vec3b> colors;
	for (int i = 0; i < number; i++)
	{
		//使用均匀分布的随机数确定颜色
		Vec3b vec3 = Vec3b(rng.uniform(0, 256), rng.uniform(0, 256), rng.uniform(0, 256));
		colors.push_back(vec3);
	}

	for (int i = 1; i < number; i++)
	{
		// 中心位置
		int center_x = centroids.at<double>(i, 0);
		int center_y = centroids.at<double>(i, 1);
		//矩形边框
		int x = stats.at<int>(i, CC_STAT_LEFT);
		int y = stats.at<int>(i, CC_STAT_TOP);
		int w = stats.at<int>(i, CC_STAT_WIDTH);
		int h = stats.at<int>(i, CC_STAT_HEIGHT);

		// 中心位置绘制
		circle(img, Point(center_x, center_y), 2, Scalar(0, 255, 0), 2, 8, 0);
		// 外接矩形
		Rect rect(x, y, w, h);
		rectangle(img, rect, colors[i], 1, 8, 0);
		putText(img, format("%d", i), Point(center_x, center_y),
			FONT_HERSHEY_SIMPLEX, 0.5, Scalar(0, 0, 255), 1);
	}
	imshow(str, img);
}

int main()
{
	//生成用于腐蚀的原图像
	Mat src = (Mat_<uchar>(6, 6) << 0, 0, 0, 0, 255, 0,
		0, 255, 255, 255, 255, 255,
		0, 255, 255, 255, 255, 0,
		0, 255, 255, 255, 255, 0,
		0, 255, 255, 255, 255, 0,
		0, 0, 0, 0, 0, 0);
	Mat struct1, struct2;
	struct1 = getStructuringElement(0, Size(3, 3));  //矩形结构元素
	struct2 = getStructuringElement(1, Size(3, 3));  //十字结构元素

	Mat erodeSrc;  //存放腐蚀后的图像
	erode(src, erodeSrc, struct2);
	namedWindow("src", WINDOW_GUI_NORMAL);
	namedWindow("erodeSrc", WINDOW_GUI_NORMAL);
	imshow("src", src);
	imshow("erodeSrc", erodeSrc);

	Mat LearnCV_black = imread("LearnCV_black.png", IMREAD_ANYCOLOR);
	Mat LearnCV_write = imread("LearnCV_write.png", IMREAD_ANYCOLOR);
	Mat erode_black1, erode_black2, erode_write1, erode_write2;
	//黑背景图像腐蚀
	erode(LearnCV_black, erode_black1, struct1);
	erode(LearnCV_black, erode_black2, struct2);
	imshow("LearnCV_black", LearnCV_black);
	imshow("erode_black1", erode_black1);
	imshow("erode_black2", erode_black2);

	//白背景腐蚀
	erode(LearnCV_write, erode_write1, struct1);
	erode(LearnCV_write, erode_write2, struct2);
	imshow("LearnCV_write", LearnCV_write);
	imshow("erode_write1", erode_write1);
	imshow("erode_write2", erode_write2);

	//验证腐蚀对小连通域的去除
	Mat img = imread("rice.png");
	if (img.empty())
	{
		cout << "请确认图像文件名称是否正确" << endl;
		return -1;
	}
	Mat img2;
	copyTo(img, img2, img);  //克隆一个单独的图像,用于后期图像绘制
	Mat rice, riceBW;

	//将图像转成二值图像,用于统计连通域
	cvtColor(img, rice, COLOR_BGR2GRAY);
	threshold(rice, riceBW, 50, 255, THRESH_BINARY);

	Mat out, stats, centroids;
	//统计图像中连通域的个数
	int number = connectedComponentsWithStats(riceBW, out, stats, centroids, 8, CV_16U);
	drawState(img, number, centroids, stats, "未腐蚀时统计连通域");  //绘制图像

	erode(riceBW, riceBW, struct1);  //对图像进行腐蚀
	number = connectedComponentsWithStats(riceBW, out, stats, centroids, 8, CV_16U);
	drawState(img2, number, centroids, stats, "腐蚀后统计连通域");  //绘制图像

	waitKey(0);
	return 0;
}

6.2.2 图像膨胀

dilate()

getStructuringElement()

代码清单6-14 图像膨胀

#include <opencv2\opencv.hpp>
#include <iostream>
#include <vector>

using namespace cv;
using namespace std;

int main()
{
	//生成用于腐蚀的原图像
	Mat src = (Mat_<uchar>(6, 6) << 0, 0, 0, 0, 255, 0,
		0, 255, 255, 255, 255, 255,
		0, 255, 255, 255, 255, 0,
		0, 255, 255, 255, 255, 0,
		0, 255, 255, 255, 255, 0,
		0, 0, 0, 0, 0, 0);
	Mat struct1, struct2;
	struct1 = getStructuringElement(0, Size(3, 3));  //矩形结构元素
	struct2 = getStructuringElement(1, Size(3, 3));  //十字结构元素

	Mat erodeSrc;  //存放膨胀后的图像
	dilate(src, erodeSrc, struct2);
	namedWindow("src", WINDOW_GUI_NORMAL);
	namedWindow("dilateSrc", WINDOW_GUI_NORMAL);
	imshow("src", src);
	imshow("dilateSrc", erodeSrc);

	Mat LearnCV_black = imread("LearnCV_black.png", IMREAD_ANYCOLOR);
	Mat LearnCV_write = imread("LearnCV_write.png", IMREAD_ANYCOLOR);
	if (LearnCV_black.empty() || LearnCV_write.empty())
	{
		cout << "请确认图像文件名称是否正确" << endl;
		return -1;
	}

	Mat dilate_black1, dilate_black2, dilate_write1, dilate_write2;
	//黑背景图像膨胀
	dilate(LearnCV_black, dilate_black1, struct1);
	dilate(LearnCV_black, dilate_black2, struct2);
	imshow("LearnCV_black", LearnCV_black);
	imshow("dilate_black1", dilate_black1);
	imshow("dilate_black2", dilate_black2);

	//白背景图像膨胀
	dilate(LearnCV_write, dilate_write1, struct1);
	dilate(LearnCV_write, dilate_write2, struct2);
	imshow("LearnCV_write", LearnCV_write);
	imshow("dilate_write1", dilate_write1);
	imshow("dilate_write2", dilate_write2);

	//比较膨胀和腐蚀的结果
	Mat erode_black1, resultXor, resultAnd;
	erode(LearnCV_black, erode_black1, struct1);
	bitwise_xor(erode_black1, dilate_write1, resultXor);
	bitwise_and(erode_black1, dilate_write1, resultAnd);
	imshow("resultXor", resultXor);
	imshow("resultAnd", resultAnd);
	waitKey(0);
	return 0;
}

6.3 形态学应用

6.3.1 开运算

morphologyEx()
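
下面给出一个开运算的最简示意程序(仅为示意,其中binary.png为假设的二值图像文件名;包含开、闭、梯度、顶帽、黑帽和击中击不中等操作的完整示例见后面的代码清单6-16):

#include <opencv2\opencv.hpp>
#include <iostream>

using namespace cv;
using namespace std;

int main()
{
	//binary.png为假设的二值图像文件名,可替换为任意二值图像
	Mat src = imread("binary.png", IMREAD_GRAYSCALE);
	if (src.empty())
	{
		cout << "请确认图像文件名称是否正确" << endl;
		return -1;
	}
	//3×3矩形结构元素
	Mat kernel = getStructuringElement(0, Size(3, 3));
	//开运算:先腐蚀后膨胀,可以去除细小的白色区域
	Mat open;
	morphologyEx(src, open, MORPH_OPEN, kernel);
	imshow("src", src);
	imshow("open", open);
	waitKey(0);
	return 0;
}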

6.3.2 闭运算

morphologyEx()

6.3.3 形态学梯度

morphologyEx()

6.3.4 顶帽运算

morphologyEx()

6.3.5 黑帽运算

morphologyEx()

6.3.6 击中击不中变换

morphologyEx()

代码清单6-16 形态学操作应用

#include <opencv2\opencv.hpp>
#include <iostream>
#include <vector>

using namespace cv;
using namespace std;

int main()
{
	//用于验证形态学应用的二值化矩阵
	Mat src = (Mat_<uchar>(9, 12) << 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0, 255, 255, 255, 255, 255, 255, 255, 0, 0, 255, 0,
		0, 255, 255, 255, 255, 255, 255, 255, 0, 0, 0, 0,
		0, 255, 255, 255, 255, 255, 255, 255, 0, 0, 0, 0,
		0, 255, 255, 255, 0, 255, 255, 255, 0, 0, 0, 0,
		0, 255, 255, 255, 255, 255, 255, 255, 0, 0, 0, 0,
		0, 255, 255, 255, 255, 255, 255, 255, 0, 0, 255, 0,
		0, 255, 255, 255, 255, 255, 255, 255, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
	namedWindow("src", WINDOW_NORMAL);  //可以自由调节显示图像的尺寸
	imshow("src", src);
	//3×3矩形结构元素
	Mat kernel = getStructuringElement(0, Size(3, 3));

	//对二值化矩阵进行形态学操作
	Mat open, close, gradient, tophat, blackhat, hitmiss;

	//对二值化矩阵进行开运算
	morphologyEx(src, open, MORPH_OPEN, kernel);
	namedWindow("open", WINDOW_NORMAL);  //可以自由调节显示图像的尺寸
	imshow("open", open);

	//对二值化矩阵进行闭运算
	morphologyEx(src, close, MORPH_CLOSE, kernel);
	namedWindow("close", WINDOW_NORMAL);  //可以自由调节显示图像的尺寸
	imshow("close", close);

	//对二值化矩阵进行梯度运算
	morphologyEx(src, gradient, MORPH_GRADIENT, kernel);
	namedWindow("gradient", WINDOW_NORMAL);  //可以自由调节显示图像的尺寸
	imshow("gradient", gradient);

	//对二值化矩阵进行顶帽运算
	morphologyEx(src, tophat, MORPH_TOPHAT, kernel);
	namedWindow("tophat", WINDOW_NORMAL);  //可以自由调节显示图像的尺寸
	imshow("tophat", tophat);

	//对二值化矩阵进行黑帽运算
	morphologyEx(src, blackhat, MORPH_BLACKHAT, kernel);
	namedWindow("blackhat", WINDOW_NORMAL);   //可以自由调节显示图像的尺寸
	imshow("blackhat", blackhat);

	//对二值化矩阵进行击中击不中变换
	morphologyEx(src, hitmiss, MORPH_HITMISS, kernel);
	namedWindow("hitmiss", WINDOW_NORMAL);  //可以自由调节显示图像的尺寸
	imshow("hitmiss", hitmiss);

	//用图像验证形态学操作效果
	Mat keys = imread("keys.jpg",IMREAD_GRAYSCALE);
	imshow("原图像", keys);
	threshold(keys, keys, 80, 255, THRESH_BINARY);
	imshow("二值化后的keys", keys);

	//5×5矩形结构元素
	Mat kernel_keys = getStructuringElement(0, Size(5, 5));
	Mat open_keys, close_keys, gradient_keys, tophat_keys, blackhat_keys, hitmiss_keys;

	//对图像进行开运算
	morphologyEx(keys, open_keys, MORPH_OPEN, kernel_keys);
	imshow("open_keys", open_keys);

	//对图像进行闭运算
	morphologyEx(keys, close_keys, MORPH_CLOSE, kernel_keys);
	imshow("close_keys", close_keys);

	//对图像进行梯度运算
	morphologyEx(keys, gradient_keys, MORPH_GRADIENT, kernel_keys);
	imshow("gradient_keys", gradient_keys);

	//对图像进行顶帽运算
	morphologyEx(keys, tophat_keys, MORPH_TOPHAT, kernel_keys);
	imshow("tophat_keys", tophat_keys);

	//对图像进行黑帽运算
	morphologyEx(keys, blackhat_keys, MORPH_BLACKHAT, kernel_keys);
	imshow("blackhat_keys", blackhat_keys);

	//对图像进行击中击不中变换
	morphologyEx(keys, hitmiss_keys, MORPH_HITMISS, kernel_keys);
	imshow("hitmiss_keys", hitmiss_keys);

	waitKey(0);
	return 0;
}

6.3.7 图像细化

thinning()

ximgproc构造函数

代码清单6-18 图像细化

#include <opencv2\opencv.hpp>
#include <opencv2\ximgproc.hpp>  //细化函数thinning所在的头文件
#include <iostream>

using namespace cv;
using namespace std;

int main()
{
	//中文字进行细化
	Mat img = imread("LearnCV_black.png", IMREAD_ANYCOLOR);
	if (img.empty())
	{
		cout << "请确认图像文件名称是否正确" << endl;
		return -1;
	}
	//英文字+实心圆和圆环细化
	Mat words = Mat::zeros(100, 200, CV_8UC1);  //创建一个黑色的背景图片
	putText(words, "Learn", Point(30, 30), 2, 1, Scalar(255), 2);  //添加英文
	putText(words, "OpenCV 4", Point(30, 60), 2, 1, Scalar(255), 2);
	circle(words, Point(80, 75), 10, Scalar(255), -1);  //添加实心圆
	circle(words, Point(130, 75), 10, Scalar(255), 3);  //添加圆环

	//进行细化
	Mat thin1, thin2;
	ximgproc::thinning(img, thin1, 0);  //注意类名
	ximgproc::thinning(words, thin2, 0);

	//显示处理结果
	imshow("thin1", thin1);
	imshow("img", img);
	namedWindow("thin2", WINDOW_NORMAL);
	imshow("thin2", thin2);
	namedWindow("words", WINDOW_NORMAL);
	imshow("words", words);
	waitKey(0);
	return 0;
}

第七章 目标检测

7.1 形状检测

7.1.1 直线检测

HoughLines()

Canny()

line()

代码清单7-2 检测直线并绘制直线

#include <opencv2/opencv.hpp>
#include <iostream>

using namespace cv;
using namespace std;

void drawLine(Mat &img, //要标记直线的图像
	vector<Vec2f> lines,   //检测的直线数据
	double rows,   //原图像的行数(高)
	double cols,  //原图像的列数(宽)
	Scalar scalar,  //绘制直线的颜色
	int n  //绘制直线的线宽
)
{
	Point pt1, pt2;
	for (size_t i = 0; i < lines.size(); i++)
	{
		float rho = lines[i][0];  //直线距离坐标原点的距离
		float theta = lines[i][1];  //直线过坐标原点垂线与x轴夹角
		double a = cos(theta);  //夹角的余弦值
		double b = sin(theta);  //夹角的正弦值
		double x0 = a*rho, y0 = b*rho;  //直线与过坐标原点的垂线的交点
		double length = max(rows, cols);  //图像高宽的最大值
		//计算直线上的一点
		pt1.x = cvRound(x0 + length  * (-b));
		pt1.y = cvRound(y0 + length  * (a));
		//计算直线上另一点
		pt2.x = cvRound(x0 - length  * (-b));
		pt2.y = cvRound(y0 - length  * (a));
		//两点绘制一条直线
		line(img, pt1, pt2, scalar, n);
	}
}

int main()
{
	Mat img = imread("HoughLines.jpg", IMREAD_GRAYSCALE);
	if (img.empty())
	{
		cout << "请确认图像文件名称是否正确" << endl;
		return -1;
	}
	Mat edge;

	//检测边缘图像,并二值化
	Canny(img, edge, 80, 180, 3, false);
	threshold(edge, edge, 170, 255, THRESH_BINARY);

	//用不同的累加器进行检测直线
	vector<Vec2f> lines1, lines2;
	HoughLines(edge, lines1, 1, CV_PI / 180, 50, 0, 0);
	HoughLines(edge, lines2, 1, CV_PI / 180, 150, 0, 0);

	//在原图像中绘制直线
	Mat img1, img2;
	img.copyTo(img1);
	img.copyTo(img2);
	drawLine(img1, lines1, edge.rows, edge.cols, Scalar(255), 2);
	drawLine(img2, lines2, edge.rows, edge.cols, Scalar(255), 2);

	//显示图像
	imshow("edge", edge);
	imshow("img", img);
	imshow("img1", img1);
	imshow("img2", img2);
	waitKey(0);
	return 0;
}

HoughLinesP()

代码清单7-4 检测图像中的线段

#include <opencv2/opencv.hpp>
#include <iostream>

using namespace cv;
using namespace std;

int main()
{
	Mat img = imread("HoughLines.jpg", IMREAD_GRAYSCALE);
	if (img.empty())
	{
		cout << "请确认图像文件名称是否正确" << endl;
		return -1;
	}
	Mat edge;

	//检测边缘图像,并二值化
	Canny(img, edge, 80, 180, 3, false);
	threshold(edge, edge, 170, 255, THRESH_BINARY);

	//利用渐进概率式霍夫变换提取直线
	vector<Vec4i> linesP1, linesP2;
	HoughLinesP(edge, linesP1, 1, CV_PI / 180, 150, 30, 10);  //两个点连接最大距离10
	HoughLinesP(edge, linesP2, 1, CV_PI / 180, 150, 30, 30);  //两个点连接最大距离30

	//绘制两个点连接最大距离10直线检测结果
	Mat img1;
	img.copyTo(img1);
	for (size_t i = 0; i < linesP1.size(); i++)
	{
		line(img1, Point(linesP1[i][0], linesP1[i][1]),
			Point(linesP1[i][2], linesP1[i][3]), Scalar(255), 3);
	}

	//绘制两个点连接最大距离30直线检测结果
	Mat img2;
	img.copyTo(img2);
	for (size_t i = 0; i < linesP2.size(); i++)
	{
		line(img2, Point(linesP2[i][0], linesP2[i][1]),
			Point(linesP2[i][2], linesP2[i][3]), Scalar(255), 3);
	}

	//显示图像
	imshow("img1", img1);
	imshow("img2", img2);
	waitKey(0);
	return 0;
}

HoughLinesPointSet()

代码清单7-6 在二维点集中检测直线

#include <opencv2/opencv.hpp>
#include <iostream>
#include <vector>

using namespace cv;
using namespace std;

int main()
{
	system("color F0");  //更改输出界面颜色
	Mat lines;  //存放检测直线结果的矩阵
	vector<Vec3d> line3d;  //换一种结果存放形式
	vector<Point2f> point;  //待检测是否存在直线的所有点
	const static float Points[20][2] = {
		{ 0.0f,   369.0f },{ 10.0f,  364.0f },{ 20.0f,  358.0f },{ 30.0f,  352.0f },
		{ 40.0f,  346.0f },{ 50.0f,  341.0f },{ 60.0f,  335.0f },{ 70.0f,  329.0f },
		{ 80.0f,  323.0f },{ 90.0f,  318.0f },{ 100.0f, 312.0f },{ 110.0f, 306.0f },
		{ 120.0f, 300.0f },{ 130.0f, 295.0f },{ 140.0f, 289.0f },{ 150.0f, 284.0f },
		{ 160.0f, 277.0f },{ 170.0f, 271.0f },{ 180.0f, 266.0f },{ 190.0f, 260.0f }
	};
	//将所有点存放在vector中,用于输入函数中
	for (int i = 0; i < 20; i++)
	{
		point.push_back(Point2f(Points[i][0], Points[i][1]));
	}
	//参数设置
	double rhoMin = 0.0f;  //最小长度
	double rhoMax = 360.0f;  //最大长度
	double rhoStep = 1;  //离散化单位距离长度
	double thetaMin = 0.0f;  //最小角度
	double thetaMax = CV_PI / 2.0f;  //最大角度
	double thetaStep = CV_PI / 180.0f;  //离散化单位角度弧度
	HoughLinesPointSet(point, lines, 20, 1, rhoMin, rhoMax, rhoStep,
		thetaMin, thetaMax, thetaStep);
	lines.copyTo(line3d);

	//输出结果
	for (int i = 0; i < line3d.size(); i++)
	{
		cout << "votes:" << (int)line3d.at(i).val[0] << ", "
			<< "rho:" << line3d.at(i).val[1] << ", "
			<< "theta:" << line3d.at(i).val[2] << endl;
	}
	return 0;
}

7.1.2 直线拟合

fitLine()

代码清单7-8 直线拟合

#include <opencv2\opencv.hpp>
#include <iostream>
#include <vector>

using namespace cv;
using namespace std;

int main()
{
	system("color F0");  //更改输出界面颜色
	Vec4f lines;  //存放拟合后的直线
	vector<Point2f> point;  //待检测是否存在直线的所有点
	const static float Points[20][2] = {
		{ 0.0f,   0.0f },{ 10.0f,  11.0f },{ 21.0f,  20.0f },{ 30.0f,  30.0f },
		{ 40.0f,  42.0f },{ 50.0f,  50.0f },{ 60.0f,  60.0f },{ 70.0f,  70.0f },
		{ 80.0f,  80.0f },{ 90.0f,  92.0f },{ 100.0f, 100.0f },{ 110.0f, 110.0f },
		{ 120.0f, 120.0f },{ 136.0f, 130.0f },{ 138.0f, 140.0f },{ 150.0f, 150.0f },
		{ 160.0f, 163.0f },{ 175.0f, 170.0f },{ 181.0f, 180.0f },{ 200.0f, 190.0f }
	};
	//将所有点存放在vector中,用于输入函数中
	for (int i = 0; i < 20; i++)
	{
		point.push_back(Point2f(Points[i][0], Points[i][1]));
	}
	//参数设置
	double param = 0;  //距离模型中的数值参数C
	double reps = 0.01;  //坐标原点与直线之间的距离精度
	double aeps = 0.01;  //角度精度
	fitLine(point, lines, DIST_L1, param, reps, aeps);
	double k = lines[1] / lines[0];  //直线斜率
	cout << "直线斜率:" << k << endl;
	cout << "直线上一点坐标x:" << lines[2] << ", y:" << lines[3] << endl;
	cout << "直线解析式:y=" << k << "(x-" << lines[2] << ")+" << lines[3] << endl;
	return 0;
}

7.1.3 圆形检测

HoughCircles()

代码清单7-10 圆形检测

#include <opencv2\opencv.hpp>
#include <iostream>
#include <vector>

using namespace cv;
using namespace std;

int main()
{
	Mat img = imread("keys.jpg");
	if (img.empty())
	{
		cout << "请确认图像文件名称是否正确" << endl;
		return -1;
	}
	imshow("原图", img);
	Mat gray;
	cvtColor(img, gray, COLOR_BGR2GRAY);
	GaussianBlur(gray, gray, Size(9, 9), 2, 2);  //平滑滤波

	//检测圆形
	vector<Vec3f> circles;
	double dp = 2;  //累加器分辨率与图像分辨率的反比
	double minDist = 10;  //两个圆心之间的最小距离
	double	param1 = 100;  //Canny边缘检测的较大阈值
	double	param2 = 100;  //累加器阈值
	int min_radius = 20;  //圆形半径的最小值
	int max_radius = 100;  //圆形半径的最大值
	HoughCircles(gray, circles, HOUGH_GRADIENT, dp, minDist, param1, param2,
		min_radius, max_radius);

	//图像中标记出圆形
	for (size_t i = 0; i < circles.size(); i++)
	{
		//读取圆心
		Point center(cvRound(circles[i][0]), cvRound(circles[i][1]));
		//读取半径
		int radius = cvRound(circles[i][2]);
		//绘制圆心
		circle(img, center, 3, Scalar(0, 255, 0), -1, 8, 0);
		//绘制圆
		circle(img, center, radius, Scalar(0, 0, 255), 3, 8, 0);
	}

	//显示结果
	imshow("圆检测结果", img);
	waitKey(0);
	return 0;
}

7.2 轮廓检测

findContours()

drawContours()

7.2.1 轮廓发现与绘制

代码清单7-14 轮廓发现与绘制

#include <opencv2\opencv.hpp>
#include <iostream>
#include <vector>

using namespace cv;
using namespace std;

int main()
{
	system("color F0");  //更改输出界面颜色
	Mat img = imread("keys.jpg");
	if (img.empty())
	{
		cout << "请确认图像文件名称是否正确" << endl;
		return -1;
	}
	imshow("原图", img);
	Mat gray, binary;
	cvtColor(img, gray, COLOR_BGR2GRAY);  //转化成灰度图
	GaussianBlur(gray, gray, Size(13, 13), 4, 4);  //平滑滤波
	threshold(gray, binary, 170, 255, THRESH_BINARY | THRESH_OTSU);  //自适应二值化

	// 轮廓发现与绘制
	vector<vector<Point>> contours;  //轮廓
	vector<Vec4i> hierarchy;  //存放轮廓结构变量
	findContours(binary, contours, hierarchy, RETR_TREE, CHAIN_APPROX_SIMPLE, Point());
	//绘制轮廓
	for (int t = 0; t < contours.size(); t++)
	{
		drawContours(img, contours, t, Scalar(0, 0, 255), 2, 8);
	}
	//输出轮廓结构描述子
	for (int i = 0; i < hierarchy.size(); i++)
	{
		cout << hierarchy[i] << endl;
	}

	//显示结果
	imshow("轮廓检测结果", img);
	waitKey(0);
	return 0;
}

7.2.2 轮廓面积

contourArea()

代码清单7-16 计算轮廓面积

#include <opencv2\opencv.hpp>
#include <iostream>
#include <vector>

using namespace cv;
using namespace std;

int main()
{
	system("color F0");  //更改输出界面颜色
	//用四个点表示三角形轮廓
	vector<Point> contour;
	contour.push_back(Point2f(0, 0));
	contour.push_back(Point2f(10, 0));
	contour.push_back(Point2f(10, 10));
	contour.push_back(Point2f(5, 5));
	double area = contourArea(contour);
	cout << "area =" << area << endl;

	Mat img = imread("coins.jpg");
	if (img.empty())
	{
		cout << "请确认图像文件名称是否正确" << endl;
		return -1;
	}
	imshow("原图", img);
	Mat gray, binary;
	cvtColor(img, gray, COLOR_BGR2GRAY);  //转化成灰度图
	GaussianBlur(gray, gray, Size(9, 9), 2, 2);  //平滑滤波
	threshold(gray, binary, 170, 255, THRESH_BINARY | THRESH_OTSU);  //自适应二值化

	// 轮廓检测
	vector<vector<Point>> contours;  //轮廓
	vector<Vec4i> hierarchy;  //存放轮廓结构变量
	findContours(binary, contours, hierarchy, RETR_TREE, CHAIN_APPROX_SIMPLE, Point());

	//输出轮廓面积
	for (int t = 0; t < contours.size(); t++)
	{
		double area1 = contourArea(contours[t]);
		cout << "第" << t << "个轮廓面积=" << area1 << endl;
	}
	return 0;
}

7.2.3 轮廓长度(周长)

arcLength()

代码清单7-18 计算轮廓长度

#include <opencv2\opencv.hpp>
#include <iostream>
#include <vector>

using namespace cv;
using namespace std;

int main()
{
	system("color F0");  //更改输出界面颜色
	//用四个点表示三角形轮廓
	vector<Point> contour;
	contour.push_back(Point2f(0, 0));
	contour.push_back(Point2f(10, 0));
	contour.push_back(Point2f(10, 10));
	contour.push_back(Point2f(5, 5));

	double length0 = arcLength(contour, true);
	double length1 = arcLength(contour, false);
	cout << "length0 =" << length0 << endl;
	cout << "length1 =" << length1 << endl;

	Mat img = imread("coins.jpg");
	if (img.empty())
	{
		cout << "请确认图像文件名称是否正确" << endl;
		return -1;
	}
	imshow("原图", img);
	Mat gray, binary;
	cvtColor(img, gray, COLOR_BGR2GRAY);  //转化成灰度图
	GaussianBlur(gray, gray, Size(9, 9), 2, 2);  //平滑滤波
	threshold(gray, binary, 170, 255, THRESH_BINARY | THRESH_OTSU);  //自适应二值化

	// 轮廓检测
	vector<vector<Point>> contours;  //轮廓
	vector<Vec4i> hierarchy;  //存放轮廓结构变量
	findContours(binary, contours, hierarchy, RETR_TREE, CHAIN_APPROX_SIMPLE, Point());

	//输出轮廓长度
	for (int t = 0; t < contours.size(); t++)
	{
		double length2 = arcLength(contours[t], true);
		cout << "第" << t << "个轮廓长度=" << length2 << endl;
	}
	return 0;
}

7.2.4 轮廓外接多边形

boundingRect()

minAreaRect()

代码清单7-21 计算轮廓外接矩形

#include <opencv2/opencv.hpp>
#include <iostream>
#include <vector>

using namespace cv;
using namespace std;

int main()
{
	Mat img = imread("stuff.jpg");
	if (img.empty())
	{
		cout << "请确认图像文件名称是否正确" << endl;
		return -1;
	}
	Mat img1, img2;
	img.copyTo(img1);  //深拷贝用来绘制最大外接矩形
	img.copyTo(img2);  //深拷贝用来绘制最小外接矩形
	imshow("img", img);

	// 边缘检测
	Mat canny;
	Canny(img, canny, 80, 160, 3, false);
	imshow("canny", canny);

	//膨胀运算,将细小缝隙填补上
	Mat kernel = getStructuringElement(0, Size(3, 3));
	dilate(canny, canny, kernel);

	// 轮廓发现与绘制
	vector<vector<Point>> contours;
	vector<Vec4i> hierarchy;
	findContours(canny, contours, hierarchy, 0, 2, Point());

	//寻找轮廓的外接矩形
	for (int n = 0; n < contours.size(); n++)
	{
		// 最大外接矩形
		Rect rect = boundingRect(contours[n]);
		rectangle(img1, rect, Scalar(0, 0, 255), 2, 8, 0);

		// 最小外接矩形
		RotatedRect rrect = minAreaRect(contours[n]);
		Point2f points[4];
		rrect.points(points);  //读取最小外接矩形的四个顶点
		Point2f cpt = rrect.center;  //最小外接矩形的中心

		// 绘制旋转矩形与中心位置
		for (int i = 0; i < 4; i++)
		{
			if (i == 3)
			{
				line(img2, points[i], points[0], Scalar(0, 255, 0), 2, 8, 0);
				break;
			}
			line(img2, points[i], points[i + 1], Scalar(0, 255, 0), 2, 8, 0);
		}
		//绘制矩形的中心
		circle(img2, cpt, 2, Scalar(255, 0, 0), 2, 8, 0);
	}

	//输出绘制外接矩形的结果
	imshow("max", img1);
	imshow("min", img2);
	waitKey(0);
	return 0;
}

approxPolyDP()

代码清单7-23 对多个轮廓进行多边形逼近

#include <opencv2/opencv.hpp>
#include <iostream>
#include <vector>

using namespace cv;
using namespace std;

//绘制轮廓函数
void drawapp(Mat result, Mat img2)
{
	for (int i = 0; i < result.rows; i++)
	{
		//最后一个坐标点与第一个坐标点连接
		if (i == result.rows - 1)
		{
			Vec2i point1 = result.at<Vec2i>(i);
			Vec2i point2 = result.at<Vec2i>(0);
			line(img2, point1, point2, Scalar(0, 0, 255), 2, 8, 0);
			break;
		}
		Vec2i point1 = result.at<Vec2i>(i);
		Vec2i point2 = result.at<Vec2i>(i + 1);
		line(img2, point1, point2, Scalar(0, 0, 255), 2, 8, 0);
	}
}

int main(int argc, const char *argv[])
{
	Mat img = imread("approx.png");
	if (img.empty())
	{
		cout << "请确认图像文件名称是否正确" << endl;
		return -1;
	}
	// 边缘检测
	Mat canny;
	Canny(img, canny, 80, 160, 3, false);
	//膨胀运算
	Mat kernel = getStructuringElement(0, Size(3, 3));
	dilate(canny, canny, kernel);

	// 轮廓发现与绘制
	vector<vector<Point>> contours;
	vector<Vec4i> hierarchy;
	findContours(canny, contours, hierarchy, 0, 2, Point());

	//绘制多边形
	for (int t = 0; t < contours.size(); t++)
	{
		//用最小外接矩形求取轮廓中心
		RotatedRect rrect = minAreaRect(contours[t]);
		Point2f center = rrect.center;
		circle(img, center, 2, Scalar(0, 255, 0), 2, 8, 0);

		Mat result;
		approxPolyDP(contours[t], result, 4, true);  //多边形拟合
		drawapp(result, img);
		cout << "corners : " << result.rows << endl;

		//判断形状和绘制轮廓
		if (result.rows == 3)
		{
			putText(img, "triangle", center, 0, 1, Scalar(0, 255, 0), 1, 8);
		}
		if (result.rows == 4)
		{
			putText(img, "rectangle", center, 0, 1, Scalar(0, 255, 0), 1, 8);
		}
		if (result.rows == 8)
		{
			putText(img, "poly-8", center, 0, 1, Scalar(0, 255, 0), 1, 8);
		}
		if (result.rows > 12)
		{
			putText(img, "circle", center, 0, 1, Scalar(0, 255, 0), 1, 8);
		}
	}
	imshow("result", img);
	waitKey(0);
	return 0;
}

7.2.5 点到轮廓距离

pointPolygonTest()

代码清单7-25 点到轮廓距离

#include <opencv2/opencv.hpp>
#include <iostream>
#include <vector>

using namespace cv;
using namespace std;

int main()
{
	system("color F0");  //更改输出界面颜色
	Mat img = imread("approx.png");
	if (img.empty())
	{
		cout << "请确认图像文件名称是否正确" << endl;
		return -1;
	}
	// 边缘检测
	Mat canny;
	Canny(img, canny, 80, 160, 3, false);
	//膨胀运算
	Mat kernel = getStructuringElement(0, Size(3, 3));
	dilate(canny, canny, kernel);

	// 轮廓发现
	vector<vector<Point>> contours;
	vector<Vec4i> hierarchy;
	findContours(canny, contours, hierarchy, 0, 2, Point());

	//创建图像中的一个像素点并绘制圆形
	Point point = Point(250, 200);
	circle(img, point, 2, Scalar(0, 0, 255), 2, 8, 0);

	//多边形
	for (int t = 0; t < contours.size(); t++)
	{
		//用最小外接矩形求取轮廓中心
		RotatedRect rrect = minAreaRect(contours[t]);
		Point2f center = rrect.center;
		circle(img, center, 2, Scalar(0, 255, 0), 2, 8, 0);  //绘制圆心点
		//轮廓外部点距离轮廓的距离
		double dis = pointPolygonTest(contours[t], point, true);
		//轮廓内部点距离轮廓的距离
		double dis2 = pointPolygonTest(contours[t], center, true);
		//输出点结果
		cout << "外部点距离轮廓距离:" << dis << endl;
		cout << "内部点距离轮廓距离:" << dis2 << endl;
	}
	return 0;
}

7.2.6 凸包检测

convexHull()

代码清单7-27 凸包检测

#include <opencv2/opencv.hpp>
#include <iostream>
#include <vector>

using namespace cv;
using namespace std;

int main()
{
	Mat img = imread("hand.png");
	if (img.empty())
	{
		cout << "请确认图像文件名称是否正确" << endl;
		return -1;
	}
	// 二值化
	Mat gray, binary;
	cvtColor(img, gray, COLOR_BGR2GRAY);
	threshold(gray, binary, 105, 255, THRESH_BINARY);

	//开运算消除细小区域
	Mat k = getStructuringElement(MORPH_RECT, Size(3, 3), Point(-1, -1));
	morphologyEx(binary, binary, MORPH_OPEN, k);
	imshow("binary", binary);

	// 轮廓发现
	vector<vector<Point>> contours;
	vector<Vec4i> hierarchy;
	findContours(binary, contours, hierarchy, 0, 2, Point());
	for (int n = 0; n < contours.size(); n++)
	{
		//计算凸包
		vector<Point> hull;
		convexHull(contours[n], hull);
		//绘制凸包
		for (int i = 0; i < hull.size(); i++)
		{
			//绘制凸包顶点
			circle(img, hull[i], 4, Scalar(255, 0, 0), 2, 8, 0);
			//连接凸包
			if (i == hull.size() - 1)
			{
				line(img, hull[i], hull[0], Scalar(0, 0, 255), 2, 8, 0);
				break;
			}
			line(img, hull[i], hull[i + 1], Scalar(0, 0, 255), 2, 8, 0);
		}
	}
	imshow("hull", img);
	waitKey(0);
	return 0;
}

7.3 矩的计算

7.3.1 几何矩与中心矩

moments()

代码清单7-29 计算图像矩

#include <opencv2/opencv.hpp>
#include <iostream>
#include <vector>

using namespace cv;
using namespace std;

int main()
{
	Mat img = imread("approx.png");

	// 二值化
	Mat gray, binary;
	cvtColor(img, gray, COLOR_BGR2GRAY);
	threshold(gray, binary, 105, 255, THRESH_BINARY);

	//开运算消除细小区域
	Mat k = getStructuringElement(MORPH_RECT, Size(3, 3), Point(-1, -1));
	morphologyEx(binary, binary, MORPH_OPEN, k);

	// 轮廓发现
	vector<vector<Point>> contours;
	vector<Vec4i> hierarchy;
	findContours(binary, contours, hierarchy, 0, 2, Point());
	for (int n = 0; n < contours.size(); n++) 
	{
		Moments M;
		M = moments(contours[n], true);
		cout << "spatial moments:" << endl
			<< "m00:" << M.m00 << " m01:" << M.m01 << " m10:" << M.m10 << endl
			<< "m11:" << M.m11 << " m02:" << M.m02 << " m20:" << M.m20 << endl
			<< "m12:" << M.m12 << " m21:" << M.m21 << " m03:" << M.m03 << " m30:"<< M.m30 << endl;

		cout << "central moments:" << endl
			<< "mu20:" << M.mu20 << " mu02:" << M.mu02 << " mu11:" << M.mu11 << endl
			<< "mu30:" << M.mu30 << " mu21:" << M.mu21 << " mu12:" << M.mu12 << " mu03:" << M.mu03 << endl;

		cout << "central normalized moments:" << endl
			<< "nu20:" << M.nu20 << " nu02:" << M.nu02 << " nu11:" << M.nu11 << endl
			<< "nu30:" << M.nu30 << " nu21:" << M.nu21 << " nu12:" << M.nu12 << " nu03:" << M.nu03 << endl;
	}
	return 0;
}

7.3.2 Hu矩

HuMoments()

代码清单7-31 计算图像的Hu矩

#include <opencv2/opencv.hpp>
#include <iostream>
#include <vector>

using namespace cv;
using namespace std;

int main()
{
	system("color F0");  //更改输出界面颜色
	Mat img = imread("approx.png");
	if (img.empty())
	{
		cout << "请确认图像文件名称是否正确" << endl;
		return -1;
	}
	// 二值化
	Mat gray, binary;
	cvtColor(img, gray, COLOR_BGR2GRAY);
	threshold(gray, binary, 105, 255, THRESH_BINARY);

	//开运算消除细小区域
	Mat k = getStructuringElement(MORPH_RECT, Size(3, 3), Point(-1, -1));
	morphologyEx(binary, binary, MORPH_OPEN, k);

	// 轮廓发现
	vector<vector<Point>> contours;
	vector<Vec4i> hierarchy;
	findContours(binary, contours, hierarchy, 0, 2, Point());
	for (int n = 0; n < contours.size(); n++)
	{
		Moments M;
		M = moments(contours[n], true);
		Mat hu;
		HuMoments(M, hu);  //计算Hu矩
		cout << hu << endl;
	}
	return 0;
}

7.3.3 基于Hu矩的轮廓匹配

matchShapes()

代码清单7-33 基于Hu矩的轮廓匹配

#include <opencv2/opencv.hpp>
#include <iostream>
#include <vector>

using namespace cv;
using namespace std;

void findcontours(Mat &image, vector<vector<Point>> &contours)
{
	Mat gray, binary;
	vector<Vec4i> hierarchy;
	//图像灰度化
	cvtColor(image, gray, COLOR_BGR2GRAY);
	//图像二值化
	threshold(gray, binary, 0, 255, THRESH_BINARY | THRESH_OTSU);
	//寻找轮廓
	findContours(binary, contours, hierarchy, 0, 2);
}

int main()
{
	Mat img = imread("ABC.png");
	Mat img_B = imread("B.png");
	if (img.empty() || img_B.empty())
	{
		cout << "请确认图像文件名称是否正确" << endl;
		return -1;
	}

	resize(img_B, img_B, Size(), 0.5, 0.5);
	imwrite("B.png", img_B);
	imshow("B", img_B);

	// 轮廓提取
	vector<vector<Point>> contours1;
	vector<vector<Point>> contours2;
	findcontours(img, contours1);
	findcontours(img_B, contours2);
	// hu矩计算
	Moments mm2 = moments(contours2[0]);
	Mat hu2;
	HuMoments(mm2, hu2);
	// 轮廓匹配
	for (int n = 0; n < contours1.size(); n++)
	{
		Moments mm = moments(contours1[n]);
		Mat hum;
		HuMoments(mm, hum);
		//Hu矩匹配
		double dist;
		dist = matchShapes(hum, hu2, CONTOURS_MATCH_I1, 0);
		if (dist < 1)
		{
			drawContours(img, contours1, n, Scalar(0, 0, 255), 3, 8);
		}
	}
	imshow("match result", img);
	waitKey(0);
	return 0;
}

7.4 点集拟合

minEnclosingTriangle()

minEnclosingCircle()

代码清单7-36 点集外包轮廓

#include <opencv2\opencv.hpp>
#include <iostream>
#include <vector>

using namespace cv;
using namespace std;

int main()
{
	Mat img(500, 500, CV_8UC3, Scalar::all(0));
	RNG& rng = theRNG();  //生成随机点

	while (true)
	{
		int i, count = rng.uniform(1, 101);
		vector<Point> points;
		//生成随机点
		for (i = 0; i < count; i++)
		{
			Point pt;
			pt.x = rng.uniform(img.cols / 4, img.cols * 3 / 4);
			pt.y = rng.uniform(img.rows / 4, img.rows * 3 / 4);
			points.push_back(pt);
		}

		//寻找包围点集的三角形 
		vector<Point2f> triangle;
		double area = minEnclosingTriangle(points, triangle);

		//寻找包围点集的圆形
		Point2f center;
		float radius = 0;
		minEnclosingCircle(points, center, radius);

		//创建两个图片用于输出结果
		img = Scalar::all(0);
		Mat img2;
		img.copyTo(img2);

		//在图像中绘制坐标点
		for (i = 0; i < count; i++)
		{
			circle(img, points[i], 3, Scalar(255, 255, 255), FILLED, LINE_AA);
			circle(img2, points[i], 3, Scalar(255, 255, 255), FILLED, LINE_AA);
		}
			
		//绘制三角形
		for (i = 0; i < 3; i++)
		{
			if (i==2)
			{
				line(img, triangle[i], triangle[0], Scalar(255, 255, 255), 1, 16);
				break;
			}
			line(img, triangle[i], triangle[i + 1], Scalar(255, 255, 255), 1, 16);
		}

		//绘制圆形
		circle(img2, center, cvRound(radius), Scalar(255, 255, 255), 1, LINE_AA);

		//输出结果
		imshow("triangle", img);
		imshow("circle", img2);

		//按q键或者ESC键退出程序
		char key = (char)waitKey();
		if (key == 27 || key == 'q' || key == 'Q')
		{
			break;
		}
	}
	return 0;
}

7.5 QR二维码检测

QRCodeDetector类

detect()

decode()

detectAndDecode()

代码清单7-40 二维码识别

#include <opencv2/opencv.hpp>
#include <iostream>
#include <vector>

using namespace cv;
using namespace std;

int main(int argc, char** argv) 
{
	Mat img = imread("qrcode2.png");

	Mat gray, qrcode_bin;
	cvtColor(img, gray, COLOR_BGR2GRAY);
	QRCodeDetector qrcodedetector;
	vector<Point> points;
	string information;
	bool isQRcode;
	isQRcode = qrcodedetector.detect(gray, points);  //识别二维码
	if (isQRcode)
	{
		//解码二维码
		information = qrcodedetector.decode(gray, points, qrcode_bin);
		cout << points << endl;  //输出二维码四个顶点的坐标
	}
	else
	{
		cout << "无法识别二维码,请确认图像中是否含有二维码" << endl;
		return -1;
	}
	//绘制二维码的边框
	for (int i = 0; i < points.size(); i++)
	{
		if (i== points.size()-1)
		{
			line(img, points[i], points[0], Scalar(0, 0, 255), 2, 8);
			break;
		}
		line(img, points[i], points[i + 1], Scalar(0, 0, 255), 2, 8);
	}
	//将解码内容输出到图片上
	putText(img, information.c_str(),Point(20, 30), 0, 1.0, Scalar(0, 0, 255), 2, 8);

	//利用函数直接定位二维码并解码
	string information2;
	vector<Point> points2;
	information2 = qrcodedetector.detectAndDecode(gray,points2);
	cout << points2 << endl;
	putText(img, information2.c_str(), Point(20, 55), 0, 1.0, Scalar(0, 0, 0), 2, 8);

	//输出结果
	imshow("result", img);
	namedWindow("qrcode_bin", WINDOW_NORMAL);
	imshow("qrcode_bin", qrcode_bin);
	waitKey(0);
	return 0;
}

第八章 图像分析与修复

8.1 傅里叶变换

8.1.1 离散傅里叶变换

dft()

idft()

getOptimalDFTSize()

magnitude()

代码清单8-6 离散傅里叶变换

#include <opencv2/opencv.hpp>
#include <iostream>

using namespace std;
using namespace cv;

int main()
{
	//对矩阵进行处理,展示正逆变换的关系
	Mat a = (Mat_<float>(5, 5) << 1, 2, 3, 4, 5,
		2, 3, 4, 5, 6,
		3, 4, 5, 6, 7,
		4, 5, 6, 7, 8,
		5, 6, 7, 8, 9);
	Mat b, c, d;
	dft(a, b, DFT_COMPLEX_OUTPUT);  //正变换
	dft(b, c, DFT_INVERSE | DFT_SCALE | DFT_REAL_OUTPUT);  //逆变换只输出实数
	idft(b, d, DFT_SCALE);  //逆变换

	//对图像进行处理
	Mat img = imread("lena.png");
	if (img.empty())
	{
		cout << "请确认图像文件名称是否正确" << endl;
		return -1;
	}
	Mat gray;
	cvtColor(img, gray, COLOR_BGR2GRAY);
	resize(gray, gray, Size(502, 502));
	imshow("原图像", gray);

	//计算合适的离散傅里叶变换尺寸
	int rows = getOptimalDFTSize(gray.rows);
	int cols = getOptimalDFTSize(gray.cols);

	//扩展图像
	Mat appropriate;
	int T = (rows - gray.rows) / 2;  //上方扩展行数
	int B = rows - gray.rows - T;  //下方扩展行数
	int L = (cols - gray.cols) / 2;  //左侧扩展列数
	int R = cols - gray.cols - L;  //右侧扩展列数
	copyMakeBorder(gray, appropriate, T, B, L, R, BORDER_CONSTANT);
	imshow("扩展后的图像", appropriate);

	//构建离散傅里叶变换输入量
	Mat flo[2], complex;
	flo[0] = Mat_<float>(appropriate);  //实数部分
	flo[1] = Mat::zeros(appropriate.size(), CV_32F);  //虚数部分
	merge(flo, 2, complex);  //合成一个多通道矩阵

	//进行离散傅里叶变换
	Mat result;
	dft(complex, result);

	//将复数转化为幅值
	Mat resultC[2];
	split(result, resultC);  //分成实数和虚数
	Mat amplitude;
	magnitude(resultC[0], resultC[1], amplitude);

	//进行对数放缩公式为: M1 = log(1+M),保证所有数都大于0
	amplitude = amplitude + 1;
	log(amplitude, amplitude);//求自然对数

	//与原图像尺寸对应的区域								
	amplitude = amplitude(Rect(L, T, gray.cols, gray.rows));
	normalize(amplitude, amplitude, 0, 1, NORM_MINMAX);  //归一化
	imshow("傅里叶变换结果幅值图像", amplitude);  //显示结果

	//重新排列傅里叶图像中的象限,使得原点位于图像中心
	int centerX = amplitude.cols / 2;
	int centerY = amplitude.rows / 2;
	//分解成四个小区域
	Mat Qlt(amplitude, Rect(0, 0, centerX, centerY));//ROI区域的左上
	Mat Qrt(amplitude, Rect(centerX, 0, centerX, centerY));//ROI区域的右上
	Mat Qlb(amplitude, Rect(0, centerY, centerX, centerY));//ROI区域的左下
	Mat Qrb(amplitude, Rect(centerX, centerY, centerX, centerY));//ROI区域的右下

	//交换象限,左上和右下进行交换
	Mat med;
	Qlt.copyTo(med);
	Qrb.copyTo(Qlt);
	med.copyTo(Qrb);
	//交换象限,左下和右上进行交换
	Qrt.copyTo(med);
	Qlb.copyTo(Qrt);
	med.copyTo(Qlb);

	imshow("中心化后的幅值图像", amplitude);
	waitKey(0);
	return 0;
}

8.1.2 傅里叶变换进行卷积

dft()

mulSpectrums()

代码清单8-8 通过傅里叶变换进行卷积

#include <opencv2\opencv.hpp>
#include <iostream>

using namespace cv;
using namespace std;

int main()
{
	Mat img = imread("lena.png");
	if (img.empty())
	{
		cout << "请确认图像文件名称是否正确" << endl;
		return -1;
	}
	Mat gray;
	cvtColor(img, gray, COLOR_BGR2GRAY);
	Mat grayfloat = Mat_<float>(gray);  //更改图像数据类型为float
	Mat kernel = (Mat_<float>(5, 5) << 1, 1, 1, 1, 1,
		1, 1, 1, 1, 1,
		1, 1, 1, 1, 1,
		1, 1, 1, 1, 1,
		1, 1, 1, 1, 1);
	//构建输出图像
	Mat result;
	int rwidth = abs(grayfloat.rows - kernel.rows) + 1;
	int rheight = abs(grayfloat.cols - kernel.cols) + 1;
	result.create(rwidth, rheight, grayfloat.type());

	// 计算最优离散傅里叶变换尺寸
	int width = getOptimalDFTSize(grayfloat.cols + kernel.cols - 1);
	int height = getOptimalDFTSize(grayfloat.rows + kernel.rows - 1);

	//改变输入图像尺寸
	Mat tempA;
	int A_T = 0;
	int A_B = height - grayfloat.rows;
	int A_L = 0;
	int A_R = width - grayfloat.cols;
	copyMakeBorder(grayfloat, tempA, 0, A_B, 0, A_R, BORDER_CONSTANT);

	//改变滤波器尺寸
	Mat tempB;
	int B_T = 0;
	int B_B = height - kernel.rows;
	int B_L = 0;
	int B_R = width - kernel.cols;
	copyMakeBorder(kernel, tempB, 0, B_B, 0, B_R, BORDER_CONSTANT);

	//分别进行离散傅里叶变换
	dft(tempA, tempA, 0, grayfloat.rows);
	dft(tempB, tempB, 0, kernel.rows);

	//多个傅里叶变换的结果相乘
	mulSpectrums(tempA, tempB, tempA, DFT_COMPLEX_OUTPUT);

	//相乘结果进行逆变换
	//dft(tempA, tempA, DFT_INVERSE | DFT_SCALE, result.rows);
	idft(tempA, tempA, DFT_SCALE, result.rows);

	//对逆变换结果进行归一化
	normalize(tempA, tempA, 0, 1, NORM_MINMAX);

	//截取部分结果作为滤波结果
	tempA(Rect(0, 0, result.cols, result.rows)).copyTo(result);

	//显示结果
	imshow("原图像", gray);
	imshow("滤波结果", result);
	waitKey(0);
}

8.1.3 离散余弦变换

dct()

idct()

代码清单8-11 图像离散余弦变换

#include <opencv2\opencv.hpp>
#include <iostream>

using namespace cv;
using namespace std;

int main()
{
	Mat kernel = (Mat_<float>(5, 5) << 1, 2, 3, 4, 5,
		                               2, 3, 4, 5, 6,
		                               3, 4, 5, 6, 7,
		                               4, 5, 6, 7, 8,
		                               5, 6, 7, 8, 9);
	Mat a, b;
	dct(kernel, a);
	idct(a, b);

	//对图像进行处理
	Mat img = imread("lena.png");
	if (!img.data)
	{
		cout << "读入图像出错,请确认图像名称是否正确" << endl;
		return -1;
	}
	imshow("原图像", img);
	
	//计算最佳变换尺寸
	int width = 2 * getOptimalDFTSize((img.cols + 1) / 2);
	int height = 2 * getOptimalDFTSize((img.rows + 1) / 2);
	
	//扩展图像尺寸
	int T = 0;
	int B = height - T - img.rows;
	int L = 0;
	int R = width - L - img.cols;
	Mat appropriate;
	copyMakeBorder(img, appropriate, T, B, L, R, BORDER_CONSTANT, Scalar(0));

	//三个通道需要分别进行DCT变换
	vector<Mat> channels;
	split(appropriate, channels);

	//提取BGR颜色各个通道的值
	Mat one = channels.at(0);	
	Mat two = channels.at(1);
	Mat three = channels.at(2);

	//进行DCT变换
	Mat oneDCT, twoDCT, threeDCT;
	dct(Mat_<float>(one), oneDCT);
	dct(Mat_<float>(two), twoDCT);
	dct(Mat_<float>(three), threeDCT);

	//重新组成三个通道
	vector<Mat> channelsDCT;
	channelsDCT.push_back(Mat_<uchar>(oneDCT));
	channelsDCT.push_back(Mat_<uchar>(twoDCT));
	channelsDCT.push_back(Mat_<uchar>(threeDCT));

	//输出图像
	Mat result;
	merge(channelsDCT, result);
	imshow("DCT图像", result);
	waitKey();
	return 0;
}

8.2 积分图像

integral()

代码清单8-15 计算积分图像

#include <opencv2\opencv.hpp>
#include <iostream>

using namespace cv;
using namespace std;

int main()
{
	//创建一个16×16全为1的矩阵,因为256=16×16
	Mat img = Mat::ones(16, 16, CV_32FC1);

	//在图像中加入随机噪声
	RNG rng(10086);
	for (int y = 0; y < img.rows; y++)
	{
		for (int x = 0; x < img.cols; x++)
		{
			float d = rng.uniform(-0.5, 0.5);
			img.at<float>(y, x) = img.at<float>(y, x) + d;
		}
	}
	
	//计算标准求和积分
	Mat sum;
	integral(img, sum);
	//为了便于显示,转成CV_8U格式
	Mat sum8U = Mat_<uchar>(sum);

	//计算平方求和积分
	Mat sqsum;
	integral(img, sum, sqsum);
	//为了便于显示,转成CV_8U格式
	Mat sqsum8U = Mat_<uchar>(sqsum);

	//计算倾斜求和积分
	Mat tilted;
	integral(img, sum, sqsum, tilted);
	//为了便于显示,转成CV_8U格式
	Mat tilted8U = Mat_<uchar>(tilted);

	//输出结果
	namedWindow("sum8U", WINDOW_NORMAL);
	namedWindow("sqsum8U", WINDOW_NORMAL);
	namedWindow("tilted8U", WINDOW_NORMAL);
	imshow("sum8U", sum8U);
	imshow("sqsum8U", sqsum8U);
	imshow("tilted8U", tilted8U);

	waitKey();
	return 0;
}

8.3 图像分割

8.3.1 漫水填充法

floodFill()

代码清单8-18 漫水填充法分割图像

#include <opencv2\opencv.hpp>
#include <iostream>

using namespace cv;
using namespace std;

int main()
{
	system("color F0");  //将DOS界面调成白底黑字
	Mat img = imread("lena.png");
	if (!(img.data))
	{
		cout << "读取图像错误,请确认图像文件是否正确" << endl;
		return -1;
	}

	
	RNG rng(10086);//随机数,用于随机生成像素
	
	//设置操作标志flags
	int connectivity = 4;  //连通邻域方式
	int maskVal = 255;  //掩码图像的数值
	int flags = connectivity|(maskVal<<8)| FLOODFILL_FIXED_RANGE;  //漫水填充操作方式标志

	//设置与选中像素点的差值
	Scalar loDiff = Scalar(20, 20, 20);
	Scalar upDiff = Scalar(20, 20, 20);

	//声明掩模矩阵变量
	Mat mask = Mat::zeros(img.rows + 2, img.cols + 2, CV_8UC1);

	while (true)
	{
		//随机产生图像中某一像素点
		int py = rng.uniform(0,img.rows-1);
		int px = rng.uniform(0, img.cols - 1);
		Point point = Point(px, py);
		
		//彩色图像中填充的像素值
		Scalar newVal = Scalar(rng.uniform(0, 255), rng.uniform(0, 255), rng.uniform(0, 255));

		//漫水填充函数
		int area = floodFill(img, mask, point, newVal, 0, loDiff, upDiff, flags);  //不需要返回填充区域的外接矩形时传入0
	
		//输出像素点和填充的像素数目
		cout << "像素点x:" << point.x << "  y:" << point.y
			<< "     填充像素数目:" << area << endl;

		//输出填充的图像结果
		imshow("填充的彩色图像", img);
		imshow("掩模图像", mask);

		//判断是否结束程序
		int c = waitKey(0);
		if ((c&255)==27)
		{
			break;
		}
	}
	return 0;
}

8.3.2 分水岭法

watershed()

代码清单8-20 分水岭法分割图像

#include <opencv2\opencv.hpp>
#include <iostream>

using namespace std;
using namespace cv;

int main()
{
	Mat img, imgGray, imgMask;
	Mat maskWaterShed;  // watershed()函数的参数
	img = imread("HoughLines.jpg");  //原图像
	if (img.empty())
	{
		cout << "请确认图像文件名称是否正确" << endl;
		return -1;
	}
	cvtColor(img, imgGray, COLOR_BGR2GRAY);
	//GaussianBlur(imgGray, imgGray, Size(5, 5), 10, 20);  //模糊用于减少边缘数目

	//提取边缘并进行闭运算
	Canny(imgGray, imgMask, 150, 300);
	//Mat k = getStructuringElement(0, Size(3, 3));
	//morphologyEx(imgMask, imgMask, MORPH_CLOSE, k);

	imshow("边缘图像", imgMask);
	imshow("原图像", img);

	//计算连通域数目
	vector<vector<Point>> contours;
	vector<Vec4i> hierarchy;
	findContours(imgMask, contours, hierarchy, RETR_CCOMP, CHAIN_APPROX_SIMPLE);

	//在maskWaterShed上绘制轮廓,用于输入分水岭算法
	maskWaterShed = Mat::zeros(imgMask.size(), CV_32S);
	for (int index = 0; index < contours.size(); index++)
	{
		drawContours(maskWaterShed, contours, index, Scalar::all(index + 1),
			-1, 8, hierarchy, INT_MAX);
	}
	//分水岭算法   需要对原图像进行处理
	watershed(img, maskWaterShed);

	vector<Vec3b> colors;  // 随机生成几种颜色
	for (int i = 0; i < contours.size(); i++)
	{
		int b = theRNG().uniform(0, 255);
		int g = theRNG().uniform(0, 255);
		int r = theRNG().uniform(0, 255);
		colors.push_back(Vec3b((uchar)b, (uchar)g, (uchar)r));
	}

	Mat resultImg = Mat(img.size(), CV_8UC3);  //显示图像
	for (int i = 0; i < imgMask.rows; i++)
	{
		for (int j = 0; j < imgMask.cols; j++)
		{
			// 绘制每个区域的颜色
			int index = maskWaterShed.at<int>(i, j);
			if (index == -1)  // 区域间的值被置为-1(边界)
			{
				resultImg.at<Vec3b>(i, j) = Vec3b(255, 255, 255);
			}
			else if (index <= 0 || index > contours.size())  // 没有标记清楚的区域被置为0 
			{
				resultImg.at<Vec3b>(i, j) = Vec3b(0, 0, 0);
			}
			else  // 其他每个区域的值保持不变:1,2,…,contours.size()
			{
				resultImg.at<Vec3b>(i, j) = colors[index - 1];  // 把这些区域绘制成不同颜色
			}
		}
	}

	resultImg = resultImg * 0.6 + img * 0.4;
	imshow("分水岭结果", resultImg);

	//绘制每个区域的图像
	for (int n = 1; n <= contours.size(); n++)
	{
		Mat resImage1 = Mat(img.size(), CV_8UC3);  // 声明一个最后要显示的图像
		for (int i = 0; i < imgMask.rows; i++)
		{
			for (int j = 0; j < imgMask.cols; j++)
			{
				int index = maskWaterShed.at<int>(i, j);
				if (index == n)
					resImage1.at<Vec3b>(i, j) = img.at<Vec3b>(i, j);
				else
					resImage1.at<Vec3b>(i, j) = Vec3b(0, 0, 0);
			}
		}
		//显示图像
		imshow(to_string(n), resImage1);
	}

	waitKey(0);
	return 0;
}

8.3.3 Grabcut 法

grabCut()

代码清单8-22 利用Grabcut 法进行图像分割

#include <opencv2/opencv.hpp>
#include <iostream>

using namespace cv;
using namespace std;

int main()
{
	Mat img = imread("lena.png");
	if (!img.data)  //防止错误读取图像
	{
		cout<<"读取图像错误,请确认图像文件是否正确" << endl; 
		return 0;
	}

	//绘制矩形
	Mat imgRect;
	img.copyTo(imgRect);  //备份图像,防止绘制矩形框对结果产生影响
	Rect rect(80, 30, 340, 390);
	rectangle(imgRect, rect, Scalar(255, 255, 255),2);
	imshow("选择的矩形区域", imgRect);

	//进行分割
	Mat bgdmod = Mat::zeros(1, 65, CV_64FC1);
	Mat fgdmod = Mat::zeros(1, 65, CV_64FC1);
	Mat mask = Mat::zeros(img.size(), CV_8UC1);
	grabCut(img, mask, rect, bgdmod, fgdmod, 5, GC_INIT_WITH_RECT);
	
	//将分割出的前景绘制回来
	Mat result;
	for (int row = 0; row < mask.rows; row++) 
	{
		for (int col = 0; col < mask.cols; col++) 
		{
			int n = mask.at<uchar>(row, col);
			//将明显是前景和可能是前景的区域都保留
			if (n == 1 || n == 3) 
			{
				mask.at<uchar>(row, col) = 255;
			}
			//将明显是背景和可能是背景的区域都删除
			else 
			{
				mask.at<uchar>(row, col) = 0;
			}
		}
	}
	bitwise_and(img, img, result, mask);
	imshow("分割结果", result);
	waitKey(0);
	return 0;
}

8.3.4 Mean-Shift 法

pyrMeanShiftFiltering()

TermCriteria构造函数

代码清单8-25 利用Mean-Shift 法分割图像

#include <opencv2/opencv.hpp>
#include <iostream>

using namespace cv;
using namespace std;

int main()
{
	Mat img = imread("coins.png");
	if (!img.data)
	{
		cout << "读取图像错误,请确认图像文件是否正确" << endl;
		return -1;
	}

	//分割处理
	Mat result1, result2;
	TermCriteria T10 = TermCriteria(TermCriteria::COUNT | TermCriteria::EPS, 10, 0.1);
	pyrMeanShiftFiltering(img, result1, 20, 40, 2, T10);  //第一次分割
	pyrMeanShiftFiltering(result1, result2, 20, 40, 2, T10);  //第一次分割的结果再次分割

	//显示分割结果
	imshow("img", img);
	imshow("result1", result1);
	imshow("result2", result2);

	//对图像提取Canny边缘
	Mat imgCanny, result1Canny, result2Canny;
	Canny(img, imgCanny, 150, 300);
	Canny(result1, result1Canny, 150, 300);
	Canny(result2, result2Canny, 150, 300);

	//显示边缘检测结果
	imshow("imgCanny", imgCanny);
	imshow("result1Canny", result1Canny);
	imshow("result2Canny", result2Canny);
	waitKey(0);
	return 0;
}

8.4 图像修复

inpaint()

代码清单8-27 图像修复

#include <opencv2\opencv.hpp>
#include <iostream>

using namespace cv;
using namespace std;

int main()
{
	Mat img1 = imread("inpaint1.png");
	Mat img2 = imread("inpaint2.png");
	if (img1.empty() || img2.empty())
	{
		cout << "请确认图像文件名称是否正确" << endl;
		return -1;
	}
	imshow("img1", img1);
	imshow("img2", img2);

	//转换为灰度图
	Mat img1Gray, img2Gray;
	cvtColor(img1, img1Gray, COLOR_BGR2GRAY, 0);  //imread读入的图像为BGR通道顺序
	cvtColor(img2, img2Gray, COLOR_BGR2GRAY, 0);

	//通过阈值处理生成Mask掩模
	Mat img1Mask, img2Mask;
	threshold(img1Gray, img1Mask, 245, 255, THRESH_BINARY);
	threshold(img2Gray, img2Mask, 245, 255, THRESH_BINARY);

	//对Mask膨胀处理,增加Mask面积
	Mat Kernel = getStructuringElement(MORPH_RECT, Size(3, 3));
	dilate(img1Mask, img1Mask, Kernel);
	dilate(img2Mask, img2Mask, Kernel);

	//图像修复
	Mat img1Inpaint, img2Inpaint;
	inpaint(img1, img1Mask, img1Inpaint, 5, INPAINT_NS);
	inpaint(img2, img2Mask, img2Inpaint, 5, INPAINT_NS);

	//显示处理结果
	imshow("img1Mask", img1Mask);
	imshow("img1修复后", img1Inpaint);
	imshow("img2Mask", img2Mask);
	imshow("img2修复后", img2Inpaint);
	waitKey();
	return 0;
}

第九章 特征点检测与匹配

9.1 角点检测

9.1.1 显示关键点

drawKeypoints()

Keypoint数据类型

代码清单9-3 绘制关键点

#include <opencv2\opencv.hpp>
#include <iostream>

using namespace cv;
using namespace std;

int main()
{
	Mat img = imread("lena.png", IMREAD_COLOR);
	//判断加载图像是否存在
	if (!img.data)
	{
		cout << "读取图像错误,请确认图像文件是否正确" << endl;
		return -1;
	}
     

	Mat imgGray;
	cvtColor(img, imgGray, COLOR_BGR2GRAY);
	//生成关键点
	vector<KeyPoint> keypoints;
	RNG rng(10086);
	for (int i = 0; i < 100; i++)
	{
		float pty = rng.uniform(0, img.rows - 1);
		float ptx = rng.uniform(0, img.cols - 1);
		KeyPoint keypoint;  //对KeyPoint类进行赋值
		keypoint.pt.x = ptx;
		keypoint.pt.y = pty;
		keypoints.push_back(keypoint);  //保存进关键点向量中
	}

	//绘制关键点
	drawKeypoints(img, keypoints, img, Scalar(0, 0, 0));
	drawKeypoints(imgGray, keypoints, imgGray, Scalar(255, 255, 255));
	
	//显示图像绘制结果
	imshow("img", img);
	imshow("imgGray", imgGray);
	waitKey(0);
	return 0;
}

9.1.2 Harris 角点检测

cornerHarris()

代码清单9-5 检测Harris 角点

#include <opencv2/opencv.hpp>
#include<iostream>

using namespace cv;
using namespace std;


int main()
{
	Mat img = imread("lena.png", IMREAD_COLOR);
	if (!img.data)
	{
		cout << "读取图像错误,请确认图像文件是否正确" << endl;
		return -1;
	}

	//转成灰度图像
	Mat gray;
	cvtColor(img, gray, COLOR_BGR2GRAY);

	//计算Harris系数
	Mat harris;
	int blockSize = 2;  //邻域半径
	int apertureSize = 3;  //Sobel算子的孔径尺寸
	cornerHarris(gray, harris, blockSize, apertureSize, 0.04);
	
	//归一化便于进行数值比较和结果显示
	Mat harrisn;
	normalize(harris, harrisn, 0, 255, NORM_MINMAX);
	//将图像的数据类型变成CV_8U
	convertScaleAbs(harrisn, harrisn);
	
	//寻找Harris角点
	vector<KeyPoint> keyPoints;
	for (int row = 0; row < harrisn.rows; row++)
	{
		for (int col = 0; col < harrisn.cols; col++)
		{
			int R = harrisn.at<uchar>(row, col);
			if (R > 125)
			{
				//向角点存入KeyPoint中
				KeyPoint keyPoint;
				keyPoint.pt.y = row;
				keyPoint.pt.x = col;
				keyPoints.push_back(keyPoint);
			}
		}
	}

	//绘制角点与显示结果
	drawKeypoints(img, keyPoints, img);
	imshow("系数矩阵", harrisn);
	imshow("Harris角点", img);
	waitKey(0);
	return 0;
}

9.1.3 Shi-Tomas 角点检测

goodFeaturesToTrack()

代码清单9-7 检测Shi-Tomas 角点

#include <opencv2/opencv.hpp>
#include<iostream>

using namespace cv;
using namespace std;

int main()
{
	Mat img = imread("lena.png");
	if (!img.data)
	{
		cout << "读取图像错误,请确认图像文件是否正确" << endl;
		return -1;
	}
	//深拷贝用于第二种方法绘制角点
	Mat img2;
	img.copyTo(img2);
	Mat gray;
	cvtColor(img, gray, COLOR_BGR2GRAY);
	// Detector parameters

	//提取角点
	int maxCorners = 100;  //检测角点数目
	double quality_level = 0.01;  //质量等级,或者说阈值与最佳角点的比例关系
	double minDistance = 0.04;  //两个角点之间的最小欧式距离
	vector<Point2f> corners;
	goodFeaturesToTrack(gray, corners, maxCorners, quality_level, minDistance, Mat(), 3, false);

	//绘制角点
	vector<KeyPoint> keyPoints;  //存放角点的KeyPoint类,用于后期绘制角点时用
	RNG rng(10086);
	for (int i = 0; i < corners.size(); i++) 
	{
		//第一种方式绘制角点,用circle()函数绘制角点
		int b = rng.uniform(0, 256);
		int g = rng.uniform(0, 256);
		int r = rng.uniform(0, 256);
		circle(img, corners[i], 5, Scalar(b, g, r), 2, 8, 0);

		//将角点存放在KeyPoint类中
		KeyPoint keyPoint;
		keyPoint.pt = corners[i];
		keyPoints.push_back(keyPoint);
	}

	//第二种方式绘制角点,用drawKeypoints()函数
	drawKeypoints(img2, keyPoints, img2);
	//输出绘制角点的结果
	imshow("用circle()函数绘制角点结果", img);
	imshow("通过绘制关键点函数绘制角点结果", img2);
	waitKey(0);
	return 0;
}

9.1.4 亚像素级别角点检测

cornerSubPix()

代码清单9-9 计算亚像素级别角点坐标

#include <opencv2/opencv.hpp>
#include <iostream>
#include <string>

using namespace cv;
using namespace std;

int main()
{
	system("color F0");  //改变DOS界面颜色
	Mat img = imread("lena.png",IMREAD_COLOR);
	if (!img.data)
	{
		cout << "读取图像错误,请确认图像文件是否正确" << endl;
		return -1;
	}
	//彩色图像转成灰度图像
	Mat gray;
	cvtColor(img, gray, COLOR_BGR2GRAY);

	//提取角点
	int maxCorners = 100;  //检测角点数目
	double quality_level = 0.01;  //质量等级,或者说阈值与最佳角点的比例关系
	double minDistance = 0.04;  //两个角点之间的最小欧式距离
	vector<Point2f> corners;
	goodFeaturesToTrack(gray, corners, maxCorners, quality_level, minDistance, Mat(), 3, false);
	
	//计算亚像素级别角点坐标
	vector<Point2f> cornersSub = corners;  //角点备份,防止被函数修改
	Size winSize = Size(5, 5);
	Size zeroZone = Size(-1, -1);
	TermCriteria criteria = TermCriteria(TermCriteria::EPS + TermCriteria::COUNT, 40, 0.001);
	cornerSubPix(gray, cornersSub , winSize, zeroZone, criteria);

	//输出初始坐标和精细坐标
	for (size_t i = 0; i < corners.size(); i++)
	{
		string str = to_string(i);
		str = "第" + str + "个角点初始坐标:";
		cout << str << corners[i] << "   精细后坐标:" << cornersSub[i] << endl;
	}
	return 0;
}

9.2 特征点检测

9.2.1关键点

KeyPoint()

detect()
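
下面给出一个关于KeyPoint结构和detect()函数用法的最简示意程序(仅为示意,以ORB检测器和lena.png为例):

#include <opencv2\opencv.hpp>
#include <iostream>
#include <vector>

using namespace cv;
using namespace std;

int main()
{
	Mat img = imread("lena.png");
	if (!img.data)
	{
		cout << "读取图像错误,请确认图像文件是否正确" << endl;
		return -1;
	}
	//任意Feature2D子类都提供detect()接口,这里以ORB为例
	Ptr<ORB> detector = ORB::create();
	vector<KeyPoint> keypoints;
	detector->detect(img, keypoints);  //检测关键点
	//KeyPoint中保存了关键点的坐标pt、邻域直径size、方向angle和响应值response等信息
	for (size_t i = 0; i < keypoints.size() && i < 5; i++)
	{
		cout << "pt=" << keypoints[i].pt << "  size=" << keypoints[i].size
			<< "  angle=" << keypoints[i].angle << "  response=" << keypoints[i].response << endl;
	}
	return 0;
}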

9.2.2 描述子

compute()

detectAndCompute()
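
下面给出compute()与detectAndCompute()两种用法的对比示意程序(仅为示意,以ORB特征点和lena.png为例):

#include <opencv2\opencv.hpp>
#include <iostream>
#include <vector>

using namespace cv;
using namespace std;

int main()
{
	Mat img = imread("lena.png");
	if (!img.data)
	{
		cout << "读取图像错误,请确认图像文件是否正确" << endl;
		return -1;
	}
	Ptr<ORB> orb = ORB::create();

	//方式一:先用detect()检测关键点,再用compute()计算描述子
	vector<KeyPoint> keypoints1;
	Mat descriptions1;
	orb->detect(img, keypoints1);
	orb->compute(img, keypoints1, descriptions1);

	//方式二:用detectAndCompute()一次性完成关键点检测与描述子计算
	vector<KeyPoint> keypoints2;
	Mat descriptions2;
	orb->detectAndCompute(img, noArray(), keypoints2, descriptions2);

	cout << "方式一描述子尺寸:" << descriptions1.size() << endl;
	cout << "方式二描述子尺寸:" << descriptions2.size() << endl;
	return 0;
}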

9.2.3 SIFT 特征点检测

SIFT::create()
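
下面给出一个计算SIFT特征点的最简示意程序(仅为示意,假设使用OpenCV 4.4及以上版本,此时SIFT位于features2d模块;更早的版本需要使用xfeatures2d扩展模块中的SIFT):

#include <opencv2\opencv.hpp>
#include <iostream>
#include <vector>

using namespace cv;
using namespace std;

int main()
{
	Mat img = imread("lena.png");
	if (!img.data)
	{
		cout << "读取图像错误,请确认图像文件是否正确" << endl;
		return -1;
	}
	//创建SIFT特征点类变量,参数全部使用默认值
	Ptr<SIFT> sift = SIFT::create();

	//计算SIFT关键点与描述子
	vector<KeyPoint> keypoints;
	Mat descriptions;
	sift->detectAndCompute(img, noArray(), keypoints, descriptions);

	//绘制含有角度和大小的关键点
	Mat result;
	drawKeypoints(img, keypoints, result, Scalar(255, 255, 255), DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
	imshow("SIFT特征点", result);
	waitKey(0);
	return 0;
}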

9.2.4 SURF 特征点检测

SURF::create()

代码清单9-16 计算SURF 特征点

#include <opencv2\opencv.hpp>
#include <opencv2\xfeatures2d.hpp>  //SURF特征点头文件
#include <iostream>
#include <vector>

using namespace std;
using namespace cv;  
using namespace xfeatures2d;  //SURF特征点命名空间

int main()
{
	Mat img = imread("lena.png");
	if (!img.data)
	{
		cout << "读取图像错误,请确认图像文件是否正确" << endl;
		return -1;
	}

	//创建SURF特征点类变量
	Ptr<SURF> surf = SURF::create(500,  //关键点阈值
		                            4,  //4组金字塔
		                            3,  //每组金字塔有3层
		                         true,  //使用128维描述子
		                        false);  //计算关键点方向

	//计算SURF关键点
	vector<KeyPoint> Keypoints;
	surf->detect(img, Keypoints);  //确定关键点

	//计算SURF描述子
	Mat descriptions;
	surf->compute(img, Keypoints, descriptions);  //计算描述子
	
	//绘制特征点
	Mat imgAngel;
	img.copyTo(imgAngel);
	//绘制不含角度和大小的结果
	drawKeypoints(img, Keypoints, img,Scalar(255,255,255));
	//绘制含有角度和大小的结果
	drawKeypoints(img, Keypoints, imgAngel, Scalar(255, 255, 255),DrawMatchesFlags::DRAW_RICH_KEYPOINTS);

	//显示结果
	imshow("不含角度和大小的结果", img);
	imshow("含有角度和大小的结果", imgAngel);
	waitKey(0); 
	return 0;
}

9.2.5 ORB特征点检测

ORB::create()

ORB::HARRIS_SCORE

代码清单9-18 计算ORB特征点

#include <opencv2\opencv.hpp>
#include <iostream>
#include <vector>

using namespace std;
using namespace cv;

int main()
{
	Mat img = imread("lena.png");
	if (!img.data)
	{
		cout << "请确认图像文件名称是否正确" << endl;
		return -1;
	}

	//创建 ORB 特征点类变量
	Ptr<ORB> orb = ORB::create(500, //特征点数目
		1.2f, //金字塔层级之间的缩放比例
		8, //金字塔图像层数系数
		31, //边缘阈值
		0, //原图在金字塔中的层数
		2, //生成描述子时需要用的像素点数目
		ORB::HARRIS_SCORE, //使用 Harris 方法评价特征点
		31, //生成描述子时关键点周围邻域的尺寸
		20 //计算 FAST 角点时像素值差值的阈值
	);

	//计算 ORB 关键点
	vector<KeyPoint> Keypoints;
	orb->detect(img, Keypoints); //确定关键点

	//计算 ORB 描述子
	Mat descriptions;
	orb->compute(img, Keypoints, descriptions); //计算描述子

	//绘制特征点
	Mat imgAngel;
	img.copyTo(imgAngel);
	//绘制不含角度和大小的结果
	drawKeypoints(img, Keypoints, img, Scalar(255, 255, 255));
	//绘制含有角度和大小的结果
	drawKeypoints(img, Keypoints, imgAngel, Scalar(255, 255, 255), DrawMatchesFlags::DRAW_RICH_KEYPOINTS);

	//显示结果

	imshow("不含角度和大小的结果", img);
	imshow("含有角度和大小的结果", imgAngel);
	waitKey(0);
	return 0;
}

9.3 特征点匹配

9.3.1 DescriptorMatcher类介绍

DescriptorMatcher::match()

DMatch类

DescriptorMatcher::knnMatch()

DescriptorMatcher::radiusMatch()
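
下面给出一个利用knnMatch()进行最近邻比率筛选的示意程序(仅为示意,以ORB特征点和汉明距离为例,其中0.8的比率阈值为假设取值),同时展示了DMatch中queryIdx、trainIdx和distance的含义:

#include <opencv2\opencv.hpp>
#include <iostream>
#include <vector>

using namespace cv;
using namespace std;

int main()
{
	Mat img1 = imread("box.png");
	Mat img2 = imread("box_in_scene.png");
	if (!(img1.data && img2.data))
	{
		cout << "读取图像错误,请确认图像文件是否正确" << endl;
		return -1;
	}
	//计算ORB特征点与描述子
	Ptr<ORB> orb = ORB::create(1000);
	vector<KeyPoint> keypoints1, keypoints2;
	Mat descriptions1, descriptions2;
	orb->detectAndCompute(img1, noArray(), keypoints1, descriptions1);
	orb->detectAndCompute(img2, noArray(), keypoints2, descriptions2);

	//knnMatch():为每个查询描述子寻找最近的2个匹配
	Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create(DescriptorMatcher::BRUTEFORCE_HAMMING);
	vector<vector<DMatch>> knnMatches;
	matcher->knnMatch(descriptions1, descriptions2, knnMatches, 2);

	//比率测试:最近距离明显小于次近距离时才认为匹配可靠(0.8为假设阈值)
	vector<DMatch> good_matches;
	for (size_t i = 0; i < knnMatches.size(); i++)
	{
		if (knnMatches[i].size() == 2 &&
			knnMatches[i][0].distance < 0.8f * knnMatches[i][1].distance)
		{
			//DMatch中保存了查询描述子索引queryIdx、训练描述子索引trainIdx和距离distance
			good_matches.push_back(knnMatches[i][0]);
		}
	}
	cout << "knnMatch匹配数目:" << knnMatches.size() << "  筛选后:" << good_matches.size() << endl;
	return 0;
}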

9.3.2 暴力匹配

BFMatcher构造函数
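
下面给出BFMatcher构造函数的一个简单示意程序(仅为示意):第一个参数为距离类型,第二个参数crossCheck为true时只保留互为最近邻的匹配点对。

#include <opencv2\opencv.hpp>

using namespace cv;

int main()
{
	//构造两种常用的暴力匹配器(参数取值仅为示意)
	BFMatcher matcherHamming(NORM_HAMMING, false);  //ORB、BRISK等二进制描述子使用汉明距离
	BFMatcher matcherL2(NORM_L2, true);  //SIFT、SURF等浮点描述子使用欧氏距离,并开启交叉验证
	return 0;
}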

9.3.3 显示特征点匹配结果

drawMatches()

代码清单9-25 ORB特征点暴力匹配

#include <opencv2\opencv.hpp>
#include <iostream>
#include <vector>

using namespace std;
using namespace cv;  

void orb_features(Mat &gray, vector<KeyPoint> &keypionts, Mat &descriptions)
{
	Ptr<ORB> orb = ORB::create(1000, 1.2f);
	orb->detect(gray, keypionts);
	orb->compute(gray, keypionts, descriptions);
}

int main()
{
	Mat img1, img2;  
	img1 = imread("box.png");  
	img2 = imread("box_in_scene.png");

	if (!(img1.data && img2.data))
	{
		cout << "读取图像错误,请确认图像文件是否正确" << endl;
		return -1;
	}

	//提取ORB特征点
	vector<KeyPoint> Keypoints1, Keypoints2;
	Mat descriptions1, descriptions2;

	//计算特征点
	orb_features(img1, Keypoints1, descriptions1);
	orb_features(img2, Keypoints2, descriptions2);

	//特征点匹配
	vector<DMatch> matches;  //定义存放匹配结果的变量
	BFMatcher matcher(NORM_HAMMING);  //定义特征点匹配的类,使用汉明距离
	matcher.match(descriptions1, descriptions2, matches);  //进行特征点匹配
	cout << "matches=" << matches.size() << endl;  //匹配成功特征点数目
	
	//通过汉明距离筛选匹配结果
	double min_dist = 10000, max_dist = 0;
	for (int i = 0; i < matches.size(); i++)
	{
		double dist = matches[i].distance;
		if (dist < min_dist) min_dist = dist;
		if (dist > max_dist) max_dist = dist;
	}

	//输出所有匹配结果中最大汉明距离和最小汉明距离
	cout << "min_dist=" << min_dist << endl;
	cout << "max_dist=" << max_dist << endl;

	//将汉明距离较大的匹配点对删除
	vector<DMatch>  good_matches;
	for (int i = 0; i < matches.size(); i++)
	{
		if (matches[i].distance <= max(2 * min_dist, 20.0))
		{
			good_matches.push_back(matches[i]);
		}
	}
	cout << "good_min=" << good_matches.size() << endl;  //剩余特征点数目

	//绘制匹配结果
	Mat outimg, outimg1;
	drawMatches(img1, Keypoints1, img2, Keypoints2, matches, outimg);
	drawMatches(img1, Keypoints1, img2, Keypoints2, good_matches, outimg1);
	imshow("未筛选结果", outimg);
	imshow("最小汉明距离筛选", outimg1);

	waitKey(0);  
	return 0; 
}

9.3.4 FLANN Matching

FlannBasedMatcher constructor
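
A sketch of the FlannBasedMatcher constructor in OpenCV 4.x. By default it builds a KD-tree index, which requires floating-point (CV_32F) descriptors; this is why Code Listing 9-27 converts the ORB descriptors before matching. Alternatively an LSH index can work directly on binary descriptors (the LSH parameter values shown here are illustrative, not taken from the listing):

cv::FlannBasedMatcher::FlannBasedMatcher(
        const Ptr<flann::IndexParams>& indexParams = makePtr<flann::KDTreeIndexParams>(),  //index type
        const Ptr<flann::SearchParams>& searchParams = makePtr<flann::SearchParams>()      //search parameters
        );

//default KD-tree index: descriptors must be CV_32F
FlannBasedMatcher matcher;

//LSH index: handles binary descriptors such as ORB directly (table number, key size, multi-probe level)
FlannBasedMatcher lshMatcher(makePtr<flann::LshIndexParams>(12, 20, 2));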

Code Listing 9-27 Matching feature points with the FLANN method

#include <opencv2\opencv.hpp>
#include <iostream>
#include <vector>

using namespace std;
using namespace cv;

void orb_features(Mat &gray, vector<KeyPoint> &keypoints, Mat &descriptions)
{
	Ptr<ORB> orb = ORB::create(1000, 1.2f);
	orb->detect(gray, keypoints);
	orb->compute(gray, keypoints, descriptions);
}

int main()
{
	Mat img1, img2;
	img1 = imread("box.png");
	img2 = imread("box_in_scene.png");

	if (!(img1.data && img2.data))
	{
		cout << "Failed to read the images, please check that the image files are correct" << endl;
		return -1;
	}

	//extract ORB feature points
	vector<KeyPoint> Keypoints1, Keypoints2;
	Mat descriptions1, descriptions2;

	//compute the ORB feature points
	orb_features(img1, Keypoints1, descriptions1);
	orb_features(img2, Keypoints2, descriptions2);

	//check the descriptor data type; the default FLANN (KD-tree) matcher needs CV_32F data, so binary ORB descriptors must be converted
	if ((descriptions1.type() != CV_32F) || (descriptions2.type() != CV_32F))
	{
		descriptions1.convertTo(descriptions1, CV_32F);
		descriptions2.convertTo(descriptions2, CV_32F);
	}

	//match the feature points
	vector<DMatch> matches;  //container for the match results
	FlannBasedMatcher matcher;  //the default parameters are sufficient
	matcher.match(descriptions1, descriptions2, matches);
	cout << "matches=" << matches.size() << endl;  //number of matched feature points

	//find the maximum and minimum distances; for ORB feature points min_dist should be set somewhat larger
	double max_dist = 0; double min_dist = 100;
	for (int i = 0; i < descriptions1.rows; i++)
	{
		double dist = matches[i].distance;
		if (dist < min_dist) min_dist = dist;
		if (dist > max_dist) max_dist = dist;
	}
	cout << " Max dist :" << max_dist << endl;
	cout << " Min dist :" << min_dist << endl;

	//keep matches whose distance is below 0.4 times the maximum distance
	std::vector< DMatch > good_matches;
	for (int i = 0; i < descriptions1.rows; i++)
	{
		if (matches[i].distance < 0.40 * max_dist)
		{
			good_matches.push_back(matches[i]);
		}
	}
	cout << "good_matches=" << good_matches.size() << endl;  //匹配成功特征点数目

															 //绘制匹配结果
	Mat outimg, outimg1;
	drawMatches(img1, Keypoints1, img2, Keypoints2, matches, outimg);
	drawMatches(img1, Keypoints1, img2, Keypoints2, good_matches, outimg1);
	imshow("未筛选结果", outimg);
	imshow("筛选结果", outimg1);

	waitKey(0);
	return 0;
}

9.3.5 Optimizing Feature Point Matching with RANSAC

findHomography()
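
findHomography() estimates the projective transformation between two point sets; when the RANSAC method is selected it also reports, through the optional mask, which point pairs are inliers, which is what the listing below relies on. Its prototype in OpenCV 4.x is roughly:

Mat cv::findHomography(InputArray srcPoints,              //points in the first (query) image
                       InputArray dstPoints,              //corresponding points in the second (train) image
                       int method = 0,                    //0 (least squares), RANSAC, LMEDS or RHO
                       double ransacReprojThreshold = 3,  //maximum reprojection error for a pair to count as an inlier
                       OutputArray mask = noArray(),      //optional output mask: non-zero entries mark inlier pairs
                       const int maxIters = 2000,         //maximum number of RANSAC iterations
                       const double confidence = 0.995    //confidence level
                       );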

Code Listing 9-29 Optimizing feature point matches with the RANSAC algorithm

#include <iostream>
#include <opencv2\opencv.hpp>
#include <vector>
using namespace std;
using namespace cv;  

void match_min(vector<DMatch> matches, vector<DMatch> & good_matches)
{
	double min_dist = 10000, max_dist = 0;
	for (int i = 0; i < matches.size(); i++)
	{
		double dist = matches[i].distance;
		if (dist < min_dist) min_dist = dist;
		if (dist > max_dist) max_dist = dist;
	}
	cout << "min_dist=" << min_dist << endl;
	cout << "max_dist=" << max_dist << endl;

	for (int i = 0; i < matches.size(); i++)
		if (matches[i].distance <= max(2 * min_dist, 20.0))
			good_matches.push_back(matches[i]);
}

//filter the matches with the RANSAC algorithm
void ransac(vector<DMatch> matches, vector<KeyPoint> queryKeyPoint, vector<KeyPoint> trainKeyPoint, vector<DMatch> &matches_ransac)
{
	//containers for the coordinates of the matched point pairs
	vector<Point2f> srcPoints(matches.size()), dstPoints(matches.size());
	//copy the coordinates of the matched points from the keypoints
	for (int i = 0; i < matches.size(); i++)
	{
		srcPoints[i] = queryKeyPoint[matches[i].queryIdx].pt;
		dstPoints[i] = trainKeyPoint[matches[i].trainIdx].pt;
	}
	
	//filter the matched point pairs with RANSAC
	vector<uchar> inliersMask(srcPoints.size());  //inlier mask filled in by findHomography
	//Mat homography;
	//homography = findHomography(srcPoints, dstPoints, RANSAC, 5, inliersMask);
	findHomography(srcPoints, dstPoints, RANSAC, 5, inliersMask);
	//keep only the match pairs marked as inliers by RANSAC
	for (int i = 0; i < inliersMask.size(); i++)
		if (inliersMask[i])
			matches_ransac.push_back(matches[i]);
}

void orb_features(Mat &gray, vector<KeyPoint> &keypoints, Mat &descriptions)
{
	Ptr<ORB> orb = ORB::create(1000, 1.2f);
	orb->detect(gray, keypoints);
	orb->compute(gray, keypoints, descriptions);
}

int main()
{
	Mat img1 = imread("box.png");  //read the images; adjust the paths to where the files are stored
	Mat img2 = imread("box_in_scene.png");
	if (!(img1.data && img2.data))
	{
		cout << "Failed to read the images, please check that the image files are correct" << endl;
		return -1;
	}

	//extract ORB feature points
	vector<KeyPoint> Keypoints1, Keypoints2;
	Mat descriptions1, descriptions2;

	//compute the ORB feature points
	orb_features(img1, Keypoints1, descriptions1);
	orb_features(img2, Keypoints2, descriptions2);

	//match the feature points
	vector<DMatch> matches, good_min, good_ransac;
	BFMatcher matcher(NORM_HAMMING);
	matcher.match(descriptions1, descriptions2, matches);
	cout << "matches=" << matches.size() << endl;

	//filter by the minimum Hamming distance
	match_min(matches, good_min);
	cout << "good_min=" << good_min.size() << endl;

	//filter the matches with the RANSAC algorithm
	ransac(good_min, Keypoints1, Keypoints2, good_ransac);
	cout << "good_matches.size=" << good_ransac.size() << endl;

	//draw the match results
	Mat outimg, outimg1, outimg2;
	drawMatches(img1, Keypoints1, img2, Keypoints2, matches, outimg);
	drawMatches(img1, Keypoints1, img2, Keypoints2, good_min, outimg1);
	drawMatches(img1, Keypoints1, img2, Keypoints2, good_ransac, outimg2);
	imshow("未筛选结果", outimg);
	imshow("最小汉明距离筛选", outimg1);
	imshow("ransac筛选", outimg2);
	waitKey(0);  //等待键盘输入
	return 0;  //程序结束
}