#include "PictureAlgorithm.h"
#include <QString>
#include <QDateTime>
using namespace std;
// Default constructor: no members need initialization here.
PictureAlgorithm::PictureAlgorithm()
{
}
// Destructor: nothing to release explicitly.
PictureAlgorithm::~PictureAlgorithm()
{
}
/* Grayscale histogram: compute the 256-bin histogram, draw the curve plus x/y axes, return the plot. */
Mat PictureAlgorithm::test1()
{
    Mat image = imread("../x64/Debug/picture/2.jpg", IMREAD_GRAYSCALE);
    imshow("4", image);
    int channels[1] = {0};              //0 = first (and only) channel
    int bins[1] = {256};                //one bin per gray level
    float hRange[2] = {0, 256};         //upper bound is exclusive: 256 so that value 255 is counted (the original {0,255} dropped it)
    const float *ranges[1] = {hRange};  //calcHist requires const float*
    Mat calcDst;                        //output histogram (256x1, CV_32F)
    calcHist(&image, 1, channels, Mat(), calcDst, 1, bins, ranges, true, false); //compute the histogram data
    int histW = 512;  //plot width in pixels
    int histH = 400;  //plot height in pixels
    int binW = cvRound((double)histW / bins[0]); //horizontal pixels per bin (rounded to nearest integer)
    Mat histImage = Mat::zeros(408, 560, CV_8UC3); //canvas is larger than the plot to leave room for axis labels
    Mat yHist = calcDst.clone(); //keep the raw counts for the y-axis labels
    // Scale the counts into [0, histH]. The original normalized to
    // histImage.rows (408), letting the tallest bins overshoot the
    // 400-pixel plot area and draw at negative y coordinates.
    normalize(calcDst, calcDst, 0, histH, NORM_MINMAX, -1, Mat());
    // Histogram curve: connect consecutive bin heights.
    for(int i = 1; i < bins[0]; i++) {
        int x1 = binW * (i - 1);
        int y1 = histH - cvRound(calcDst.at<float>(i-1));
        int x2 = binW * i;
        int y2 = histH - cvRound(calcDst.at<float>(i));
        line(histImage, Point(x1, y1), Point(x2, y2), Scalar(12,23,200), 1, LINE_4, 0);
    }
    // X axis: a tick and a label every 15 bins.
    for(int i = 0; i < bins[0]; i++) {
        if(i % 15 == 0) {
            line(histImage, Point(binW * i, 396), Point(binW * i, 390), Scalar(255,255,0), 1, LINE_4, 0);
            putText(histImage, std::to_string(i), Point(binW * i - 8, 405), FONT_HERSHEY_COMPLEX, 0.4, Scalar(255,255,0), 1, LINE_4, 0);
        }
    }
    // Y axis: label the ticks using the raw (un-normalized) counts.
    double minV, maxV;
    Point minLoc, maxLoc;
    minMaxLoc(yHist, &minV, &maxV, &minLoc, &maxLoc, Mat());
    int ybH = cvRound(maxV / (float)histH); //raw count represented by one vertical pixel
    for(int i = 1; i < histH; i++) {
        if(i % 20 == 0) {
            line(histImage, Point(511, histH - i), Point(517, histH - i), Scalar(255,255,0), 1, LINE_4, 0);
            putText(histImage, std::to_string(ybH * i), Point(520, (histH - i + 5)), FONT_HERSHEY_COMPLEX, 0.4, Scalar(255,255,0), 1, LINE_4, 0);
        }
    }
    imshow("Histogram Demo", histImage);
    return histImage;
}
/* Color (BGR) histogram: per-channel histograms drawn in one plot with axes. */
void PictureAlgorithm::test2()
{
    Mat image = imread("../x64/Debug/picture/3.jpg", IMREAD_REDUCED_COLOR_2);
    imshow("4", image);
    std::vector<Mat> bgrPlane;
    split(image, bgrPlane); //separate the B, G, R planes
    int channels[1] = {0};
    int bins[1] = {256};
    float hRange[2] = {0, 256};        //upper bound is exclusive: 256 includes value 255 (the original {0,255} dropped it)
    const float *ranges[1] = {hRange}; //calcHist requires const float*
    Mat bgrCalc[3];
    calcHist(&bgrPlane[0], 1, channels, Mat(), bgrCalc[0], 1, bins, ranges, true, false);
    calcHist(&bgrPlane[1], 1, channels, Mat(), bgrCalc[1], 1, bins, ranges, true, false);
    calcHist(&bgrPlane[2], 1, channels, Mat(), bgrCalc[2], 1, bins, ranges, true, false);
    int histW = 512;
    int histH = 400;
    int binW = cvRound((double)histW / bins[0]);
    Mat histImage = Mat::zeros(408, 560, CV_8UC3); //extra margin for axis labels
    Mat yHist[3]; //raw counts kept for the y-axis labels
    yHist[0] = bgrCalc[0].clone();
    yHist[1] = bgrCalc[1].clone();
    yHist[2] = bgrCalc[2].clone();
    // Scale to [0, histH]. The original normalized to histImage.rows (408),
    // letting the tallest bins overshoot the 400-pixel plot area.
    normalize(bgrCalc[0], bgrCalc[0], 0, histH, NORM_MINMAX, -1, Mat());
    normalize(bgrCalc[1], bgrCalc[1], 0, histH, NORM_MINMAX, -1, Mat());
    normalize(bgrCalc[2], bgrCalc[2], 0, histH, NORM_MINMAX, -1, Mat());
    Scalar szBGR[3]; //curve color matches the channel it plots
    szBGR[0] = Scalar(255,0,0);
    szBGR[1] = Scalar(0,255,0);
    szBGR[2] = Scalar(0,0,255);
    // Histogram curves.
    for(int i = 1; i < bins[0]; i++) {
        for(int j = 0; j < 3; j++) {
            int x1 = binW * (i - 1);
            int y1 = histH - cvRound(bgrCalc[j].at<float>(i-1));
            int x2 = binW * i;
            int y2 = histH - cvRound(bgrCalc[j].at<float>(i));
            line(histImage, Point(x1, y1), Point(x2, y2), szBGR[j], 1, LINE_4, 0);
        }
    }
    // X axis: a tick and a label every 15 bins.
    for(int i = 0; i < bins[0]; i++) {
        if(i % 15 == 0) {
            line(histImage, Point(binW * i, 396), Point(binW * i, 390), Scalar(255,255,0), 1, LINE_4, 0);
            putText(histImage, std::to_string(i), Point(binW * i - 8, 405), FONT_HERSHEY_COMPLEX, 0.4, Scalar(255,255,0), 1, LINE_4, 0);
        }
    }
    // Y axis: labels derived from the per-channel maxima of the raw counts.
    double minV[3], maxV[3];
    Point minLoc, maxLoc;
    double allMax = 0;
    for(int j = 0; j < 3; j++) {
        minMaxLoc(yHist[j], &minV[j], &maxV[j], &minLoc, &maxLoc, Mat());
        allMax += maxV[j]; //NOTE(review): summing the three maxima is a rough scale for the shared axis -- confirm intent
    }
    int ybH = cvRound(allMax / (float)histH);
    for(int i = 1; i < histH; i++) {
        if(i % 20 == 0) {
            line(histImage, Point(511, histH - i), Point(517, histH - i), Scalar(255,255,0), 1, LINE_4, 0);
            putText(histImage, std::to_string(ybH * i), Point(520, (histH - i + 5)), FONT_HERSHEY_COMPLEX, 0.4, Scalar(255,255,0), 1, LINE_4, 0);
        }
    }
    imshow("Histogram Demo", histImage);
}
/* 2-D histogram: joint Hue-Saturation histogram rendered three different ways. */
void PictureAlgorithm::test3()
{
Mat image = imread("../x64/Debug/picture/4.jpg", IMREAD_REDUCED_COLOR_2);
imshow("1", image);
Mat imgHsv, histList;
cvtColor(image, imgHsv, COLOR_BGR2HSV); //convert to Hue (H), Saturation (S), Value (V)
int sBin = 256, hBin = 180; //180 hue bins, 256 saturation bins (OpenCV 8-bit hue spans 0..179)
int histBins[2] = {hBin, sBin};
float hRange[] = {0, 180};
float sRange[] = {0, 256};
const float *ranges[2] = {hRange, sRange};
int hsChannels[] = {0, 1}; //use channel 0 (H) and channel 1 (S)
calcHist(&imgHsv, 1, hsChannels, Mat(), histList, 2, histBins, ranges, true, false);
Mat hist2d = Mat::zeros(sBin, hBin, CV_8UC3); //one pixel per (h, s) bin
for(int i=0; i<hBin; i++) {
for(int j=0; j<sBin; j++) {
float binVal = histList.at<float>(i, j);
rectangle(hist2d, Point(i, j), Point(i+1, j+1), Scalar::all(binVal), -1); //Scalar::all(v) assigns v to every channel; counts saturate to 255 when written into CV_8UC3
}
}
namedWindow("2D直方图", WINDOW_NORMAL);
imshow("2D直方图", histList); //raw float histogram (counts > 1 display as white)
namedWindow("2维灰度直方图", WINDOW_NORMAL);
imshow("2维灰度直方图", hist2d); //2-D histogram as a grayscale image
applyColorMap(hist2d, hist2d, COLORMAP_JET);
namedWindow("2维彩色直方图", WINDOW_NORMAL);
imshow("2维彩色直方图", hist2d); //same histogram with the JET color map
}
/* Histogram equalization: equalize each BGR channel independently (HSV/YUV variants kept disabled below). */
void PictureAlgorithm::test4()
{
/*Mat src = imread("../x64/Debug/picture/2.jpg", IMREAD_GRAYSCALE);
imshow("灰度图", src);
equalizeHist(src, src);
imshow("灰度图均衡化", src);*/
Mat image = imread("../x64/Debug/picture/24.png", IMREAD_COLOR); //equalize the B, G and R channels separately
imshow("彩色图", image);
std::vector<Mat> bgrPlane;
split(image, bgrPlane);
equalizeHist(bgrPlane[0], bgrPlane[0]);
equalizeHist(bgrPlane[1], bgrPlane[1]);
equalizeHist(bgrPlane[2], bgrPlane[2]);
Mat image1;
merge(bgrPlane, image1);
imshow("彩色图均衡化", image1);
//Mat image = imread("../x64/Debug/picture/1.jpg", IMREAD_REDUCED_COLOR_2); //variant: equalize only the V (brightness) channel in HSV space
//imshow("彩色图", image);
//Mat hsv;
//cvtColor(image, hsv, COLOR_BGR2HSV);
//std::vector<Mat> bgrPlane;
//split(hsv, bgrPlane);
//equalizeHist(bgrPlane[2], bgrPlane[2]);
//Mat image1;
//merge(bgrPlane, image1);
//cvtColor(image1, image, COLOR_HSV2BGR);
//imshow("彩色图均衡化", image);
//Mat image = imread("../x64/Debug/picture/1.jpg", IMREAD_REDUCED_COLOR_2); //variant: equalize only the Y (luma) channel in YUV space
//imshow("彩色图", image);
//Mat yuv;
//cvtColor(image, yuv, COLOR_BGR2YUV);
//std::vector<Mat> bgrPlane;
//split(yuv, bgrPlane);
//equalizeHist(bgrPlane[0], bgrPlane[0]);
//Mat image1;
//merge(bgrPlane, image1);
//cvtColor(image1, image, COLOR_YUV2BGR);
//imshow("彩色图均衡化", image);
}
/* Convolution/smoothing filters: mean, directional, box, median, bilateral and Gaussian. */
void PictureAlgorithm::test5()
{
Mat image = imread("../x64/Debug/picture/2.jpg", IMREAD_REDUCED_COLOR_2);
imshow("22", image);
Mat meanCon;
blur(image, meanCon, Size(5,5), Point(-1, -1)); //mean (box) blur; Point(-1,-1) anchors at the kernel center
imshow("1", meanCon);
Mat verCon;
blur(image, verCon, Size(1,9), Point(-1, -1)); //vertical 1x9 kernel: blurs along the vertical direction only
imshow("3", verCon);
Mat box;
boxFilter(image, box, -1, Size(5,5)); //blur() is the normalized form of boxFilter()
imshow("4", box);
//median filter
Mat median;
medianBlur(image, median, 9); //each pixel is replaced by the median of its neighborhood (good against salt-and-pepper noise)
imshow("5", median);
//bilateral filter
Mat bilateral;
bilateralFilter(image, bilateral, 9, 50, 12.5); //combines spatial closeness with intensity similarity: denoises and smooths while preserving edges
imshow("6", bilateral);
//Gaussian
Mat image1;
GaussianBlur(image, image1, Size(9,9), 15); //sigmaY defaults to sigmaX
imshow("22", image1);
}
/* Gaussian blur with different kernel/sigma combinations. */
void PictureAlgorithm::test6()
{
Mat image = imread("../x64/Debug/picture/2.jpg", IMREAD_REDUCED_COLOR_2);
imshow("1", image);
Mat image1;
GaussianBlur(image, image1, Size(9,9), 15); //sigmaY defaults to sigmaX
imshow("22", image1);
GaussianBlur(image, image1, Size(15,5), 15, 5); //asymmetric kernel with distinct sigmaX/sigmaY
imshow("3", image1);
GaussianBlur(image, image1, Size(0,0), 15, 5); //Size(0,0): kernel size is derived from the sigmas
imshow("4", image1);
}
/* Bilateral (edge-preserving) blur. */
void PictureAlgorithm::test7()
{
//denoise while keeping edges sharp
Mat image = imread("../x64/Debug/picture/9.jpg", IMREAD_REDUCED_COLOR_2);
imshow("1", image);
Mat image1;
bilateralFilter(image, image1, 0, 200, 10); //d=0: the neighborhood diameter is derived from sigmaSpace
imshow("22", image1);
}
/* Downsampling: resize with each interpolation mode, then pyrDown/pyrUp. */
void PictureAlgorithm::test8()
{
Mat src = imread("../x64/Debug/picture/5.jpg", IMREAD_REDUCED_COLOR_2);
Mat enlarge, narrow, mPyr, mPyrU; //NOTE(review): 'enlarge' is declared but never used
imshow("1", src);
resize(src, narrow, Size(src.cols / 2, src.rows / 2), 0, 0, INTER_LINEAR); //bilinear
imshow("双线性", narrow);
resize(src, narrow, Size(src.cols / 2, src.rows / 2), 0, 0, INTER_NEAREST); //nearest neighbour
imshow("最近邻", narrow);
resize(src, narrow, Size(src.cols / 2, src.rows / 2), 0, 0, INTER_CUBIC); //bicubic
imshow("双三次", narrow);
resize(src, narrow, Size(src.cols / 2, src.rows / 2), 0, 0, INTER_AREA); //pixel-area resampling (preferred for shrinking)
imshow("像素区域重采样", narrow);
resize(src, narrow, Size(src.cols / 2, src.rows / 2), 0, 0, INTER_LANCZOS4); //Lanczos interpolation
imshow("插值", narrow);
pyrDown(src, mPyr); //convolve with a Gaussian kernel, then drop every even row and column
imshow("降采样", mPyr);
pyrUp(mPyr, mPyrU); //inject zero rows/columns, then convolve with 4x the pyrDown kernel (the original comment described this backwards)
imshow("升采样", mPyrU);
}
/* Edge detection to extract image contours. */
void PictureAlgorithm::test9()
{
Mat image = imread("../x64/Debug/picture/8.jpg", IMREAD_REDUCED_COLOR_2);
imshow("22", image);
Mat meanCon;
blur(image, meanCon, Size(3,3), Point(-1, -1)); //mean blur first: smoothing gives cleaner edges
Mat dst;
Canny(meanCon, dst, 50, 100, 3, false); //Canny edges (low=50, high=100, aperture=3)
imshow("4", dst);
Mat mask;
mask.create(image.size(), image.type()); //NOTE(review): create() leaves the pixels uninitialized; non-edge areas keep arbitrary data -- consider zeroing first
image.copyTo(mask, dst); //copy only the pixels where an edge was detected
imshow("5", mask);
//downsample twice, then run the edge detector (cartoon-like result)
Mat mask1,dst1;
pyrDown(image, mask1);
pyrDown(mask1, mask1);
Canny(mask1, dst1, 50, 100, 3, false);
namedWindow("6", WINDOW_NORMAL);
imshow("6", dst1);
}
/* Covariance matrix of a set of vectors. */
void PictureAlgorithm::test10()
{
    // Three 1x3 sample row vectors: (1,2,3), (4,5,6), (7,8,9).
    Mat_<float> samples[3];
    for(int i = 0; i < 3; i++) {
        samples[i].create(1, 3);
        samples[i](0, 0) = i*3 + 1;
        samples[i](0, 1) = i*3 + 2;
        samples[i](0, 2) = i*3 + 3;
    }
    Mat_<double> covMat;  //output: covariance matrix of the samples
    Mat_<double> meanMat; //output: mean vector of the samples
    calcCovarMatrix(samples, 3, covMat, meanMat, CovarFlags::COVAR_NORMAL);
    // The original also declared an unused local `std::string fileName` here; removed.
    std::cout << "samples[0]" << samples[0] << std::endl;
    std::cout << "samples[1]" << samples[1] << std::endl;
    std::cout << "samples[2]" << samples[2] << std::endl;
    std::cout << "meanMat" << meanMat << std::endl;
    std::cout << "covMat" << covMat << std::endl;
}
/* Cartesian <-> polar coordinate conversion, plus log-polar image mapping. */
void PictureAlgorithm::test11()
{
//Cartesian -> polar
Mat x = (Mat_<float>(3, 1) << 3, 6, 1);
Mat y = (Mat_<float>(3, 1) << 4, 8, 1);
Mat magnitude, angle;
cartToPolar(x, y, magnitude, angle); //angle is returned in radians
std::cout << "\nmagnitude: " << magnitude.t() << std::endl;
std::cout << "\nangle: " << angle.t() * 180. / CV_PI << std::endl; //printed in degrees
//polar -> Cartesian
Mat magnitude1 = (Mat_<float>(3, 1) << 5, 10, 1.4142135);
Mat angle1 = (Mat_<float>(3, 1) << 0.92740309, 0.92740309, 0.78523159);
Mat x1, y1;
polarToCart(magnitude1, angle1, x1, y1);
std::cout << "\nx: " << x1.t() << std::endl;
std::cout << "\ny: " << y1.t() << std::endl;
//Cartesian -> log-polar image mapping
Mat image = imread("../x64/Debug/picture/2.jpg", IMREAD_GRAYSCALE);
Point2f center(480, 640);
Mat dst;
float M = 100;//the larger M is, the more samples are taken along the horizontal direction
logPolar(image, dst, center, M, WARP_FILL_OUTLIERS); //NOTE(review): logPolar is deprecated in newer OpenCV; warpPolar is the replacement
imshow("对数极坐标变换", dst);
}
/* checkRange and per-pixel image comparison. */
void PictureAlgorithm::test12()
{
Mat image = imread("../x64/Debug/picture/1.jpg", IMREAD_REDUCED_COLOR_2);
Mat image2 = imread("../x64/Debug/picture/4.jpg", IMREAD_REDUCED_COLOR_2);
bool ret = checkRange(image, true); //quiet=true: return false instead of throwing; NOTE(review): the result is never used
Mat image3;
cv::compare(image, image2, image3, CMP_GT); //per-pixel "greater than" -> 255/0 mask; assumes both images load with identical dimensions -- TODO confirm
imshow("6", image3);
}
/* Matrix symmetrization with completeSymm. */
void PictureAlgorithm::test13()
{
float fData[25] = { 0 };
for(int i = 0; i < 25; i++) {
fData[i] = i+10; //values 10..34
}
float fData1[25] = { 0 };
for(int i = 0; i < 25; i++) {
fData1[i] = i+10;
}
cv::Mat m(5, 5, CV_32FC1, fData);
cv::Mat n(5, 5, CV_32FC1, fData1);
std::cout << m << "\n" << std::endl;
completeSymm(m, true); //lowerToUpper=true: mirror the lower triangle into the upper
completeSymm(n, false); //lowerToUpper=false: mirror the upper triangle into the lower
std::cout << m << "\n" << std::endl;
std::cout << n << "\n" << std::endl;
Mat image = imread("../x64/Debug/picture/17.jpg", IMREAD_REDUCED_COLOR_2); //must be a square image
completeSymm(image, false);
imshow("6", image);
}
/* Image enhancement via convertScaleAbs, plus countNonZero. */
void PictureAlgorithm::test14()
{
Mat image = imread("../x64/Debug/picture/3.jpg", IMREAD_REDUCED_COLOR_2);
Mat image1;
convertScaleAbs(image, image1, 1.2, 20); //dst = saturate_cast<uchar>(|1.2*src + 20|): boosts contrast and brightness
imshow("6", image);
imshow("7", image1);
//count of non-zero matrix elements
Mat image2 = imread("../x64/Debug/picture/3.jpg", IMREAD_GRAYSCALE);
int count = countNonZero(image2); //counts the non-zero elements (input must be single-channel)
std::cout << count << "\n" << std::endl;
}
/* Discrete cosine transform and its inverse. */
void PictureAlgorithm::test15()
{
    Mat image = imread("../x64/Debug/picture/2.jpg", IMREAD_GRAYSCALE);
    Mat image1;
    image.convertTo(image1, CV_32F); //dct requires a float matrix
    normalize(image1, image1, 1.0, 0, NORM_MINMAX); //normalize to [0,1] first
    imshow("1", image1);
    Mat image2;
    // Forward DCT (flags = 0). The original passed DCT_INVERSE here, which
    // turned this "forward" step into a second inverse transform and
    // contradicted the function's stated purpose.
    cv::dct(image1, image2, 0);
    imshow("22", image2);
    Mat image3;
    cv::idct(image2, image3); //inverse DCT recovers the image (up to float rounding)
    imshow("3", image3);
}
/* 傅里叶变换和逆变换 */
void PictureAlgorithm::test16()
{
Mat image = imread("../x64/Debug/picture/2.jpg", IMREAD_GRAYSCALE);
Mat image1;
image.convertTo(image1, CV_32F);
normalize(image1, image1, 1.0, 0, NORM_MINMAX);//先归一化
imshow("1", image1);
Mat image2;
cv::dft(image1, image2, DFT_INVERSE);
imshow("22", image2);
Mat image3;
cv::idft(image2, image3, DCT_INVERSE); //逆变换会丢帧
imshow("3", image3);
}
/* Matrix eigenvalues and eigenvectors (plus a per-pixel divide demo). */
void PictureAlgorithm::test17()
{
Mat image = imread("../x64/Debug/picture/1.jpg", IMREAD_GRAYSCALE);
Mat image1 = imread("../x64/Debug/picture/6.jpg", IMREAD_GRAYSCALE);
Mat dst;
divide(image, image1, dst); //per-pixel division; assumes both images load with identical dimensions -- TODO confirm
imshow("7", dst);
Mat src = imread("../x64/Debug/picture/10.jpg", IMREAD_GRAYSCALE);
Mat src1, dst1, dst2;
src.convertTo(src1, CV_32FC1);
normalize(src1, src1, 1.0, 0, NORM_MINMAX); //normalize first
eigen(src1, dst1, dst2); //dst1 = eigenvalues in descending order, dst2 = eigenvectors; eigen() requires a symmetric square matrix -- confirm the input satisfies this
imshow("1", dst1);
std::cout << dst1 << "\n" << std::endl;
}
/* Per-element exponential. */
void PictureAlgorithm::test18()
{
Mat src = imread("../x64/Debug/picture/3.jpg", IMREAD_GRAYSCALE);
Mat src1, src2;
src.convertTo(src1, CV_32FC1); //exp() needs a float matrix
exp(src1, src2); //src2 = e^src1, element-wise
imshow("1", src2);
}
/* Generalized matrix multiplication (gemm). */
void PictureAlgorithm::test19()
{
    RNG &rng = theRNG();
    // Mat_<float> already fixes the element type. The original passed CV_32F
    // as a third constructor argument, which Mat_ interprets as a fill VALUE
    // (5.0f), not a type -- misleading, and pointless since rng.fill
    // overwrites every element anyway.
    Mat_<float> A(5, 10);
    Mat_<float> B(10, 5);
    Mat_<float> C = Mat::zeros(5, 5, CV_32F);
    rng.fill(A, RNG::NORMAL, 1, 100); //mean=1, stddev=100
    rng.fill(B, RNG::NORMAL, 2, 50);
    gemm(A, B, 1, C, 0, C); //C = 1*A*B + 0*C
    cout << A << "\n" << endl;
    cout << B << "\n" << endl;
    cout << C << "\n" << endl;
}
/* Matrix inverse. */
void PictureAlgorithm::test20()
{
float fData[25] = { 0 };
for(int i = 0; i < 25; i++) {
fData[i] = i+10; //rows are linearly dependent, so this matrix is singular
}
cv::Mat m(5, 5, CV_32FC1, fData);
Mat src2;
invert(m, src2, DECOMP_SVD); //SVD decomposition: yields the pseudo-inverse even for this singular matrix
std::cout << m << "\n" << std::endl;
std::cout << src2 << "\n" << std::endl;
}
/* Natural logarithm, element-wise. */
void PictureAlgorithm::test21()
{
float fData[25] = { 0 };
for(int i = 0; i < 25; i++) {
fData[i] = i+10; //values 10..34, all positive so log is well-defined
}
cv::Mat m(5, 5, CV_32FC1, fData);
Mat src1;
log(m, src1); //src1 = ln(m), element-wise
std::cout << m << "\n" << std::endl;
std::cout << src1 << "\n" << std::endl;
}
/* 矩阵的查找表的转换 */
void PictureAlgorithm::test22()
{
//单通道
uchar lutData1[256];
for(int i = 0; i<256; i++) { //灰度为0-100的像素的灰度变成0,101-200的变成100,201-255的变成255
if(i <= 100) {
lutData1[i] = 0;
}
if (i > 100 && i <= 200) {
lutData1[i] = 100;
}
if (i > 200) {
lutData1[i] = 255;
}
}
Mat lut(1, 256, CV_8UC1, lutData1);
Mat src1, src3;
Mat src = imread("../x64/Debug/picture/11.jpg", IMREAD_GRAYSCALE);
LUT(src, lut, src1); //执行数组的查询表转换
imshow("1", src);
imshow("22", src1);
//三通道
uchar lutData[256 * 3];
int j = 0;
for(int i = 0; i<256; i++) {
if (i <= 100) {
lutData[i * 3] = 0;
lutData[i * 3 + 1] = 0;
lutData[i * 3 + 2] = 0;
}
if (i > 100 && i <= 200) {
lutData[i * 3] = 100;
lutData[i * 3 + 1] = 100;
lutData[i * 3 + 2] = 100;
}
if (i > 200) {
lutData[i * 3] = 255;
lutData[i * 3 + 1] = 255;
lutData[i * 3 + 2] = 255;
}
}
Mat lut1(1, 256, CV_8UC3, lutData);
Mat src2 = imread("../x64/Debug/picture/11.jpg", IMREAD_REDUCED_COLOR_2);
LUT(src2, lut1, src3);
imshow("3", src2);
imshow("4", src3);
}
/* Mahalanobis distance. */
void PictureAlgorithm::test23()
{
//five 2-D sample points stored as columns
Mat Pt(2, 5, CV_64FC1);
Pt.at<double>(0, 0) = 2;
Pt.at<double>(1, 0) = 4;
Pt.at<double>(0, 1) = 2;
Pt.at<double>(1, 1) = 2;
Pt.at<double>(0, 2) = 3;
Pt.at<double>(1, 2) = 3;
Pt.at<double>(0, 3) = 4;
Pt.at<double>(1, 3) = 4;
Pt.at<double>(0, 4) = 4;
Pt.at<double>(1, 4) = 2;
std::cout << Pt << std::endl;
//covariance matrix (COVAR_COLS: each column is one sample)
Mat coVar, meanVar;
calcCovarMatrix(Pt, coVar, meanVar, COVAR_NORMAL|COVAR_COLS);
std::cout << "Covar is:\n" << coVar << std::endl;
std::cout << "Mean is:\n" << meanVar << std::endl;
//inverse of the covariance (SVD yields a pseudo-inverse if it is singular)
Mat iCovar;
invert(coVar, iCovar, DECOMP_SVD);
Mat pt1(2, 1, CV_64FC1);
Mat pt2(2, 1, CV_64FC1);
pt1.at<double>(0, 0) = 1;
pt1.at<double>(1, 0) = 1;
pt2.at<double>(0, 0) = 5;
pt2.at<double>(1, 0) = 5;
double Maha1 = Mahalanobis(pt1, meanVar, iCovar); //distance of each query point from the sample mean
double Maha2 = Mahalanobis(pt2, meanVar, iCovar);
std::cout << "Maha 距离1是:\t" << Maha1 << std::endl;
std::cout << "Maha 距离2是:\t" << Maha2 << std::endl;
}
/* Multiply two spectra (mulSpectrums), plus mulTransposed. */
void PictureAlgorithm::test24()
{
Mat image = imread("../x64/Debug/picture/2.jpg", IMREAD_GRAYSCALE);
Mat image4 = imread("../x64/Debug/picture/5.jpg", IMREAD_GRAYSCALE);
Mat image1;
image.convertTo(image1, CV_32F);
Mat image2;
cv::dft(image1, image2, DFT_INVERSE); //NOTE(review): DFT_INVERSE on the first transform looks unintended -- a forward dft (flags 0) would be expected before multiplying spectra; confirm
imshow("22", image2);
Mat image11;
image4.convertTo(image11, CV_32F);
Mat image22;
cv::dft(image11, image22, DFT_INVERSE);
imshow("1", image22);
Mat dst;
mulSpectrums(image2, image22, dst, DFT_COMPLEX_OUTPUT); //per-element spectrum product; NOTE(review): mulSpectrums' flags parameter expects 0 or DFT_ROWS, not DFT_COMPLEX_OUTPUT -- verify. Assumes both images have identical dimensions
imshow("4", dst);
Mat dst1;
mulTransposed(image2, dst1, 1); //aTa=true: dst1 = image2^T * image2
imshow("5", dst1);
}
//Return the dominant orientation (radians) of a point set via PCA, drawing the centroid and both principal axes on img.
double getOrientation(std::vector<Point> &pts, Mat &img)
{
    // Pack the contour points into an Nx2 double matrix (x, y per row) as PCA input.
    Mat data_pts = Mat(static_cast<int>(pts.size()), 2, CV_64FC1);
    for(int i = 0; i < data_pts.rows; ++i) {
        data_pts.at<double>(i, 0) = pts[i].x;
        data_pts.at<double>(i, 1) = pts[i].y;
    }
    PCA pca_analysis(data_pts, Mat(), 0); //run the PCA analysis
    // The PCA mean is the centroid of the contour (the image/contour center in this demo).
    Point pos = Point(pca_analysis.mean.at<double>(0, 0), pca_analysis.mean.at<double>(0, 1));
    std::vector<Point2d> eigen_vecs(2); //principal directions (eigenvectors)
    std::vector<double> eigen_val(2);   //corresponding eigenvalues
    // BUG FIX: the original drew and returned INSIDE this loop, so it exited
    // after i == 0; eigen_vecs[1]/eigen_val[1] were never filled and the
    // second axis was drawn with zero length. Collect both components first.
    for(int i = 0; i < 2; ++i) {
        eigen_vecs[i] = Point2d(pca_analysis.eigenvectors.at<double>(i, 0), pca_analysis.eigenvectors.at<double>(i, 1));
        eigen_val[i] = pca_analysis.eigenvalues.at<double>(i, 0);
    }
    circle(img, pos, 3, CV_RGB(255, 0, 255), 2); //small circle at the centroid
    // Draw each principal axis: eigenvector scaled by its eigenvalue; the 0.02
    // factor just keeps the vectors at an on-screen length.
    line(img, pos, pos + 0.02 * Point(eigen_vecs[0].x * eigen_val[0], eigen_vecs[0].y * eigen_val[0]), CV_RGB(255, 255, 0), 1, LINE_AA);
    line(img, pos, pos + 0.02 * Point(eigen_vecs[1].x * eigen_val[1], eigen_vecs[1].y * eigen_val[1]), CV_RGB(0, 255, 255), 1, LINE_AA);
    // The strongest eigenvector (largest eigenvalue) gives the orientation.
    return atan2(eigen_vecs[0].y, eigen_vecs[0].x);
}
/* PCA: find each object's main orientation (centroid). */
void PictureAlgorithm::test25()
{
Mat src = imread("../x64/Debug/picture/12.png", IMREAD_REDUCED_COLOR_2);
imshow("输入图像", src);
Mat gray,binary;
cvtColor(src, gray, COLOR_BGR2GRAY);
threshold(gray, binary, 150, 255, THRESH_BINARY); //binarize
imshow("二值化", binary);
std::vector<std::vector<Point> > contours;
std::vector<Vec4i> hierarchy;
findContours(binary, contours, hierarchy, RETR_LIST, CHAIN_APPROX_NONE); //find all contours
for(size_t i = 0; i < contours.size(); ++i) { //analyze contours to locate the workpieces
double area = contourArea(contours[i]); //contour area
if (area < 1e2 || 1e4 < area) { //skip regions that are too small or too large (1e2 = 1x10^2)
continue;
}
drawContours(src, contours, i, Scalar(0, 0, 255), 2, 8, hierarchy, 0); //draw this contour
std::string str = "轮廓:" + std::to_string(i);
imshow(str, src);
double angle = getOrientation(contours[i], src); //draw the contour's centroid and principal direction
std::cout << angle << std::endl;
std::string str1 = "中心角度:" + std::to_string(i);
imshow(str1, src);
}
}
//Normalize an image into the displayable 0-255 range; unexpected channel counts pass through unchanged.
Mat norm_0_255(const Mat& src)
{
    Mat dst;
    const int channelCount = src.channels();
    if (channelCount == 1) {
        cv::normalize(src, dst, 0, 255, NORM_MINMAX, CV_8UC1);
    } else if (channelCount == 3) {
        cv::normalize(src, dst, 0, 255, NORM_MINMAX, CV_8UC3);
    } else {
        src.copyTo(dst);
    }
    return dst;
}
//Flatten each image in src into one row of a single (n x d) matrix of type rtype (optionally scaled by alpha/beta).
Mat asRowMatrix(const std::vector<Mat>& src, int rtype, double alpha = 1, double beta = 0)
{
    size_t n = src.size(); //number of samples
    if (n == 0) { //no samples: return an empty matrix
        return Mat();
    }
    size_t d = src[0].total(); //dimensionality of each sample
    // Explicit casts: Mat's constructor takes int, and the original's implicit
    // size_t -> int narrowing drew compiler warnings.
    Mat data(static_cast<int>(n), static_cast<int>(d), rtype);
    for(size_t i = 0; i < n; i++) { //size_t index: the original compared a signed int against n
        Mat xi = data.row(static_cast<int>(i));
        if (src[i].isContinuous()) { //reshape to a 1-row, d-column view and convert into place
            src[i].reshape(1, 1).convertTo(xi, rtype, alpha, beta);
        }
        else { //reshape() needs continuous data, so clone first
            src[i].clone().reshape(1, 1).convertTo(xi, rtype, alpha, beta);
        }
    }
    return data;
}
/* PCA dimensionality reduction on a tiny image data set. */
void PictureAlgorithm::test26()
{
std::vector<Mat> db;
db.push_back(imread("../x64/Debug/picture/13.png", IMREAD_GRAYSCALE));
db.push_back(imread("../x64/Debug/picture/14.png", IMREAD_GRAYSCALE));
Mat data = asRowMatrix(db, CV_32FC1); //one observation (flattened image) per row
int num_components = 1; //keep 1 principal component (the original comment incorrectly said 5)
PCA pca(data, Mat(), 0, num_components); //run PCA
Mat mean = pca.mean.clone(); //copy the PCA results
Mat eigenvalues = pca.eigenvalues.clone();
Mat eigenvectors = pca.eigenvectors.clone();
imshow("avg", norm_0_255(mean.reshape(1, db[0].rows))); //the "mean face"
imshow("pc1", norm_0_255(pca.eigenvectors.row(0)).reshape(1, db[0].rows)); //the first "eigenface"
}
/* Image compression via truncated SVD. */
void PictureAlgorithm::test27()
{
Mat image = imread("../x64/Debug/picture/15.jpg", IMREAD_GRAYSCALE);
imshow("1", image);
Mat temp(image.size(), CV_32FC1, Scalar(0));
image.convertTo(image, CV_32FC1);
Mat U, W, V;
SVD::compute(image, W, U, V);//unlike MATLAB, OpenCV returns V already transposed; use V=V.t() to transpose it back
Mat w(image.rows, image.rows, CV_32FC1, Scalar(0));//OpenCV returns the singular values W as a column vector rather than a diagonal matrix, so build the diagonal matrix ourselves
double theratio = 0.1; //compression ratio -- the smaller it is, the stronger the compression
int len = theratio * W.rows; //number of singular values to keep
for(int i = 0; i < len; i++) {
w.ptr<float>(i)[i] = W.ptr<float>(i)[0]; //copy the kept singular values onto the diagonal
}
temp = U * w * V; //reconstruct from the truncated decomposition
temp.convertTo(temp, CV_8UC1);
namedWindow("22", WINDOW_FREERATIO);
imshow("22", temp);
}
/* Erosion. */
void PictureAlgorithm::test28()
{
//example 1: erode a synthetic white square
Mat test = cv::Mat::zeros(640, 640, CV_8UC1);
rectangle(test, cv::Rect(30, 30, 50, 50), 255, -1); //-1: filled rectangle
Mat element = getStructuringElement(MORPH_RECT, Size(9, 9));
Mat result;
erode(test, result, element);
imshow("original", test);
imshow("result", result);
//example 2: erode a real image
Mat a = imread("../x64/Debug/picture/15.jpg");
Mat b;
Mat kernel = getStructuringElement(MORPH_RECT, Size(3, 3));
erode(a, b, kernel);
imshow("a", a);
imshow("b", b);
//example 3
Mat d;
morphologyEx(a, d, MORPH_ERODE, kernel); //same result as b
imshow("d", d);
}
/* Dilation. */
void PictureAlgorithm::test29()
{
//example 1: dilate a synthetic white square
Mat test = cv::Mat::zeros(640, 640, CV_8UC1);
rectangle(test, cv::Rect(30, 30, 50, 50), 255, -1); //-1: filled rectangle
Mat element = getStructuringElement(MORPH_RECT, Size(9, 9));
Mat result;
dilate(test, result, element);
imshow("original", test);
imshow("result", result);
//example 2
Mat a = imread("../x64/Debug/picture/15.jpg");
Mat b, d;
Mat kernel = getStructuringElement(MORPH_RECT, Size(3, 3));
dilate(a, b, kernel);
erode(b, d, kernel); //dilating and then eroding does NOT recover the original image
imshow("a", a);
imshow("b", b);
imshow("d", d);
//example 3
Mat e;
morphologyEx(a, e, MORPH_DILATE, kernel); //same result as b
imshow("e", e);
}
/* More morphologyEx operations: open, close, gradient, top-hat, black-hat, hit-or-miss. */
void PictureAlgorithm::test30()
{
Mat kernel = getStructuringElement(MORPH_RECT, Size(7, 7));
//opening (erode then dilate)
Mat a = imread("../x64/Debug/picture/18.png");
Mat b;
morphologyEx(a, b, MORPH_OPEN, kernel); //removes small spurs/burrs
imshow("a", a);
imshow("b", b);
//closing (dilate then erode)
Mat c;
morphologyEx(a, c, MORPH_CLOSE, kernel); //removes small dark specks (bigger specks need a bigger kernel, otherwise they survive)
imshow("c", c);
//morphological gradient
Mat aa = imread("../x64/Debug/picture/19.png");
Mat d;
morphologyEx(aa, d, MORPH_GRADIENT, kernel); //dilated image minus eroded image
imshow("aa", aa);
imshow("d", d);
//top-hat
Mat e;
morphologyEx(a, e, MORPH_TOPHAT, kernel); //original image minus its opening
imshow("e", e);
//black-hat
Mat f;
morphologyEx(a, f, MORPH_BLACKHAT, kernel); //closing minus the original image (the original comment had the operands reversed)
imshow("f", f);
//hit-or-miss (extracts specific patterns from a binary image)
Mat binary, g;
Mat aaa = imread("../x64/Debug/picture/19.png", IMREAD_GRAYSCALE);
threshold(aaa, binary, 0, 255, THRESH_BINARY_INV | THRESH_OTSU);
morphologyEx(binary, g, MORPH_HITMISS, kernel); //here the effect resembles an inversion
imshow("g", g);
aaa = imread("../x64/Debug/picture/20.png", IMREAD_GRAYSCALE);
threshold(aaa, binary, 0, 255, THRESH_BINARY_INV | THRESH_OTSU);
morphologyEx(binary, g, MORPH_HITMISS, kernel); //here it picks out nodule-like spots
imshow("aaa", aaa);
imshow("gg", g);
}
/* threshold and accumulate. */
void PictureAlgorithm::test31()
{
Mat a = imread("../x64/Debug/picture/6.jpg");
Mat b;
std::vector<Mat> vPlanes;
split(a, vPlanes);
addWeighted(vPlanes[2], 1.0/3.0, vPlanes[1], 1.0/3.0, 0, b); //average the three channels into one gray plane
addWeighted(b, 1, vPlanes[0], 1.0/3.0, 0, b);
threshold(b, b, 100, 255, THRESH_TRUNC); //THRESH_TRUNC: values above 100 are clamped to 100
imshow("b", b);
Mat c = Mat::zeros(a.size(), CV_32F);
Mat cc = Mat::zeros(a.size(), CV_32F);
a.convertTo(a, CV_32F); //accumulators require float input
split(a, vPlanes);
accumulate(vPlanes[2], c); //c += plane
accumulate(vPlanes[1], c);
accumulate(vPlanes[0], c); //3
accumulateWeighted(vPlanes[2], cc, 0.5); //running average: cc = (1-0.5)*cc + 0.5*plane
accumulateWeighted(vPlanes[1], cc, 0.5);
accumulateWeighted(vPlanes[0], cc, 0.5); //3.25
threshold(c, c, 99, 200, THRESH_BINARY_INV);
threshold(cc, cc, 99, 200, THRESH_BINARY_INV);
imshow("c", c);
imshow("c2", cc);
FileStorage cWrite;
cWrite.open("../x64/Debug/xml/xml1.xml", FileStorage::Mode::WRITE); //persist the accumulated matrix as XML
cWrite << "pic" << c;
cWrite.release();
//automatic thresholds THRESH_OTSU and THRESH_TRIANGLE (the 100 passed in is ignored)
Mat src = imread("../x64/Debug/picture/22.jpg", IMREAD_GRAYSCALE);
Mat dst;
threshold(src, dst, 100, 255, THRESH_OTSU);
imshow("dst", dst);
imwrite("../x64/Debug/picture/22_copy.jpg", dst);
threshold(src, dst, 100, 255, THRESH_TRIANGLE);
imshow("dst1", dst);
imwrite("../x64/Debug/picture/22_copy1.jpg", dst);
}
/* Adaptive threshold. */
void PictureAlgorithm::test32()
{
//The binarization threshold is not fixed per image: it is derived from each pixel's neighborhood.
//Brighter regions get a higher local threshold, darker regions a correspondingly lower one.
//So local regions with different brightness, contrast and texture each get a suitable local threshold.
Mat a = imread("../x64/Debug/picture/8.jpg", IMREAD_GRAYSCALE);
Mat b;
adaptiveThreshold(a, b, 255, ADAPTIVE_THRESH_MEAN_C, THRESH_BINARY, 3, 1); //3x3 neighborhood mean minus the constant 1
imshow("a", a);
imshow("b", b);
}
/* Laplacian. */
void PictureAlgorithm::test33()
{
//edge sharpening/detection that responds to edges in every direction
Mat src = imread("../x64/Debug/picture/21.png");
Mat grad1;
Laplacian(src, grad1, CV_64F, 5, 1, 0, BORDER_DEFAULT); //ksize=5, scale=1, delta=0
Mat dst;
convertScaleAbs(grad1, dst); //back to 8-bit absolute values for display
imshow("111", dst);
}
/* 用线性滤波做卷积 */
void PictureAlgorithm::test34()
{
Mat image = imread("../x64/Debug/picture/5.jpg", IMREAD_REDUCED_COLOR_2);
imshow("22", image);
//Mat kernel = (Mat_<char>(3, 3) << 0, -1, 0, -1, 5, -1, 0, -1, 0); //锐化算子
//Mat kernel = (Mat_<char>(3, 3) << -1, -1, -1, -1, 8, -1, -1, -1, -1);
Mat kernel = (Mat_<char>(3, 3) << -1, -2, -1, 0, 0, 0, 1, 2, 1); //效果类似求梯度
//Mat kernel = (Mat_<char>(3, 3) << 1, 1, 1, 1, -8, 1, 1, 1, 1);
Mat filter;
QString timestamp1 = QString::number(QDateTime::currentMSecsSinceEpoch());
filter2D(image, filter, CV_32FC3, kernel);
QString timestamp2 = QString::number(QDateTime::currentMSecsSinceEpoch());
int sdsjsj = timestamp2.toLongLong() - timestamp1.toLongLong();
imshow("filter", filter);
Mat image2 = imread("../x64/Debug/picture/12.png", IMREAD_REDUCED_COLOR_2);
image2.convertTo(image2, CV_32F);
Mat xy1 = getGaborKernel(cv::Size(3, 3), 1.0, 0, CV_PI/8, 0.5, 0, CV_32F); //纹理分析的线性滤波器
std::cout << xy1 << std::endl;
filter2D(image2, filter, CV_32FC3, xy1);
imshow("Gabor", filter);
}
/* 可分离线性滤波 */
void PictureAlgorithm::test35()
{
Mat image = imread("../x64/Debug/picture/21.png", IMREAD_REDUCED_COLOR_2);
Mat kx, ky;
getDerivKernels(kx, ky, 0, 2, 3); //导数的滤波器系数
std::cout << kx << std::endl;
std::cout << ky << std::endl;
Mat xy = getGaussianKernel(3, 1.5); //高斯滤波器系数矩阵
std::cout << xy << std::endl;
Mat filter;
QString timestamp1 = QString::number(QDateTime::currentMSecsSinceEpoch());
sepFilter2D(image, filter, image.depth(), kx, ky);
QString timestamp2 = QString::number(QDateTime::currentMSecsSinceEpoch());
int sdsjsj = timestamp2.toLongLong() - timestamp1.toLongLong();
imshow("filter", filter);
sepFilter2D(image, filter, image.depth(), xy, xy);
imshow("GaussianKernel", filter);
}
/* 高斯金字塔 */
void PictureAlgorithm::test36()
{
Mat src = imread("../x64/Debug/picture/5.jpg", IMREAD_REDUCED_COLOR_2);
imshow("1", src);
std::vector<Mat> vPyramid;
buildPyramid(src, vPyramid, 5);
for(size_t i = 0; i < vPyramid.size(); i++) {
std::string strNamr = "高斯金字塔" + std::to_string(i);
imshow(strNamr, vPyramid[i]);
}
}
// Payload for a GUI (mouse) callback: the working image plus the points collected on it.
// NOTE(review): not referenced in the visible part of this file -- presumably used by a
// setMouseCallback handler elsewhere; confirm before removing.
struct userdata {
Mat im; //image being annotated
std::vector<Point2f> points; //collected points
};
/* Perspective transform. */
void PictureAlgorithm::test37()
{
Mat src = imread("../x64/Debug/picture/5.jpg", IMREAD_REDUCED_COLOR_2);
imshow("原图", src);
//the four corners of the source image (clockwise from top-left)
Point2f src_points[] = {
Point2f(0, 0),
Point2f(src.cols-1, 0),
Point2f(src.cols-1, src.rows-1),
Point2f(0, src.rows-1) };
//where each corner should land in the output
Point2f dst_points[] = {
Point2f(src.cols*0.05, src.rows*0.33),
Point2f(src.cols*0.9, src.rows*0.25),
Point2f(src.cols*0.8, src.rows*0.9),
Point2f(src.cols*0.2, src.rows*0.7) };
Mat M = getPerspectiveTransform(src_points, dst_points); //3x3 homography from the 4 point pairs
Mat perspective;
warpPerspective(src, perspective, M, src.size(), INTER_LINEAR);
imshow("result", perspective);
}
/* Image remapping (remap): horizontal and vertical mirroring. */
void PictureAlgorithm::test38()
{
    Mat srcImage = imread("../x64/Debug/picture/5.jpg", IMREAD_REDUCED_COLOR_2);
    imshow("原图", srcImage);
    Mat dstImage;
    Mat map_x, map_y; //per-pixel source coordinates: dst(j,i) = src(map_y(j,i), map_x(j,i))
    dstImage.create(srcImage.size(), srcImage.type() );
    map_x.create(srcImage.size(), CV_32FC1 );
    map_y.create(srcImage.size(), CV_32FC1 );
    // Mirror left-right. Note the "- 1": the original used (cols - i), which
    // maps column 0 to the out-of-range coordinate `cols` and yields a border
    // column instead of a true mirror.
    for(int j = 0; j < srcImage.rows;j++) {
        for(int i = 0; i < srcImage.cols;i++) {
            map_x.at<float>(j, i) = static_cast<float>(srcImage.cols - 1 - i);
            map_y.at<float>(j, i) = static_cast<float>(j);
        }
    }
    remap(srcImage, dstImage, map_x, map_y, INTER_LINEAR, BORDER_CONSTANT, Scalar(0,0,0));
    imshow("左右颠倒", dstImage );
    // Mirror top-bottom (same off-by-one fix applied to the row index).
    for(int j = 0; j < srcImage.rows;j++) {
        for(int i = 0; i < srcImage.cols;i++) {
            map_x.at<float>(j, i) = static_cast<float>(i);
            map_y.at<float>(j, i) = static_cast<float>(srcImage.rows - 1 - j);
        }
    }
    remap(srcImage, dstImage, map_x, map_y, INTER_LINEAR, BORDER_CONSTANT, Scalar(0,0,0));
    imshow("上下颠倒", dstImage );
}
/* Image inpainting: build a mask over the damaged (white) pixels and repair them. */
void PictureAlgorithm::test39()
{
Mat src = imread("../x64/Debug/picture/23_dirty.jpg", IMREAD_UNCHANGED);
//Mat src = imread("../x64/Debug/picture/24.jpg", IMREAD_UNCHANGED);
imshow("原图", src);
Mat gray;
cvtColor(src, gray, COLOR_BGR2GRAY);
imshow("灰", gray);
//binarize to isolate the white (damaged) areas
Mat thresh;
threshold(gray, thresh, 240, 255, THRESH_BINARY); //the scratches to remove have gray values around 240-250
//threshold(gray, thresh, 160, 255, THRESH_BINARY);
imshow("二值化", thresh);
//extract the watermark region and build the inpainting mask
Mat mask = Mat::zeros(src.size(), CV_8U);
int height = src.rows;
int width = src.cols;
int start = 0 * height; //start repairing from this row downward; rows above are left untouched
//int start = 0.8 * height;
//walk the thresholded pixels and copy the watermark pixels into the mask
for(int i = start; i < height; i++) {
uchar*data = thresh.ptr<uchar>(i);
for(int j = 0; j < width; j++) {
if (data[j] == 255) {
mask.at<uchar>(i, j) = 255;
}
}
}
Mat kernel = getStructuringElement(MORPH_RECT, Size(5, 5));
dilate(mask, mask, kernel); //dilate the mask so it fully covers the damaged area
imshow("掩码", mask);
Mat result;
inpaint(src, mask, result, 1, INPAINT_NS); //Navier-Stokes inpainting; struggles with large blob-like damage
imshow("修复后", result);
}
/* Histogram comparison. */
void PictureAlgorithm::test40()
{
Mat image = imread("../x64/Debug/picture/1.jpg");
Mat image2 = imread("../x64/Debug/picture/4.jpg");
int channels[1] = {0}; //first channel only
int bins[1] = {256};
float hRange[2] = {0, 255};
const float *ranges[1] = {hRange}; //calcHist requires const float*
Mat calcDst; //output histogram
Mat calcDst2; //output histogram
calcHist(&image, 1, channels, Mat(), calcDst, 1, bins, ranges, true, false); //compute the histogram data
calcHist(&image2, 1, channels, Mat(), calcDst2, 1, bins, ranges, true, false);
double src_src111 = compareHist(calcDst, calcDst, HISTCMP_CORREL); //self-comparison: correlation score of a histogram with itself
double src_src = compareHist(calcDst, calcDst2, HISTCMP_CORREL); //NOTE(review): both scores are computed but never printed, returned or asserted
}
/* EMD metric (histogram similarity comparison). */
void PictureAlgorithm::test41()
{
//EMD measures the cost of turning one image (histogram) into another: the smaller the cost, the more similar they are
vector<Mat> src;//filled via push_back below
Mat temp = imread("../x64/Debug/picture/1.jpg", IMREAD_COLOR);
int m = temp.rows / 2;
int n = temp.cols;
Mat image_cut = Mat(temp, Rect(0, 0, n, m)).clone(); //split one image into its top and bottom halves
Mat image_cut2 = Mat(temp, Rect(0, m, n, m)).clone();
src.push_back(image_cut); src.push_back(image_cut2); //two similar, shifted crops
temp = imread("../x64/Debug/picture/1.jpg", IMREAD_COLOR);
src.push_back(temp);
temp = imread("../x64/Debug/picture/1_move.jpg", IMREAD_COLOR);
src.push_back(temp);
vector<Mat> hsv(4), hist(4);
int scale=10, histSize[] = {8, 8}, ch[] = {0, 1};//8x8 H-S bins over channels 0 and 1; NOTE(review): 'scale' is unused
float h_ranges[] = {0, 180};
float s_ranges[] = {0, 255};
const float* ranges[] = { h_ranges,s_ranges };
vector<Mat> sig(4);
for(int i = 0; i < 4 ; i++) {
cvtColor(src[i], hsv[i], COLOR_RGB2HSV); //NOTE(review): imread loads BGR, so COLOR_BGR2HSV is presumably intended here -- confirm
calcHist(&hsv[i], 1, ch, noArray(), hist[i], 2, histSize, ranges, true);
//build the EMD signature: one (weight, h, s) row per non-empty bin
vector<Vec3f> sigv;
normalize(hist[i], hist[i], 1, 0, NORM_L1); //L1-normalize so the weights sum to 1
for(int h = 0; h < histSize[0]; h++) {
for(int s = 0; s < histSize[1]; s++) {
float hval = hist[i].at<float>(h, s);
if (hval != 0)
sigv.push_back(Vec3f(hval, (float)h, (float)s));
}
}
sig[i] = Mat(sigv).clone().reshape(1); //Nx3 single-channel signature matrix
cout << "&&&&&&&:" << EMD(sig[0], sig[i], DIST_L2) << endl; //EMD of each image against the first
}
}
/* Back projection. */
void PictureAlgorithm::test42()
{
Mat img = imread("../x64/Debug/picture/29.jpg", IMREAD_COLOR);
//compare the effect before and after thresholding
Mat hsv, hue;
int nChannels[] = {0, 0}; //copy source channel 0 into destination channel 0
cvtColor(img, hsv, COLOR_BGR2HSV);
hue.create(hsv.size(), hsv.depth());
mixChannels(&hsv, 1, &hue, 1, nChannels, 1); //copy a specific channel (hue) out of the image
imshow("mixChannels后", hue); //clearly visible
Mat h_hist;
int n = 15; //number of hue bins
float range[] = {0, 180};
const float *ranges = {range};
calcHist(&hue, 1, 0, Mat(), h_hist, 1, &n, &ranges, true, false);
normalize(h_hist, h_hist, 0, 255, NORM_MINMAX, -1, Mat());
int img_h = 700; //image height (rows); NOTE(review): img_h/img_w/line_w are declared but unused here
int img_w = 512; //image width (cols)
int line_w = 30; //line width
Mat backPrjImage;
calcBackProject(&hue, 1, 0, h_hist, backPrjImage, &ranges, 1, true);
imshow("反向投影1", backPrjImage); //clearly visible
//the next three examples produce hard-to-read projections
threshold(hue, hue, 80, 180, THRESH_BINARY); //after thresholding
calcHist(&hue, 1, 0, Mat(), h_hist, 1, &n, &ranges, true, false);
normalize(h_hist, h_hist, 0, 255, NORM_MINMAX, -1, Mat());
calcBackProject(&hue, 1, 0, h_hist, backPrjImage, &ranges, 1, true);
imshow("阀值后反向投影", backPrjImage); //hard to see
//same object across two photos; note both were taken with the same camera
Mat img2 = imread("../x64/Debug/picture/30.jpg", IMREAD_COLOR);
Mat hsv2, hue2;
int nChannels2[] = {0, 0};
cvtColor(img2, hsv2, COLOR_BGR2HSV);
hue2.create(hsv2.size(), hsv2.depth());
mixChannels(&hsv2, 1, &hue2, 1, nChannels2, 1);
threshold(hue2, hue2, 70, 180, THRESH_BINARY); //note: the two images get different preprocessing
imshow("mixChannels后2", hue2); //hard to see
Mat backPrjImage2;
calcBackProject(&hue2, 1, 0, h_hist, backPrjImage2, &ranges, 1, true);
imshow("反向投影2", backPrjImage2); //hard to see
}
//Map a radius and angle to 2-D pixel coordinates around the fixed center (400, 400).
inline Point calcPoint(double R, double angle)
{
    const Point2f origin(400, 400);
    const float dx = (float)cos(angle);
    const float dy = (float)-sin(angle); //the y axis points downward in image coordinates
    return origin + Point2f(dx, dy) * (float)R;
}
//Draw an X-shaped marker with half-size d centered at 'center'.
void drawCross(Mat img, Point center, Scalar color, int d)
{
    const Point topLeft(center.x - d, center.y - d), bottomRight(center.x + d, center.y + d);
    const Point topRight(center.x + d, center.y - d), bottomLeft(center.x - d, center.y + d);
    line(img, topLeft, bottomRight, color, 1, LINE_AA, 0);
    line(img, topRight, bottomLeft, color, 1, LINE_AA, 0);
}
/* Kalman-filter prediction of uniform circular motion */
void PictureAlgorithm::test43()
{
    Mat img(800, 800, CV_8UC3);
    KalmanFilter KF(2, 1, 0);                   //state: (angle, delta-angle); measurement: angle
    Mat state(2, 1, CV_32F);                    //true state vector x(k)
    Mat measurement = Mat::zeros(1, 1, CV_32F); //measurement vector z(k)
    Mat processNoise(2, 1, CV_32F);             //process noise w(k)
    char code = (char)-1;
    for(;;) {
        randn(state, Scalar::all(0), Scalar::all(0.1)); //random Gaussian initial true state
        /* transition matrix A of the constant-velocity model */
        KF.transitionMatrix = (Mat_<float>(2, 2) << 1, 1, 0, 1);
        setIdentity(KF.measurementMatrix);                      //measurement matrix H = [1, 0]
        /* noise covariance matrices */
        setIdentity(KF.processNoiseCov, Scalar::all(1e-5));     //process noise covariance Q
        setIdentity(KF.measurementNoiseCov, Scalar::all(1e-1)); //measurement noise covariance R
        /* posterior state and covariance at time k-1 */
        randn(KF.statePost, Scalar::all(0), Scalar::all(0.1));
        setIdentity(KF.errorCovPost, Scalar::all(1));           //posterior covariance P = I
        for(;;) {
            float R = img.cols / 3.0;
            /* true point (white) */
            double stateAngle = state.at<float>(0);
            Point statePt = calcPoint(R, stateAngle);
            /* predicted point (green) */
            Mat prediction = KF.predict();
            double predictAngle = prediction.at<float>(0);
            Point predictPt = calcPoint(R, predictAngle);
            /* measured point (red): z = H*x(k) + v, v ~ N(0, R) */
            //BUG FIX: randn takes a standard deviation, but measurementNoiseCov stores a
            //variance, so the noise std-dev must be sqrt(R) (as in OpenCV's kalman sample)
            randn(measurement, Scalar::all(0), Scalar::all(sqrt(KF.measurementNoiseCov.at<float>(0))));
            measurement += KF.measurementMatrix * state;
            double measAngle = measurement.at<float>(0);
            Point measPt = calcPoint(R, measAngle);
            /* estimated point (cyan) */
            //BUG FIX: the original called correct() twice per frame (inside the if AND again
            //after it), applying the same measurement twice. Update at most once per frame;
            //the uniform(0,4) draw skips the update ~25% of the time to simulate dropped measurements.
            if(theRNG().uniform(0, 4) != 0) {
                KF.correct(measurement); //correct() refreshes statePost from the measurement
            }
            double gujiAngle = KF.statePost.at<float>(0); //best current estimate (predict() also maintains statePost when no correction happens)
            Point gujiPt = calcPoint(R, gujiAngle);
            img = Scalar::all(0);
            circle(img, Point2f(400, 400), R, Scalar::all(70), 1);
            //redraw the four positions every frame
            drawCross(img, statePt, Scalar(255,255,255), 12);
            drawCross(img, measPt, Scalar(0,0,255), 9);
            drawCross(img, predictPt, Scalar(0,255,0), 6);
            drawCross(img, gujiPt, Scalar(255,255,0), 3);
            randn(processNoise, Scalar(0), Scalar::all(sqrt(KF.processNoiseCov.at<float>(0, 0))));
            state = KF.transitionMatrix*state + processNoise; //advance the true state with process noise
            imshow("Kalman", img);
            code = waitKey(400);
            if(code == '2') {
                break;
            }
        }
        if(code == '3') {
            break;
        }
    }
}
Point mousePosition= Point(400, 300); //latest cursor position: written by mouseEvent(), read each frame by test44()
//Mouse callback for test44: record the latest cursor position on every move event.
void mouseEvent(int event, int x, int y, int flags, void *param )
{
    if (event != EVENT_MOUSEMOVE) {
        return;
    }
    mousePosition = Point(x, y);
}
/* Track the mouse position with a Kalman filter. */
void PictureAlgorithm::test44()
{
const int winHeight = 600;
const int winWidth = 800;
RNG rng;
//1. Kalman filter setup
const int stateNum = 4; //state is a 4x1 vector (x, y, dx, dy)
const int measureNum = 2; //measurement is a 2x1 vector (x, y)
KalmanFilter KF(stateNum, measureNum, 0);
/* initialize the transition matrix A and the measurement matrix H */
KF.transitionMatrix = (Mat_<float>(4, 4) <<1,0,1,0,0,1,0,1,0,0,1,0,0,0,0,1); //transition matrix A (constant-velocity model)
setIdentity(KF.measurementMatrix); //measurement matrix H
/* the two noise covariance matrices */
setIdentity(KF.processNoiseCov, Scalar::all(1e-5)); //process noise covariance Q
setIdentity(KF.measurementNoiseCov, Scalar::all(1e-1)); //measurement noise covariance R
/* posterior state and covariance at time 0 */
rng.fill(KF.statePost, RNG::UNIFORM, 0, winHeight>winWidth?winWidth:winHeight); //random initial state x(0)
setIdentity(KF.errorCovPost, Scalar::all(1)); //posterior error covariance P
Mat measurement = Mat::zeros(measureNum, 1, CV_32F); //measurement vector z(k)
namedWindow("kalman");
setMouseCallback("kalman", mouseEvent); //mouseEvent updates the global mousePosition
Mat image(winHeight, winWidth, CV_8UC3, Scalar(0));
while (1) {
//2. Kalman prediction
Mat prediction = KF.predict();
Point predict_pt = Point(prediction.at<float>(0), prediction.at<float>(1) ); //predicted (x', y')
//3. take the measurement from the latest mouse position
measurement.at<float>(0) = (float)mousePosition.x;
measurement.at<float>(1) = (float)mousePosition.y;
//4. correct the estimate with the measurement
KF.correct(measurement);
image.setTo(Scalar(0,0,0));
circle(image, predict_pt, 8, Scalar(255,255,0), 3, LINE_AA); //predicted position
circle(image, mousePosition, 8, Scalar(255,0,255), 3, LINE_AA); //actual position
char buf[256];
snprintf(buf, 256, "forecast pos:(%3d,%3d)", predict_pt.x, predict_pt.y);
putText(image, buf, Point(10,30), FONT_HERSHEY_SIMPLEX, 1, Scalar(255,255,255), 1, 8);
snprintf(buf, 256, "truth pos:(%3d,%3d)", mousePosition.x, mousePosition.y);
putText(image, buf, Point(10,60), FONT_HERSHEY_SIMPLEX, 1, Scalar(255,255,255), 1, 8);
imshow("kalman", image);
char c = waitKey(3);
if(c == '2') {
break;
}
}
}
/* 2-D coordinates vs homogeneous coordinates */
void PictureAlgorithm::test45()
{
    //four sample 2-D points to lift into homogeneous coordinates
    vector<Point2d> src_coners = {
        Point2d(287, 769), Point2d(536, 812),
        Point2d(109, 790), Point2d(254, 924)
    };
    vector<Point2d> origin(4);
    vector<Point3d> dst_coners(4);
    convertPointsHomogeneous(src_coners, dst_coners); //(x, y) -> (x, y, 1)
    for(auto &pt : dst_coners) {
        cout << "齐次:(" << pt.x << ", " << pt.y << ", " << pt.z << ")" << endl;
        pt.z = 2; //rescale w; converting back divides x and y by it
    }
    convertPointsFromHomogeneous(dst_coners, origin); //(x, y, w) -> (x/w, y/w)
    for(const auto &pt : origin) {
        cout << "非齐次:(" << pt.x << ", " << pt.y << ")" << endl;
    }
}
/* 旋转向量与旋转矩阵 */
void PictureAlgorithm::test46()
{
//旋转向量 模代表旋转角度
Mat src = (Mat_<double>(3, 1) << 0, 0 , CV_PI/2);
cout << src << endl;
Mat dst;
Rodrigues(src, dst); //转化成旋转矩阵
cout << dst << endl;
Mat out;
Rodrigues(dst, out); //转化成旋转向量
cout << out << endl;
}
/* solvePnP */
void PictureAlgorithm::test47()
{
Mat im = imread("../x64/Debug/picture/35.jpg");
//二维图像点。如果改变图像,需要改变矢量
std::vector<Point2d> image_points;
image_points.push_back(Point2d(578, 181)); //鼻尖
image_points.push_back(Point2d(589, 321)); //下巴
image_points.push_back(Point2d(525, 145)); //左眼左角
image_points.push_back(Point2d(648, 142)); //右眼右角
image_points.push_back(Point2d(545, 238)); //左嘴角
image_points.push_back(Point2d(631, 239)); //右嘴角
//3D模型点
std::vector<Point3d> model_points;
model_points.push_back(Point3d(0.0f, 0.0f, 0.0f)); //鼻尖
model_points.push_back(Point3d(0.0f, -330.0f, -65.0f)); //下巴
model_points.push_back(Point3d(-225.0f, 170.0f, -135.0f)); //左眼左角
model_points.push_back(Point3d(225.0f, 170.0f, -135.0f)); //右眼右角
model_points.push_back(Point3d(-150.0f, -150.0f, -125.0f)); //左嘴角
model_points.push_back(Point3d(150.0f, -150.0f, -125.0f)); //右嘴角
//相机内部
double focal_length = im.cols; //近似焦距
Point2d center = Point2d(im.cols/2, im.rows/2);
Mat camera_matrix = (Mat_<double>(3,3) << focal_length, 0, center.x, 0 , focal_length, center.y, 0, 0, 1);
Mat dist_coeffs = Mat::zeros(4,1,DataType<double>::type); //假设没有透镜畸变
cout << "Camera Matrix " << endl << camera_matrix << endl ;
//输出旋转与平移
Mat rotation_vector; //以轴角形式旋转
Mat translation_vector;
//求解姿态
//solvePnP(model_points, image_points, camera_matrix, dist_coeffs, rotation_vector, translation_vector); //对异常值不过鲁棒,使用solvePnPRansac解决
solvePnPRansac(model_points, image_points, camera_matrix, dist_coeffs, rotation_vector, translation_vector);
//将一个3D点(0,0,1000.0)投影到图像平面上,用这个来画一条从鼻子伸出来的线
vector<Point3d> nose_end_point3D;
vector<Point2d> nose_end_point2D;
nose_end_point3D.push_back(Point3d(0,0,1000.0));
projectPoints(nose_end_point3D, rotation_vector, translation_vector, camera_matrix, dist_coeffs, nose_end_point2D);
for(int i=0; i < image_points.size(); i++) {
circle(im, image_points[i], 3, Scalar(0,0,255), -1);
}
line(im, image_points[0], nose_end_point2D[0], Scalar(255,0,0), 2);
cout << "Rotation Vector " << endl << rotation_vector << endl;
cout << "Translation Vector" << endl << translation_vector << endl;
cout << nose_end_point2D << endl;
imshow("Output", im);
}
/* Chessboard camera-calibration walkthrough */
void PictureAlgorithm::test48()
{
    Mat image = imread("../x64/Debug/picture/37.jpg"); //must be a real photograph; 36.bmp also works
    vector<vector<Point3f>> object_points; //3-D coordinates on the calibration board
    vector<vector<Point2f>> image_points;  //2-D coordinates in the image
    Size board_size(7, 7); //inner-corner count = squares per side - 1
    float square_size = 20; //board square size in mm
    Mat gray;
    vector<Point2f> corners;
    int success_count = 0;
    cvtColor(image, gray, COLOR_BGR2GRAY);
    bool found = findChessboardCorners(gray, board_size, corners, CALIB_CB_ADAPTIVE_THRESH | CALIB_CB_NORMALIZE_IMAGE);
    if (found) {
        //refine the detected corners to sub-pixel accuracy before using them
        cornerSubPix(gray, corners, Size(11, 11), Size(-1, -1), TermCriteria(TermCriteria::EPS + TermCriteria::MAX_ITER, 30, 0.1));
        drawChessboardCorners(image, board_size, corners, found);
        imshow("Chessboard", image);
        waitKey(100);
        //generate the matching 3-D board coordinates (board lies in the z = 0 plane)
        vector<Point3f> object_corner;
        for(int j = 0; j < board_size.height; j++) {
            for(int k = 0; k < board_size.width; k++) {
                object_corner.push_back(Point3f(j * square_size, k * square_size, 0));
            }
        }
        object_points.push_back(object_corner);
        image_points.push_back(corners);
        success_count++;
    }
    //BUG FIX: calibrateCamera throws on empty point lists; bail out when no board was detected
    if (success_count == 0) {
        cout << "no chessboard corners found, calibration skipped" << endl;
        return;
    }
    //compute the camera intrinsics and distortion coefficients
    Mat camera_matrix, dist_coeffs;
    vector<Mat> rvecs, tvecs;
    double rms = calibrateCamera(object_points, image_points, image.size(), camera_matrix, dist_coeffs, rvecs, tvecs);
    cout << "RMS error: " << rms << endl;
    cout << "相机内参数矩阵: " << endl << camera_matrix << endl;
    cout << "畸变矩阵: " << endl << dist_coeffs << endl;
}
/* Circle-grid calibration target detection */
void PictureAlgorithm::test49()
{
    //Mat img = imread("../x64/Debug/picture/38.png");
    //Mat img = imread("../x64/Debug/picture/39.png");
    Mat img = imread("../x64/Debug/picture/40.png");
    Mat gray;
    cvtColor(img, gray, COLOR_BGR2GRAY);
    //blob-detector parameters tuned for the dot pattern
    SimpleBlobDetector::Params params;
    params.filterByArea = true;
    params.minArea = 10;
    params.maxArea = 10000;
    params.minDistBetweenBlobs = 5;
    Ptr<FeatureDetector> blobDetector = SimpleBlobDetector::create(params);
    //Size patternSize(7, 7);
    //Size patternSize(6, 9);
    Size patternSize(4, 11);
    vector<Point2f> centers;
    threshold(gray, gray, 120, 255, THRESH_BINARY);
    //locate the centers of the circle features
    bool found = findCirclesGrid(gray, patternSize, centers, CALIB_CB_SYMMETRIC_GRID | CALIB_CB_CLUSTERING, blobDetector);
    drawChessboardCorners(img, patternSize, centers, found);
    imshow("corners", img);
}
/* 图片矫正 */
void PictureAlgorithm::test50()
{
Mat src = imread("../x64/Debug/picture/12_copy.png");
//源图像中书的四个角
vector<Point2f> pts_src;
pts_src.push_back(Point2f(0, 0));
pts_src.push_back(Point2f(src.cols, 0));
pts_src.push_back(Point2f(src.cols, src.rows));
pts_src.push_back(Point2f(0, src.rows));
//书的四个角在目标图像中
vector<Point2f> pts_dst;
pts_dst.push_back(Point2f(0, 0));
pts_dst.push_back(Point2f(src.cols/4, 0));
pts_dst.push_back(Point2f(src.cols/3, src.rows));
pts_dst.push_back(Point2f(0, src.rows/2));
//计算单应性
Mat h = findHomography(pts_dst, pts_src); //校正后会丢帧
Mat im_out;
//根据单应性将源图像传送到目的地
warpPerspective(src, im_out, h, src.size());
imshow("Source Image", src);
imshow("Warped Source Image", im_out);
}
//Shared state for the test51 mouse callback (onMouse50).
struct callbackP
{
Mat src; //image shown in the "src" window; annotated on each click
int clickTimes = 0; //number of clicks made on the image
vector<Point2f> srcTri; //clicked points (source quad for the homography)
};
void onMouse50(int event, int x, int y, int flags, void *utsc)
{
if (event == EVENT_LBUTTONUP) { //响应鼠标左键事件
callbackP cp = *(callbackP*)utsc; //先转换类型,再取数据
circle((*(callbackP*)utsc).src, Point(x, y), 2, Scalar(0, 0, 255), 2); //标记选中点
imshow("src", (*(callbackP*)utsc).src);
(*(callbackP*)utsc).srcTri.push_back(Point2f(x, y));
cout << "x:" << x << " " << "y:" << y << endl;
(*(callbackP*)utsc).clickTimes++;
if ((*(callbackP*)utsc).clickTimes == 4) {
cout << "按任意键继续!" << endl;
}
}
}
/* Rectify an image from four mouse-clicked corners */
void PictureAlgorithm::test51()
{
    vector<Point2f> dstTri(4);
    Mat dst;
    callbackP utsc;
    utsc.src = imread("../x64/Debug/picture/41.png");
    namedWindow("src", WINDOW_AUTOSIZE);
    imshow("src", utsc.src);
    cout << "从需要透视变换区域的左上角开始,顺时针依次点矩形的四个角!" << endl;
    setMouseCallback("src", onMouse50, (void*)&utsc); //callback fills utsc.srcTri / clickTimes
    waitKey();
    if (utsc.clickTimes >= 4) {
        //destination rectangle sized from the clicked quad (top-edge and right-edge lengths)
        dstTri[0].x = 0;
        dstTri[0].y = 0;
        dstTri[1].x = utsc.srcTri[1].x - utsc.srcTri[0].x;
        dstTri[1].y = 0;
        dstTri[2].x = utsc.srcTri[1].x - utsc.srcTri[0].x;
        dstTri[2].y = utsc.srcTri[2].y - utsc.srcTri[1].y;
        dstTri[3].x = 0;
        dstTri[3].y = utsc.srcTri[2].y - utsc.srcTri[1].y;
        //compute the perspective matrix
        Mat M = findHomography(utsc.srcTri, dstTri, RANSAC);
        //apply the perspective warp
        warpPerspective(utsc.src, dst, M, Size((utsc.srcTri[1].x - utsc.srcTri[0].x), (utsc.srcTri[2].y - utsc.srcTri[1].y)));
        imshow("output", dst);
        //BUG FIX: only save when a result exists; the original unconditionally called
        //imwrite on an empty Mat (which throws) when fewer than four points were clicked
        imwrite("../x64/Debug/picture/41_z.png", dst);
    }
    else {
        cout << "需要从左上角开始,顺时针依次点矩形的四个角!" << endl;
        cout << "现在点击了" <<utsc.clickTimes << "次" <<endl;
    }
}
/* undistort: remove lens distortion in a single call */
void PictureAlgorithm::test52()
{
    Mat img = imread("../x64/Debug/picture/43.png");
    //intrinsic matrix: fx 0 cx / 0 fy cy / 0 0 1
    Mat k = (Mat_<float>(3, 3) <<
        458.654f, 0.f, 367.215f,
        0.f, 457.296f, 248.375f,
        0.f, 0.f, 1.f);
    //distortion coefficients, in the order [k1, k2, p1, p2]
    Mat d = (Mat_<float>(1, 4) << -0.28340811f, 0.07395907f, 0.00019359f, 1.76187114e-05f);
    Mat img_distort, img_absdiff;
    undistort(img, img_distort, k, d);
    absdiff(img, img_distort, img_absdiff); //visualize the per-pixel difference
    imshow("img", img);
    imshow("img_distort", img_distort);
    imshow("img_absdiff", img_absdiff);
}
/* initUndistortRectifyMap + remap: undistortion via precomputed lookup maps */
void PictureAlgorithm::test53()
{
    Mat img = imread("../x64/Debug/picture/43.png");
    //intrinsic matrix: fx 0 cx / 0 fy cy / 0 0 1
    Mat k = (Mat_<float>(3, 3) <<
        458.654f, 0.f, 367.215f,
        0.f, 457.296f, 248.375f,
        0.f, 0.f, 1.f);
    //distortion coefficients, in the order [k1, k2, p1, p2]
    Mat d = (Mat_<float>(1, 4) << -0.28340811f, 0.07395907f, 0.00019359f, 1.76187114e-05f);
    Mat img_distort, img_absdiff;
    Mat map1, map2;
    //compute an adjusted camera matrix, then build the remap tables once and reuse them
    Mat NewCameraMatrix = getOptimalNewCameraMatrix(k, d, img.size(), 0, img.size(), 0);
    initUndistortRectifyMap(k, d, cv::Mat(), NewCameraMatrix, img.size(), CV_16SC2, map1, map2);
    remap(img, img_distort, map1, map2, cv::INTER_LINEAR);
    absdiff(img, img_distort, img_absdiff); //visualize the per-pixel difference
    imshow("img", img);
    imshow("img_distort", img_distort);
    imshow("img_absdiff", img_absdiff);
}
/* Project 3-D points to 2-D image coordinates */
void PictureAlgorithm::test54()
{
    //seven corners of a unit cube centered at the origin
    vector<Point3f> points = {
        { .5f,  .5f, -.5f}, { .5f,  .5f,  .5f}, {-.5f,  .5f,  .5f}, {-.5f,  .5f, -.5f},
        { .5f, -.5f, -.5f}, {-.5f, -.5f, -.5f}, {-.5f, -.5f,  .5f}
    };
    //camera intrinsic matrix
    Mat intrisicMat = (Mat_<float>(3, 3) <<
        1.6415318549788924e+003f, 0.f, 5.3262822453148601e+002f,
        0.f, 1.7067753507885654e+003f, 3.8095355839052968e+002f,
        0.f, 0.f, 1.f);
    //rotation vector (axis-angle)
    Mat rVec = (Mat_<float>(3, 1) <<
        -3.9277902400761393e-002f, 3.7803824407602084e-002f, 2.6445674487856268e-002f);
    //translation vector
    Mat tVec = (Mat_<float>(3, 1) <<
        2.1158489381208221e+000f, -7.6847683212704716e+000f, 2.6169795190294256e+001f);
    //distortion coefficients
    Mat distCoeffs = (Mat_<float>(5, 1) <<
        -7.9134632415085826e-001f, 1.5623584435644169e+000f, -3.3916502741726508e-002f,
        -1.3921577146136694e-002f, 1.1430734623697941e-002f);
    std::vector<Point2f> projectedPoints;
    projectPoints(points, rVec, tVec, intrisicMat, distCoeffs, projectedPoints);
    cout << projectedPoints;
}
//Struct and mouse handler used by the bird's-eye-view example (test55).
struct imagedata
{
Mat img; //image shown in the "image" window; annotated on each click
vector<Point2f>points;
//these points are the source-image coordinates to transform (clicked by the user);
//after the transform they correspond to the destination quad
};
void mouseHundle(int event,int x,int y,int flag,void *arg)
{
struct imagedata* ind = (struct imagedata *)arg;
if(event==EVENT_LBUTTONDOWN) { //鼠标左键按下
circle(ind->img, Point(x,y), 3, Scalar(0,0,255), 3, LINE_AA);
imshow("image", ind->img); //test1鸟瞰图显示原图带圆点
if(ind->points.size() < 4) {
ind->points.push_back(Point2f(x,y)); //转换的坐标只需要收集四个
}
}
}
/* Bird's-eye-view transform; same approach as example 51. */
void PictureAlgorithm::test55()
{
Mat image = imread("../x64/Debug/picture/44.png");
imshow("image", image);
Mat result=Mat::zeros(500, 600, CV_8UC1);
//target coordinates, clockwise: top-left, top-right, bottom-right, bottom-left (order is a free choice)
vector<Point2f> obj;
obj.push_back(Point2f(0, 0));
obj.push_back(Point2f(600, 0));
obj.push_back(Point2f(600, 500));
obj.push_back(Point2f(0, 500));
//2. collect the four source coordinates via mouse clicks in the source-image window
imagedata data;
data.img = image; //hand the source image to the callback
namedWindow("image");
setMouseCallback("image", mouseHundle, &data); //the mouse callback fills data.points
waitKey(0);
Mat res = findHomography(data.points, obj, RANSAC);
warpPerspective(image, result, res, result.size());
imshow("result", result);
}
//Map a depth value to a BGR color (blue = near, red = far) for the test56 visualization.
inline Scalar get_color(float depth)
{
    float fRange = 40;
    if (depth > 50) {
        depth = 50;
    }
    if (depth < 10) {
        depth = 10;
    }
    int iValue = 255 * depth / fRange;
    //BUG FIX: depth is clamped to [10, 50] but scaled by fRange = 40, so iValue could
    //reach 318; clamp it so both color channels stay within the valid 0..255 range
    if (iValue > 255) {
        iValue = 255;
    }
    return Scalar(iValue, 0, 255 - iValue);
}
/* Triangulation: recover 3-D points from two views (author note: not fully understood). */
void PictureAlgorithm::test56()
{
Mat img_1 = imread("../x64/Debug/picture/46_L.png");
Mat img_2 = imread("../x64/Debug/picture/46_R.png");
vector<KeyPoint> keypoints_1, keypoints_2;
vector<DMatch> matches;
Mat descriptors_1, descriptors_2;
Ptr<FeatureDetector> detector = ORB::create();
Ptr<DescriptorExtractor> descriptor = ORB::create();
Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create("BruteForce-Hamming");
detector->detect(img_1, keypoints_1);
detector->detect(img_2, keypoints_2);
descriptor->compute(img_1, keypoints_1, descriptors_1);
descriptor->compute(img_2, keypoints_2, descriptors_2);
//match the BRIEF descriptors of the two images using Hamming distance
vector<DMatch> match;
matcher->match(descriptors_1, descriptors_2, match);
//keep only matches below an empirical distance threshold of 30
for(int i = 0; i < descriptors_1.rows; i++) {
if (match[i].distance <= 30) {
matches.push_back(match[i]);
}
}
//estimate the motion between the two images
Mat R, t;
Mat K = (Mat_<double>(3, 3) << 520.9, 0, 325.1, 0, 521.0, 249.7, 0, 0, 1); //camera intrinsics, TUM Freiburg2
vector<Point2f> points1;
vector<Point2f> points2;
for(int i = 0; i < (int) matches.size(); i++) { //convert the matched keypoints to vector<Point2f>
points1.push_back(keypoints_1[matches[i].queryIdx].pt);
points2.push_back(keypoints_2[matches[i].trainIdx].pt);
}
//compute the essential matrix
Point2d principal_point(325.1, 249.7); //camera principal point, TUM dataset calibration value
int focal_length = 521; //camera focal length, TUM dataset calibration value
Mat essential_matrix;
essential_matrix = findEssentialMat(points1, points2, focal_length, principal_point);
recoverPose(essential_matrix, points1, points2, R, t, focal_length, principal_point); //recover rotation and translation from the essential matrix
//triangulate
vector<Point3d> points;
Mat T1 = (Mat_<float>(3, 4) << 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0);
Mat T2 = (Mat_<float>(3, 4) << R.at<double>(0, 0), R.at<double>(0, 1), R.at<double>(0, 2), t.at<double>(0, 0),
R.at<double>(1, 0), R.at<double>(1, 1), R.at<double>(1, 2), t.at<double>(1, 0),
R.at<double>(2, 0), R.at<double>(2, 1), R.at<double>(2, 2), t.at<double>(2, 0));
K = (Mat_<double>(3, 3) << 520.9, 0, 325.1, 0, 521.0, 249.7, 0, 0, 1);
vector<Point2f> pts_1, pts_2;
for(DMatch m:matches) {
//convert pixel coordinates to normalized camera coordinates
float x = (keypoints_1[m.queryIdx].pt.x - K.at<double>(0, 2)) / K.at<double>(0, 0);
float y = (keypoints_1[m.queryIdx].pt.y - K.at<double>(1, 2)) / K.at<double>(1, 1);
float x2 = (keypoints_2[m.trainIdx].pt.x - K.at<double>(0, 2)) / K.at<double>(0, 0);
float y2 = (keypoints_2[m.trainIdx].pt.y - K.at<double>(1, 2)) / K.at<double>(1, 1);
pts_1.push_back(Point2f(x, y));
pts_2.push_back(Point2f(x2, y2));
}
Mat pts_4d;
triangulatePoints(T1, T2, pts_1, pts_2, pts_4d);
//convert the homogeneous results to non-homogeneous coordinates
for(int i = 0; i < pts_4d.cols; i++) {
Mat x = pts_4d.col(i);
x /= x.at<float>(3, 0); //normalize by the homogeneous coordinate
Point3d p(x.at<float>(0, 0), x.at<float>(1, 0), x.at<float>(2, 0));
points.push_back(p);
}
//verify the reprojection relation between the triangulated points and the feature points
K = (Mat_<double>(3, 3) << 520.9, 0, 325.1, 0, 521.0, 249.7, 0, 0, 1);
Mat img1_plot = img_1.clone();
Mat img2_plot = img_2.clone();
for(int i = 0; i < matches.size(); i++) {
//first image: color each keypoint by its triangulated depth
float depth1 = points[i].z;
cout << "depth: " << depth1 << endl;
float x = (keypoints_1[matches[i].queryIdx].pt.x - K.at<double>(0, 2)) / K.at<double>(0, 0);
float y = (keypoints_1[matches[i].queryIdx].pt.y - K.at<double>(1, 2)) / K.at<double>(1, 1);
Point2d pt1_cam = Point2f(x, y);
circle(img1_plot, keypoints_1[matches[i].queryIdx].pt, 2, get_color(depth1), 2);
//second image: transform the point into the second camera frame to obtain its depth there
Mat pt2_trans = R * (Mat_<double>(3, 1) << points[i].x, points[i].y, points[i].z) + t;
float depth2 = pt2_trans.at<double>(2, 0);
circle(img2_plot, keypoints_2[matches[i].trainIdx].pt, 2, get_color(depth2), 2);
}
imshow("img 1", img1_plot);
imshow("img 2", img2_plot);
}
//Convert a pixel coordinate to normalized camera coordinates using the intrinsics K.
Point2d pixel2cam(const Point2d& p, const Mat& K)
{
    const double fx = K.at<double>(0, 0), fy = K.at<double>(1, 1);
    const double cx = K.at<double>(0, 2), cy = K.at<double>(1, 2);
    return Point2d((p.x - cx) / fx, (p.y - cy) / fy);
}
/* 2D-2D pose estimation via epipolar geometry. */
void PictureAlgorithm::test57()
{
Mat img_1 = imread("../x64/Debug/picture/46_L.png", IMREAD_COLOR);
Mat img_2 = imread("../x64/Debug/picture/46_R.png", IMREAD_COLOR);
vector<KeyPoint> keypoints_1, keypoints_2;
vector<DMatch> matches;
Mat descriptors_1, descriptors_2;
Ptr<FeatureDetector> detector = ORB::create();
Ptr<DescriptorExtractor> descriptor = ORB::create();
Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create ("BruteForce-Hamming");
detector->detect(img_1,keypoints_1);
detector->detect(img_2,keypoints_2);
descriptor->compute(img_1, keypoints_1, descriptors_1);
descriptor->compute(img_2, keypoints_2, descriptors_2);
vector<DMatch> match;
matcher->match(descriptors_1, descriptors_2, match);
//filter the matched pairs
double min_dist = 10000, max_dist = 0;
//find the minimum and maximum distances over all matches, i.e. the most and least similar pairs
for(int i = 0; i < descriptors_1.rows; i++) {
double dist = match[i].distance;
if (dist < min_dist) {
min_dist = dist;
}
if (dist > max_dist) {
max_dist = dist;
}
}
//a match is considered wrong when its distance exceeds twice the minimum; since the minimum can be tiny, an empirical floor of 30 is used
for(int i = 0; i < descriptors_1.rows; i++) {
if (match[i].distance <= max (2*min_dist, 30.0)) {
matches.push_back (match[i]);
}
}
Mat R, t; //estimate the motion between the two images
Mat K = (Mat_<double> (3,3)<< 520.9, 0, 325.1, 0, 521.0, 249.7, 0, 0, 1); //camera intrinsics, TUM Freiburg2
vector<Point2f> points1;
vector<Point2f> points2;
for(int i = 0; i < (int)matches.size(); i++) { //convert the matched keypoints to vector<Point2f>
points1.push_back (keypoints_1[matches[i].queryIdx].pt);
points2.push_back (keypoints_2[matches[i].trainIdx].pt);
}
//compute the fundamental matrix
Mat fundamental = findFundamentalMat(points1, points2);
cout<< "fundamental is " << endl << fundamental << endl;
//compute the essential matrix
Point2d principal_point (325.1, 249.7); //camera principal point, TUM dataset calibration value
double focal_length = 521; //camera focal length, TUM dataset calibration value
Mat essential = findEssentialMat(points1, points2, focal_length, principal_point);
cout<< "essential is " << endl << essential << endl;
//recover rotation and translation from the essential matrix
recoverPose(essential, points1, points2, R, t, focal_length, principal_point);
cout<< "R is " << endl << R << endl;
cout<< "t is "<< endl << t << endl;
//verify E = t^R * scale, where t^ is the skew-symmetric matrix of t
Mat t_x = (Mat_<double> (3,3) << 0, -t.at<double> (2,0), t.at<double> (1,0),
t.at<double> (2,0), 0, -t.at<double> (0,0), -t.at<double> (1,0), t.at<double> (0,0), 0);
cout<< "t^R=" << endl << t_x*R << endl;
//verify the epipolar constraint y2^T * t^ * R * y1 = 0 for every match
K = (Mat_<double> (3,3) << 520.9, 0, 325.1, 0, 521.0, 249.7, 0, 0, 1);
for(DMatch m : matches) {
Point2d pt1 = pixel2cam(keypoints_1[ m.queryIdx ].pt, K);
Mat y1 = (Mat_<double> (3,1)<< pt1.x, pt1.y, 1);
Point2d pt2 = pixel2cam(keypoints_2[ m.trainIdx ].pt, K);
Mat y2 = (Mat_<double> (3,1)<< pt2.x, pt2.y, 1);
Mat d = y2.t()* t_x * R * y1;
cout << "纵向约束:" << d << endl;
}
}
/* Epipolar lines: match features across two views and draw the corresponding epilines */
void PictureAlgorithm::test58()
{
    Mat rgb1 = imread("../x64/Debug/picture/46_L.png", IMREAD_COLOR);
    Mat rgb2 = imread("../x64/Debug/picture/46_R.png", IMREAD_COLOR);
    Ptr<FeatureDetector> detector = ORB::create();
    Ptr<DescriptorExtractor> descriptor = ORB::create();
    vector<KeyPoint> kp1, kp2;
    detector->detect(rgb1, kp1);
    detector->detect(rgb2, kp2);
    Mat desp1, desp2;
    descriptor->compute(rgb1, kp1, desp1);
    descriptor->compute(rgb2, kp2, desp2);
    vector<DMatch> matches;
    BFMatcher matcher;
    matcher.match(desp1, desp2, matches);
    //filter the matches: keep those within 10x the minimum distance
    vector<DMatch> goodMatches;
    double minDis = 9999;
    for(size_t i=0; i<matches.size(); i++) {
        if (matches[i].distance < minDis) {
            minDis = matches[i].distance;
        }
    }
    for(size_t i=0; i<matches.size(); i++) {
        if (matches[i].distance < 10*minDis) {
            goodMatches.push_back(matches[i]);
        }
    }
    vector<Point2f> pts1, pts2;
    for(size_t i=0; i<goodMatches.size(); i++) {
        pts1.push_back(kp1[goodMatches[i].queryIdx].pt);
        pts2.push_back(kp2[goodMatches[i].trainIdx].pt);
    }
    //the fundamental matrix encodes the extrinsic relation between the two views
    Mat fundamental = findFundamentalMat(pts1, pts2, FM_8POINT);
    //an epiline is a triple (a, b, c): the corresponding point in the other view lies on ax+by+c=0
    vector<Vec<float, 3>> epilines1, epilines2;
    computeCorrespondEpilines(pts1, 1, fundamental, epilines1);
    computeCorrespondEpilines(pts2, 2, fundamental, epilines2);
    RNG &rng = theRNG();
    //BUG FIX: the original always drew 50 lines and indexed out of bounds whenever
    //fewer than 50 good matches survived the filtering; cap the count at the vector size
    int nShow = (int)pts1.size() < 50 ? (int)pts1.size() : 50;
    for(int i = 0; i < nShow; ++i) { //only show a subset
        Scalar color = Scalar(rng(255), rng(255), rng(255));
        circle(rgb1, pts1[i], 5, color, 3);
        //draw each epiline through its intersections with x = 0 and x = image width
        line(rgb1, Point(0, -epilines2[i][2] / epilines2[i][1]), Point(rgb1.cols, -(epilines2[i][2] + epilines2[i][0] * rgb1.cols) / epilines2[i][1]), color, 1, LINE_AA);
        circle(rgb2, pts2[i], 5, color, 3);
        line(rgb2, Point(0, -epilines1[i][2] / epilines1[i][1]), Point(rgb2.cols, -(epilines1[i][2] + epilines1[i][0] * rgb2.cols) / epilines1[i][1]), color, 1, LINE_AA);
    }
    imshow("epiline1", rgb2);
    imshow("epiline2", rgb1);
}
Mat rectifyImageL, rectifyImageR; //rectified left/right images, shared between test59() and stereo_match()
Rect validROIL; //after rectification the image is cropped; validROI is the region kept after cropping
Rect validROIR;
Mat Q; //reprojection matrix from stereoRectify, consumed by reprojectImageTo3D in stereo_match()
int blockSize = 0, uniquenessRatio = 0, numDisparities = 0; //matcher parameters driven by the trackbars created in test59()
//Intended to colorize a disparity map; currently an unimplemented stub —
//stereo_match() builds its pseudo-color image inline instead.
void getDisparityImage(Mat disparity, Mat& disparityImage, bool isColor)
{
}
/* Stereo matching: compute a disparity map (BM or SGBM) and a pseudo-color visualization. */
void stereo_match(int, void*)
{
//1. SGBM matches at the sub-pixel level using the Birchfield-Tomasi metric [Birchfield99].
//2. SGBM enforces a global smoothness constraint based on the computed depth information.
//The two algorithms are complementary: BM is slightly faster, SGBM more reliable and accurate.
Mat disp, disp8, disparityImage;
if(false) {
Ptr<StereoBM> bm = StereoBM::create(16, 9);
bm->setBlockSize(2 * blockSize + 5); //SAD window size; 5~21 is a reasonable range
bm->setROI1(validROIL);
bm->setROI2(validROIR);
bm->setPreFilterCap(31);
bm->setMinDisparity(0); //minimum disparity, default 0, may be negative, int
bm->setNumDisparities(numDisparities * 16 + 16); //disparity range (max - min); must be a multiple of 16, int
bm->setTextureThreshold(10);
bm->setUniquenessRatio(uniquenessRatio); //uniquenessRatio helps prevent false matches
bm->setSpeckleWindowSize(100);
bm->setSpeckleRange(32);
bm->setDisp12MaxDiff(-1);
bm->compute(rectifyImageL, rectifyImageR, disp); //input images must be grayscale
}
else {
int channel = rectifyImageL.channels();
int SADWindowSize = 2 * blockSize + 5;
cv::Ptr<cv::StereoSGBM> sgbm = StereoSGBM::create(0, 16, 3);
sgbm->setPreFilterCap(15);
sgbm->setBlockSize(SADWindowSize);
sgbm->setP1(8 * channel * SADWindowSize * SADWindowSize);
sgbm->setP2(32 * channel * SADWindowSize * SADWindowSize);
sgbm->setMinDisparity(0);
sgbm->setNumDisparities(numDisparities * 16 + 16);
sgbm->setUniquenessRatio(uniquenessRatio);
sgbm->setSpeckleWindowSize(50);
sgbm->setSpeckleRange(32);
sgbm->setDisp12MaxDiff(1);
sgbm->setMode(cv::StereoSGBM::MODE_SGBM);
sgbm->compute(rectifyImageL, rectifyImageR, disp);
}
disp.convertTo(disp8, CV_8U, 255 / ((numDisparities * 16 + 16)*16.)); //the computed disparity is in CV_16S format
Mat xyz; //3-D coordinates
reprojectImageTo3D(disp, xyz, Q, true); //the X/W, Y/W, Z/W returned by reprojectImageTo3D must be multiplied by 16 to obtain correct 3-D coordinates
xyz = xyz * 16;
imshow("3D", xyz);
Mat disp8u;
disp8u = disp8;
disparityImage = Mat::zeros(disp8.rows, disp8.cols, CV_8UC3);
for(int y = 0; y<disp8.rows; y++) { //convert to a pseudo-color image
for(int x = 0; x<disp8.cols; x++) {
uchar val = disp8u.at<uchar>(y, x);
uchar r, g, b;
if (val == 0) {
r = g = b = 0; //no disparity: black
}
else {
r = 255 - val;
g = val < 128 ? val * 2 : (uchar)((255 - val) * 2);
b = val;
}
disparityImage.at<Vec3b>(y, x) = Vec3b(r, g, b);
}
}
imshow("disparity", disparityImage);
}
/* Depth map from a calibrated stereo pair: rectify, display, then run stereo matching */
void PictureAlgorithm::test59()
{
    Size imageSize = Size(640, 360);
    Mat rgbImageL, grayImageL;
    Mat rgbImageR, grayImageR;
    Mat mapLx, mapLy, mapRx, mapRy; //per-camera rectification lookup maps
    Mat Rl, Rr, Pl, Pr; //rectification rotations R* and projection matrices P*; the global Q is the reprojection matrix
    //pre-calibrated camera intrinsics: fx 0 cx / 0 fy cy / 0 0 1
    Mat cameraMatrixL = (Mat_<double>(3, 3) << 8.6447560726240727e+02, 0, 3.1396077284379163e+02,
        0, 7.9990966684818886e+02, 4.1785668776720030e+02,
        0, 0, 1);
    Mat distCoeffL = (Mat_<double>(5, 1) << -8.3290443472456477e-01, -1.6141488144887639e+00, -2.0021554243964865e-01,
        -5.0519644795720990e-03, 3.9975929024497163e+00);
    Mat cameraMatrixR = (Mat_<double>(3, 3) << 8.3828754655778573e+02, 0, 3.2434901263473841e+02,
        0, 7.8106334038218915e+02, 4.4025865037526029e+02,
        0, 0, 1);
    Mat distCoeffR = (Mat_<double>(5, 1) << -6.9576263466271460e-01, -9.7033517758028354e-01, -1.6926908600014581e-01,
        -4.1180802214907403e-03, 1.9769688036876045e+00);
    Mat T = (Mat_<double>(3, 1) << -5.4680441729569061e+01, 3.1352955093965731e+00, -1.0681705203000762e+01); //translation between the cameras
    Mat R = (Mat_<double>(3, 3) << 9.9981573574061489e-01, 7.2404315717654781e-03, -1.7778377769287649e-02,
        -7.8525554838869468e-03, 9.9937018554944201e-01, -3.4605918673635071e-02,
        1.7516618903926539e-02, 3.4739147737507171e-02, 9.9924289323299487e-01); //rotation between the cameras
    //1) stereoRectify: compute rectification rotations, projection matrices and the reprojection matrix Q
    stereoRectify(cameraMatrixL, distCoeffL, cameraMatrixR, distCoeffR, imageSize, R, T, Rl, Rr, Pl, Pr, Q, CALIB_ZERO_DISPARITY, 0, imageSize, &validROIL, &validROIR);
    //2) initUndistortRectifyMap: build the pixel remap tables for each camera
    //BUG FIX: the left map must use the LEFT projection matrix Pl; the original passed Pr,
    //rectifying the left image with the right camera's projection and breaking row alignment
    initUndistortRectifyMap(cameraMatrixL, distCoeffL, Rl, Pl, imageSize, CV_32FC1, mapLx, mapLy);
    initUndistortRectifyMap(cameraMatrixR, distCoeffR, Rr, Pr, imageSize, CV_32FC1, mapRx, mapRy);
    rgbImageL = imread("../x64/Debug/picture/45_L.png", IMREAD_COLOR);
    cvtColor(rgbImageL, grayImageL, COLOR_BGR2GRAY);
    rgbImageR = imread("../x64/Debug/picture/45_R.png", IMREAD_COLOR);
    cvtColor(rgbImageR, grayImageR, COLOR_BGR2GRAY);
    imshow("矫正前的左图像", grayImageL);
    imshow("矫正前的右图像", grayImageR);
    //3) remap both images; afterwards the two image planes are coplanar and row-aligned
    remap(grayImageL, rectifyImageL, mapLx, mapLy, INTER_LINEAR);
    remap(grayImageR, rectifyImageR, mapRx, mapRy, INTER_LINEAR);
    Mat rgbRectifyImageL, rgbRectifyImageR; //display the rectification result
    cvtColor(rectifyImageL, rgbRectifyImageL, COLOR_GRAY2BGR);
    cvtColor(rectifyImageR, rgbRectifyImageR, COLOR_GRAY2BGR);
    rectangle(rgbRectifyImageL, validROIL, Scalar(0, 255, 0), 3, 8); //show each valid ROI separately
    rectangle(rgbRectifyImageR, validROIR, Scalar(0, 255, 0), 3, 8);
    imshow("矫正后的左图像", rgbRectifyImageL);
    imshow("矫正后的右图像", rgbRectifyImageR);
    //draw both rectified images side by side with horizontal guide lines
    Mat canvas;
    double sf;
    int w, h;
    sf = 600. / MAX(imageSize.width, imageSize.height);
    w = cvRound(imageSize.width * sf);
    h = cvRound(imageSize.height * sf);
    canvas.create(h, w * 2, CV_8UC3); //note: three channels
    Mat canvasPart = canvas(Rect(w * 0, 0, w, h)); //left half of the canvas
    resize(rgbRectifyImageL, canvasPart, canvasPart.size(), 0, 0, INTER_AREA); //draw the scaled left image
    Rect vroiL(cvRound(validROIL.x*sf), cvRound(validROIL.y * sf), cvRound(validROIL.width*sf), cvRound(validROIL.height * sf)); //scaled valid region
    rectangle(canvasPart, vroiL, Scalar(0, 0, 255), 3, 8);
    canvasPart = canvas(Rect(w, 0, w, h)); //right half of the canvas
    resize(rgbRectifyImageR, canvasPart, canvasPart.size(), 0, 0, INTER_LINEAR); //draw the scaled right image
    Rect vroiR(cvRound(validROIR.x * sf), cvRound(validROIR.y*sf), cvRound(validROIR.width * sf), cvRound(validROIR.height * sf));
    rectangle(canvasPart, vroiR, Scalar(0, 0, 255), 3, 8);
    for(int i = 0; i < canvas.rows; i += 16) { //draw the row-alignment guide lines
        line(canvas, Point(0, i), Point(canvas.cols, i), Scalar(0, 255, 0), 1, 8);
    }
    imshow("纠正后的图像", canvas);
    namedWindow("disparity", WINDOW_AUTOSIZE);
    createTrackbar("块大小:\n", "disparity", &blockSize, 8, stereo_match); //SAD window size trackbar
    createTrackbar("唯一比率:\n", "disparity", &uniquenessRatio, 50, stereo_match); //uniqueness-ratio trackbar
    createTrackbar("差异数量:\n", "disparity", &numDisparities, 16, stereo_match); //disparity-count trackbar
    //4) compute the initial disparity map
    stereo_match(0, 0);
}
/* Cascade-classifier face detection on a video file.
   Loads a Haar frontal-face cascade, reads frames from 11.mp4, and draws a red
   rectangle around every detected face until the video ends or '2' is pressed.
   FIX: removed the unused local `int i = 1;`. */
void PictureAlgorithm::test60()
{
    CascadeClassifier face_classifier; //the detector
    if (!face_classifier.load("E:/opencv/sources/data/haarcascades/haarcascade_frontalface_alt.xml")) { //load the trained cascade data
        printf("could not load face feature data...\n");
        return;
    }
    VideoCapture vcHandle = VideoCapture("../x64/Debug/video/11.mp4");
    if (!vcHandle.isOpened()) {
        return;
    }
    Mat frame;
    while (1) {
        vcHandle.read(frame);
        if (frame.empty()) { //end of stream (or read failure)
            printf("could not load image...\n");
            break;
        }
        Mat gray;
        cvtColor(frame, gray, COLOR_BGR2GRAY); //cascade works on grayscale
        equalizeHist(gray, gray); //histogram equalisation raises contrast for the detector
        vector<Rect> faces;
        face_classifier.detectMultiScale(gray, faces, 1.2, 3, 0, Size(24, 24)); //multi-scale detection, min face 24x24
        for (size_t t = 0; t < faces.size(); t++) {
            rectangle(frame, faces[static_cast<int>(t)], Scalar(0, 0, 255), 2, 8, 0);
        }
        imshow("detect faces", frame);
        char c = waitKey(1);
        if (c == '2') { //keyboard event: '2' quits
            break;
        }
    }
}
/* Cascade-classifier eye detection on a video file.
   Detects faces first, then searches for eyes only inside the upper half of
   each face rectangle (eyes can only be there), drawing green boxes on eyes
   and red boxes on faces. ESC quits.
   FIX: the eye search ROI was taken from the colour `frame`, bypassing the
   equalised grayscale image that the face detector used; it now uses
   `gray(rect)` so the eye cascade sees the same preprocessed input and no
   redundant per-ROI BGR->gray conversion happens each frame. */
void PictureAlgorithm::test61()
{
    CascadeClassifier ccFace;
    CascadeClassifier ccEye;
    String facefile = "E:/opencv/sources/data/haarcascades/haarcascade_frontalface_alt.xml";
    String eyefile = "E:/opencv/sources/data/haarcascades/haarcascade_eye.xml";
    if (!ccFace.load(facefile)) {
        printf("could not load face feature data...\n");
        return;
    }
    if (!ccEye.load(eyefile)) {
        printf("could not load eye feature data...\n");
        return;
    }
    VideoCapture capture("../x64/Debug/video/10.mp4");
    Mat frame;
    Mat gray;
    vector<Rect> faces;
    vector<Rect> eyes;
    while (capture.read(frame)) {
        cvtColor(frame, gray, COLOR_BGR2GRAY);
        equalizeHist(gray, gray);
        ccFace.detectMultiScale(gray, faces, 1.2, 3, 0, Size(30, 30));
        //eyes must lie inside a face: restricting the search to a face ROI
        //shrinks the search space, improving both accuracy and speed
        for (size_t t = 0; t < faces.size(); t++) {
            Rect rect;
            rect.x = faces[static_cast<int>(t)].x;
            rect.y = faces[static_cast<int>(t)].y;
            rect.width = faces[static_cast<int>(t)].width;
            rect.height = faces[static_cast<int>(t)].height / 2; //eyes sit in the upper half of the face
            Mat faceROI = gray(rect); //FIX: equalised grayscale ROI, not the colour frame
            ccEye.detectMultiScale(faceROI, eyes, 1.2, 3, 0, Size(20, 20));
            for (size_t k = 0; k < eyes.size(); k++) {
                //translate eye coordinates from ROI space back into frame space
                rect.x = faces[static_cast<int>(t)].x + eyes[k].x;
                rect.y = faces[static_cast<int>(t)].y + eyes[k].y;
                rect.width = eyes[k].width;
                rect.height = eyes[k].height;
                rectangle(frame, rect, Scalar(0, 255, 0), 2, 8, 0);
            }
            rectangle(frame, faces[static_cast<int>(t)], Scalar(0, 0, 255), 2, 8, 0);
        }
        imshow("camera-demo", frame);
        char c = waitKey(30);
        if (c == 27) { //ESC quits
            break;
        }
    }
}
int g_iCurRadius = 5; //current ELBP sampling radius, driven by the "Radius:" trackbar in test62
Mat g_maGray; //grayscale source image shared between test62 and the Demo_ELBP callback
/* Extended (circular) LBP trackbar callback.
   Samples 8 points on a circle of radius g_iCurRadius around each pixel of
   g_maGray, bilinearly interpolating each sample, and builds an 8-bit pattern
   per pixel (bit n set when sample n is brighter than the centre).
   FIXES: removed the unused locals `height`/`width`; added a guard so an
   empty image, or a radius too large for the image, no longer produces a
   Mat::zeros with non-positive dimensions / out-of-bounds reads. */
void Demo_ELBP(int, void*)
{
    int offset = g_iCurRadius * 2;
    if (g_maGray.empty() || g_maGray.rows <= offset || g_maGray.cols <= offset) {
        return; //nothing valid to compute for this radius
    }
    Mat dst = Mat::zeros(g_maGray.rows - offset, g_maGray.cols - offset, CV_8UC1);
    int neighbors = 8;
    for (int n = 0; n < neighbors; n++) {
        //sampling point n on the circle (y negated: image rows grow downward)
        float x = static_cast<float>(g_iCurRadius) * cos(2.0*CV_PI*n / static_cast<float>(neighbors));
        float y = static_cast<float>(g_iCurRadius) * -sin(2.0*CV_PI*n / static_cast<float>(neighbors));
        //integer neighbours of the (sub-pixel) sampling point
        int fx = static_cast<int>(floor(x)); //round toward -inf
        int fy = static_cast<int>(floor(y));
        int cx = static_cast<int>(ceil(x)); //round toward +inf
        int cy = static_cast<int>(ceil(y));
        //fractional offsets inside the cell
        float ty = y - fy;
        float tx = x - fx;
        //bilinear interpolation weights
        float w1 = (1 - tx) * (1 - ty);
        float w2 = tx * (1 - ty);
        float w3 = (1 - tx) * ty;
        float w4 = tx * ty;
        for (int i = g_iCurRadius; i < g_maGray.rows - g_iCurRadius; i++) {
            for (int j = g_iCurRadius; j < g_maGray.cols - g_iCurRadius; j++) {
                //interpolated grey value at the sampling point
                float t = w1*g_maGray.at<uchar>(i + fy, j + fx) + w2*g_maGray.at<uchar>(i + fy, j + cx) +
                    w3*g_maGray.at<uchar>(i + cy, j + fx) + w4*g_maGray.at<uchar>(i + cy, j + cx);
                //set bit n when the sample is strictly brighter than the centre;
                //epsilon tolerance because t is a floating-point interpolation
                dst.at<uchar>(i - g_iCurRadius, j - g_iCurRadius) +=
                    ((t > g_maGray.at<uchar>(i, j)) && (abs(t - g_maGray.at<uchar>(i, j)) > std::numeric_limits<float>::epsilon())) << n;
            }
        }
    }
    imshow("ELBP_Result", dst);
}
/* 局部二值特征 */
void PictureAlgorithm::test62()
{
Mat src, dst, LBP_image;
src = imread("../x64/Debug/picture/6.jpg");
cvtColor(src, g_maGray, COLOR_BGR2GRAY);
//默认LBP图像
LBP_image = Mat::zeros(src.rows - 2, src.cols - 2, CV_8UC1);
for (int i = 1; i < g_maGray.rows-1; i++) {
for (int j = 1; j < g_maGray.cols-1; j++) {
uchar center = g_maGray.at<uchar>(i, j);
uchar code = 0;
code |= (g_maGray.at<uchar>(i-1, j-1) > center) << 7;
code |= (g_maGray.at<uchar>(i-1, j) > center) << 6;
code |= (g_maGray.at<uchar>(i-1, j+1) > center) << 5;
code |= (g_maGray.at<uchar>(i, j+1) > center) << 4;
code |= (g_maGray.at<uchar>(i+1, j+1) > center) << 3;
code |= (g_maGray.at<uchar>(i+1, j) > center) << 2;
code |= (g_maGray.at<uchar>(i+1, j-1) > center) << 1;
code |= (g_maGray.at<uchar>(i, j-1) > center) << 0;
LBP_image.at<uchar>(i-1, j-1) = code;
}
}
imshow("LBP 结果", LBP_image);
namedWindow("ELBP_Result", WINDOW_AUTOSIZE);
createTrackbar("Radius:", "ELBP_Result", &g_iCurRadius, 20, Demo_ELBP);
Demo_ELBP(0, 0);
}
/* Build a bag-of-words (BOW) vocabulary for an object detector.
   Extracts SIFT keypoints/descriptors from three images, clusters all
   descriptors into `clusterNum` visual words with k-means, then computes the
   BOW descriptor of the first image against that vocabulary.
   BUGFIX: the line "获取所有图像关键点和描述符" was a bare (non-comment)
   sentence pasted into the code and did not compile; it is now a comment. */
void PictureAlgorithm::test63()
{
    int clusterNum = 10;
    vector<KeyPoint> p1, p2, p3;
    Mat dp1, dp2, dp3;
    Mat pic1 = imread("../x64/Debug/picture/test1/1025.png", IMREAD_GRAYSCALE);
    Mat pic2 = imread("../x64/Debug/picture/test1/1026.png", IMREAD_GRAYSCALE);
    Mat pic3 = imread("../x64/Debug/picture/test1/1027.png", IMREAD_GRAYSCALE);
    //collect keypoints and descriptors for every image
    Ptr<SIFT> sift = SIFT::create();
    Ptr<SIFT> extractor = SIFT::create();
    sift->detect(pic1, p1);
    extractor->compute(pic1, p1, dp1);
    sift->detect(pic2, p2);
    extractor->compute(pic2, p2, dp2);
    sift->detect(pic3, p3);
    extractor->compute(pic3, p3, dp3);
    //k-means BOW trainer: clusterNum visual words, at most 15 iterations
    BOWKMeansTrainer bow = BOWKMeansTrainer(clusterNum, TermCriteria(TermCriteria::MAX_ITER, 15, 0.1));
    //add the descriptors to the training set
    bow.add(dp1);
    bow.add(dp2);
    bow.add(dp3);
    cout << "the count of the descriptors :" << bow.descriptorsCount() << endl;
    //cluster all descriptors into the vocabulary
    Mat vocabulary = bow.cluster();
    cout << "词袋" << vocabulary << endl;
    //BOW image-descriptor extractor: quantises an image's SIFT features against the vocabulary
    Ptr<DescriptorExtractor> descriptorExtractor = SIFT::create();
    Ptr<DescriptorMatcher> descriptorMatcher = DescriptorMatcher::create("BruteForce");
    BOWImgDescriptorExtractor bowex = BOWImgDescriptorExtractor(descriptorExtractor, descriptorMatcher);
    Mat bowfeatures;
    bowex.setVocabulary(vocabulary);
    bowex.compute(pic1, p1, bowfeatures);
    cout << "图像描述符" << bowfeatures << endl;
}
/* SURF,需要opencv_contrib库才能运行 */
void PictureAlgorithm::test64()
{
Mat srcImage1 = imread("../x64/Debug/picture/test1/1025.png", IMREAD_COLOR);
Mat srcImage2 = imread("../x64/Debug/picture/test1/1026.png", IMREAD_COLOR);
int minHessian = 400;//默认值为100
vector<KeyPoint>keyPoints, keyPoints1;
Mat resultImg, resultImage1;
//SURF特征检测 //也可以写成SURF::create(minHessian)
Ptr<xfeatures2d::SURF> detector = xfeatures2d::SURF::create(minHessian, 4, 3, false, false);
detector->detect(srcImage1, keyPoints, Mat());
drawKeypoints(srcImage1, keyPoints, resultImg, Scalar::all(-1), DrawMatchesFlags::DEFAULT);
imshow("SURF", resultImg);
//SIFT特征检测
Ptr<SIFT>detector1 = SIFT::create();
detector1->detect(srcImage2, keyPoints1, Mat());
//绘制关键点
drawKeypoints(srcImage2, keyPoints1, resultImage1, Scalar::all(-1), DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
imshow("SIFT", resultImage1);
}
/* Draw the Delaunay triangulation held in `subdiv` onto `img`.
   Triangles with any vertex outside the image are skipped, because Subdiv2D
   also produces triangles touching its virtual outer vertices. */
void drawDelaunay(Mat& img, Subdiv2D& subdiv, Scalar delaunay_color)
{
    vector<Vec6f> triangleList;
    subdiv.getTriangleList(triangleList);
    const Size size = img.size();
    const Rect bounds(0, 0, size.width, size.height);
    for (const Vec6f& t : triangleList) {
        const Point a(cvRound(t[0]), cvRound(t[1]));
        const Point b(cvRound(t[2]), cvRound(t[3]));
        const Point c(cvRound(t[4]), cvRound(t[5]));
        //only draw triangles lying completely inside the image
        if (!bounds.contains(a) || !bounds.contains(b) || !bounds.contains(c)) {
            continue;
        }
        line(img, a, b, delaunay_color, 1, LINE_AA, 0);
        line(img, b, c, delaunay_color, 1, LINE_AA, 0);
        line(img, c, a, delaunay_color, 1, LINE_AA, 0);
    }
}
/* Delaunay triangulation and Voronoi diagram of SIFT keypoints.
   Inserts each detected keypoint into a Subdiv2D, animating the growing
   triangulation, then renders the final triangulation plus the Voronoi cells
   (randomly coloured) with their centres.
   FIX: removed the dead always-true `if (true)` wrapper around the animation. */
void PictureAlgorithm::test65()
{
    string win_delaunay = "delaunay三角剖分";
    string win_voronoi = "voronoi多边形法图";
    Scalar delaunay_color(255,255,255), points_color(128, 0, 128);
    Mat img = imread("../x64/Debug/picture/test1/1025.png");
    Mat img_orig = img.clone();
    Size size = img.size();
    Rect rect(0, 0, size.width, size.height); //bounding rectangle for Subdiv2D
    Subdiv2D subdiv(rect);
    vector<KeyPoint> points;
    Ptr<SIFT>detector1 = SIFT::create();
    detector1->detect(img, points, Mat());
    for (auto it = points.begin(); it != points.end(); it++) {
        subdiv.insert(it->pt); //insert the point into the subdivision
        //animate: redraw the triangulation after every insertion
        Mat img_copy = img_orig.clone();
        drawDelaunay(img_copy, subdiv, delaunay_color);
        imshow(win_delaunay, img_copy);
        waitKey(10);
    }
    drawDelaunay(img, subdiv, delaunay_color);
    for (auto it = points.begin(); it != points.end(); it++) {
        circle(img, it->pt, 3, points_color, 1, LINE_AA, 0); //mark each site
    }
    //draw the Voronoi diagram
    Mat maVoronoi = Mat::zeros(img.rows, img.cols, CV_8UC3);
    vector<vector<Point2f> > facets;
    vector<Point2f> centers;
    subdiv.getVoronoiFacetList(vector<int>(), facets, centers);
    vector<Point> ifacet;
    vector<vector<Point>> ifacets(1);
    for (size_t i = 0; i < facets.size(); i++) {
        ifacet.resize(facets[i].size());
        for (size_t j = 0; j < facets[i].size(); j++) {
            ifacet[j] = facets[i][j]; //Point2f -> Point (truncating)
        }
        Scalar color;
        color[0] = rand() & 255;
        color[1] = rand() & 255;
        color[2] = rand() & 255;
        fillConvexPoly(maVoronoi, ifacet, color, 8, 0); //fill the cell with a random colour
        ifacets[0] = ifacet;
        polylines(maVoronoi, ifacets, true, Scalar(), 1, LINE_AA, 0); //cell outline
        circle(maVoronoi, centers[i], 3, Scalar(255,255,255), 1, LINE_AA, 0); //cell centre
    }
    imshow(win_delaunay, img);
    imshow(win_voronoi, maVoronoi);
}
/* 透明斗篷 */
void PictureAlgorithm::test66()
{
VideoCapture cap("../x64/Debug/video/15.mp4");
Mat background;
for (int i = 0; i < 250; i++) { //红布第251帧才出现,跳过前250帧
Mat frame_slip;
if(i == 24) {
cap >> background; //找一张没有人的背景图
}
else {
cap >> frame_slip;
}
continue;
}
flip(background, background, 1); //沿Y轴翻转图像
//图像读取
while (1) {
Mat frame;
cap >> frame;
if (frame.empty()) {
break;
}
Mat hsv;
flip(frame, frame, 1);
cvtColor(frame, hsv, COLOR_BGR2HSV);
Mat mask1, mask2; //红色区域1,红色区域2
Mat mask_red; //红色区域
Mat mask_background; //背景区域
//过滤颜色,二值图,其中黑色0表示无红色,白色1表示红色区域
inRange(hsv, Scalar(0, 120, 70), Scalar(10, 255, 255), mask1);
inRange(hsv, Scalar(170, 120, 70), Scalar(180, 255, 255), mask2);
mask_red = mask1 + mask2;
//去除噪声
Mat kernel = Mat::ones(3, 3, CV_32F);
morphologyEx(mask_red, mask_red, cv::MORPH_OPEN, kernel);
morphologyEx(mask_red, mask_red, cv::MORPH_DILATE, kernel);
//将mask_red中0,1互换,得到背景区域范围
bitwise_not(mask_red, mask_background);
Mat res1, res2, final_output;
//从当前帧抠出背景区域res1,红布区域被涂成黑色
bitwise_and(frame, frame, res1, mask_background);
imshow("res1", res1);
//从背景帧提取红布区域覆盖的背景res2
bitwise_and(background, background, res2, mask_red);
imshow("res2", res2);
addWeighted(res1, 1, res2, 1, 0, final_output);
//展示图像
imshow("Magic", final_output);
char c = (char)waitKey(1);
if (c == 27) {
break;
}
}
}
/* 寻找单个质心 */
void PictureAlgorithm::test67()
{
Mat src, gray, thr;
src = imread("../x64/Debug/picture/circle.png");
cvtColor(src, gray, COLOR_BGR2GRAY);
threshold(gray, thr, 0, 255, THRESH_OTSU); //二值化
Moments m = moments(thr, true); //提取二值图像矩,true表示图像二值化了
Point p(m.m10 / m.m00, m.m01 / m.m00);
//质心坐标
cout << Mat(p) << endl;
circle(src, p, 5, Scalar(128, 0, 0), -1);
imshow("show", src);
}
/* 寻找多个质心 */
void PictureAlgorithm::test68()
{
Mat src, gray;
src = imread("../x64/Debug/picture/multiple.png");
cvtColor(src, gray, COLOR_BGR2GRAY);
imshow("Source", src);
Mat canny_output; //寻质心函数
vector<vector<Point> > contours; //各个轮廓的点集合
vector<Vec4i> hierarchy; //轮廓输出结果向量
Canny(gray, canny_output, 50, 150, 3); //边缘算子提取轮廓
findContours(canny_output, contours, hierarchy, RETR_TREE, CHAIN_APPROX_SIMPLE, Point(0, 0)); //寻找轮廓 RETR_TREE表示提取所有轮廓
vector<Moments> mu(contours.size()); //图像矩
for (int i = 0; i < contours.size(); i++) { //求取每个轮廓的矩
mu[i] = moments(contours[i], false);
}
vector<Point2f> mc(contours.size()); //轮廓质点
for (int i = 0; i < contours.size(); i++) {
mc[i] = Point2f(mu[i].m10 / mu[i].m00, mu[i].m01 / mu[i].m00);
}
Mat drawing(canny_output.size(), CV_8UC3, Scalar(255, 255, 255));
for (int i = 0; i < contours.size(); i++) {
Scalar color = Scalar(167, 151, 0);
drawContours(drawing, contours, i, color, 2, 8, hierarchy, 0, Point()); //画轮廓
circle(drawing, mc[i], 4, color, -1, 7, 0); //画质心
}
imshow("Contours", drawing);
}
/* 提取出来的二维码 */
void PictureAlgorithm::test69()
{
Mat inputImage = imread("../x64/Debug/picture/54.jpg");
imshow("Rectified QRCode", inputImage);
QRCodeDetector qrDecoder = QRCodeDetector::QRCodeDetector(); //QR检测器
Mat bbox, rectifiedImage; //二维码边框坐标,提取出来的二维码
std::string data = qrDecoder.detectAndDecode(inputImage, bbox, rectifiedImage); //检测二维码
//获取二维码中的数据链接
if (data.length() > 0) {
cout << "Decoded Data : " << data << endl;
int n = bbox.rows;
for (int i = 0; i < n; i++) {
Point p1 = Point2i(bbox.at<float>(i, 0), bbox.at<float>(i, 1));
Point p2 = Point2i(bbox.at<float>((i + 1) % n, 0), bbox.at<float>((i + 1) % n, 1));
line(inputImage, p1, p2, Scalar(255, 0, 0), 3);
}
imshow("Result", inputImage);
}
else {
cout << "QR Code not detected" << endl;
}
}
/* 使用OpenCV实现基于特征的图像对齐 */
void PictureAlgorithm::test70()
{
//模板和需要矫正的图像相似性要很大,否则会失败(效果不如PictureAlgorithm::test51())
const int MAX_FEATURES = 5000; //最大特征点数
const float GOOD_MATCH_PERCENT = 0.15; //好的特征点数
Mat imReference = imread("../x64/Debug/picture/59_1.jpg"); //读取参考图像
Mat im = imread("../x64/Debug/picture/59.jpg"); //读取对准图像
Mat imReg, h; //结果图像,单应性矩阵
Mat im1Gray, im2Gray;
cvtColor(im, im1Gray, COLOR_BGR2GRAY);
cvtColor(imReference, im2Gray, COLOR_BGR2GRAY);
vector<KeyPoint> keypoints1, keypoints2; //关键点
Mat descriptors1, descriptors2; //特征描述符
Ptr<Feature2D> orb = ORB::create(MAX_FEATURES);
orb->detectAndCompute(im1Gray, Mat(), keypoints1, descriptors1); //计算ORB特征和描述子
orb->detectAndCompute(im2Gray, Mat(), keypoints2, descriptors2);
vector<DMatch> matches;
Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create("BruteForce-Hamming"); //汉明距离进行特征点匹配
matcher->match(descriptors1, descriptors2, matches, Mat());
sort(matches.begin(), matches.end()); //按照特征点匹配结果从优到差排列
const int numGoodMatches = matches.size() * GOOD_MATCH_PERCENT;
matches.erase(matches.begin() + numGoodMatches, matches.end()); //移除不好的特征点
Mat imMatches;
drawMatches(im, keypoints1, imReference, keypoints2, matches, imMatches); //画出特征点匹配图
imshow("matches.jpg", imMatches);
//提取好匹配的位置
vector<Point2f> points1, points2;
for (size_t i = 0; i < matches.size(); i++) { //保存对应点
points1.push_back(keypoints1[matches[i].queryIdx].pt); //queryIdx是对齐图像的描述子和特征点的下标
points2.push_back(keypoints2[matches[i].trainIdx].pt); //trainIdx是是样本图像的描述子和特征点的下标
}
h = findHomography(points1, points2, RANSAC);
warpPerspective(im, imReg, h, imReference.size()); //映射
imshow("123", imReg);
cout << "Estimated homography : \n" << h << endl;
}
/* Approximate gradient magnitude of a grayscale image:
   |Sobel_x| and |Sobel_y| converted to 8-bit and blended 50/50. */
Mat GetGradient(Mat src_gray)
{
    const int scale = 1;
    const int delta = 0;
    const int ddepth = CV_32FC1;
    //horizontal derivative
    Mat gx, absGx;
    Sobel(src_gray, gx, ddepth, 1, 0, 3, scale, delta, BORDER_DEFAULT);
    convertScaleAbs(gx, absGx); //linear scaling to 8-bit unsigned
    //vertical derivative
    Mat gy, absGy;
    Sobel(src_gray, gy, ddepth, 0, 1, 3, scale, delta, BORDER_DEFAULT);
    convertScaleAbs(gy, absGy);
    //blend |dx| and |dy| with equal weight
    Mat grad;
    addWeighted(absGx, 0.5, absGy, 0.5, 0, grad);
    return grad;
}
/* Image alignment by enhanced correlation coefficient (ECC) maximisation.
   The input is one tall grayscale image holding three vertically stacked
   exposures that serve as the B, G, R planes of a single photo; the blue and
   green strips are warped onto the red strip before merging into colour. */
void PictureAlgorithm::test71()
{
Mat im = imread("../x64/Debug/picture/60_2.jpg", IMREAD_GRAYSCALE);
imshow("原图", im);
Size sz = im.size();
int height = sz.height / 3;
int width = sz.width;
//after slicing, the three strips act as the B, G, R planes (specific to this image)
vector<Mat> channels;
channels.push_back(im(Rect(0, 0, width, height)));
channels.push_back(im(Rect(0, height, width, height)));
channels.push_back(im(Rect(0, 2 * height, width, height)));
Mat im_color;
merge(channels, im_color); //merge the three strips into one colour image
imshow("Color Image", im_color);
//allocate the aligned output planes
vector<Mat> vAligned;
vAligned.push_back(Mat(height, width, CV_8UC1));
vAligned.push_back(Mat(height, width, CV_8UC1));
//blue and green will be aligned to red, so the red plane is copied unchanged
vAligned.push_back(channels[2].clone());
const int warp_mode = MOTION_AFFINE; //choice of motion model
//warp (transformation) matrix, initialised to the identity
Mat maWarp;
if (warp_mode == MOTION_HOMOGRAPHY) { //homography needs a 3x3 matrix
maWarp = Mat::eye(3, 3, CV_32F);
}
else {
maWarp = Mat::eye(2, 3, CV_32F);
}
int iTteratorNum = 10; //iteration count and threshold: more iterations cost time with little visible gain
double termination_eps = 1e-10;
TermCriteria criteria(TermCriteria::COUNT + TermCriteria::EPS, iTteratorNum, termination_eps);
//warp the blue and green planes onto the red plane
for (int i = 0; i < 2; i++) {
double cc = findTransformECC(GetGradient(channels[2]), GetGradient(channels[i]), maWarp, warp_mode, criteria);
cout << "warp_matrix : " << maWarp << ",CC " << cc << endl;
if (cc == -1) {
cerr << "执行被打断了。相关值将被最小化,检查变换初始化和/或图像的大小" << endl;
}
if (warp_mode == MOTION_HOMOGRAPHY) {
warpPerspective(channels[i], vAligned[i], maWarp, vAligned[0].size(), INTER_LINEAR + WARP_INVERSE_MAP); //perspective warp when the transform is a homography
}
else {
warpAffine(channels[i], vAligned[i], maWarp, vAligned[0].size(), INTER_LINEAR + WARP_INVERSE_MAP); //affine warp otherwise
}
}
Mat maAligned;
merge(vAligned, maAligned); //merge the aligned planes
imshow("Aligned Image", maAligned);
}
#define MAX_SLIDER_VALUE 255
#define NUM_EIGEN_FACES 10 //number of principal components kept by the PCA
int sliderValues[NUM_EIGEN_FACES]; //per-eigenface weight sliders (shared with the trackbar callbacks)
Mat averageFace; //mean face image produced by the PCA
vector<Mat> eigenFaces; //eigenvector images (one per principal component)
/* 通过将加权特征面添加到平均面来计算最终图像 */
void createNewFace(int, void *)
{
Mat output = averageFace.clone();
for (int i = 0; i < NUM_EIGEN_FACES; i++) { //将特征面与权重相加
double weight = sliderValues[i] - MAX_SLIDER_VALUE / 2; //OpenCV不允许滑块值为负值
output = output + eigenFaces[i] * weight;
}
resize(output, output, Size(), 2, 2);
imshow("Result", output);
}
/* Mouse callback: left-click resets every weight slider to its midpoint. */
void resetSliderValues(int event, int x, int y, int flags, void* userdata)
{
    if (event != EVENT_LBUTTONDOWN) {
        return; //only react to left clicks
    }
    for (int i = 0; i < NUM_EIGEN_FACES; i++) {
        sliderValues[i] = 128;
        setTrackbarPos("Weight" + to_string(i), "Trackbars", MAX_SLIDER_VALUE / 2);
    }
    createNewFace(0, 0); //re-render with the reset weights
}
/* Eigenface demo: run PCA over a face dataset and rebuild faces interactively.
   Loads every image in the EigenFace directory (plus its mirror), flattens
   each into a row of the data matrix, keeps NUM_EIGEN_FACES principal
   components, and wires up weight sliders to blend eigenfaces onto the mean.
   FIXES: the QDir was heap-allocated with `new` and leaked on the success
   path (it was only deleted in the early-return branch) — now a stack object;
   removed the unused `dirent*`, `count` and `files` locals and a stray `;;`;
   added a guard so an empty directory no longer indexes images[0]. */
void PictureAlgorithm::test72()
{
    string dirName = "../x64/Debug/picture/EigenFace/";
    vector<Mat> images;
    QDir dirinfo(dirName.c_str()); //stack instance: no manual delete, no leak
    if (!dirinfo.exists()) {
        return;
    }
    QStringList dirList = dirinfo.entryList(QDir::Files);
    for (int i = 0; i < dirList.size(); i++) {
        string fname = dirList[i].toStdString();
        string path = dirName + fname;
        Mat img = imread(path);
        img.convertTo(img, CV_32FC3, 1 / 255.0); //float image in [0,1]
        images.push_back(img);
        //also add the horizontally flipped face, doubling the dataset
        Mat imgFlip;
        flip(img, imgFlip, 1);
        images.push_back(imgFlip);
    }
    if (images.empty()) { //nothing to train on
        return;
    }
    Size sz = images[0].size();
    //data matrix for PCA: one flattened image per row
    Mat data(static_cast<int>(images.size()), images[0].rows * images[0].cols * 3, CV_32F);
    for (unsigned int i = 0; i < images.size(); i++) {
        Mat image = images[i].reshape(1, 1); //flatten to a single-channel row vector
        image.copyTo(data.row(i));
    }
    cout << "Calculating PCA ..." << endl;
    PCA pca(data, Mat(), PCA::DATA_AS_ROW, NUM_EIGEN_FACES); //keep the top components
    cout << "Calculating PCA complete" << endl;
    averageFace = pca.mean.reshape(3, sz.height); //mean face as an image
    Mat eigenVectors = pca.eigenvectors;
    for (int i = 0; i < NUM_EIGEN_FACES; i++) { //reshape each eigenvector into an image
        Mat eigenFace = eigenVectors.row(i).reshape(3, sz.height);
        eigenFaces.push_back(eigenFace);
    }
    Mat output;
    resize(averageFace, output, Size(), 2, 2); //display at double size
    namedWindow("Result", WINDOW_AUTOSIZE);
    imshow("Result", output);
    namedWindow("Trackbars", WINDOW_FULLSCREEN);
    for (int i = 0; i < NUM_EIGEN_FACES; i++) { //one weight slider per eigenface
        sliderValues[i] = MAX_SLIDER_VALUE / 2;
        createTrackbar("Weight" + to_string(i), "Trackbars", &sliderValues[i], MAX_SLIDER_VALUE, createNewFace);
    }
    setMouseCallback("Result", resetSliderValues); //click the average image to reset the sliders
}
/* 获取高动态范围成像HDR */
void PictureAlgorithm::test73()
{
vector<Mat> images;
vector<float> times; //曝光时间
int numImages = 4;
static const float timesArray[] = {1 / 30.0f, 0.25, 2.5, 15.0}; //图像曝光时间
times.assign(timesArray, timesArray + numImages);
Mat im = imread("../x64/Debug/picture/HDR/1.jpg");
images.push_back(im);
im = imread("../x64/Debug/picture/HDR/2.jpg");
images.push_back(im);
im = imread("../x64/Debug/picture/HDR/3.jpg");
images.push_back(im);
im = imread("../x64/Debug/picture/HDR/4.jpg");
images.push_back(im);
//图像对齐
Ptr<AlignMTB> alignMTB = createAlignMTB();
alignMTB->process(images, images);
//获得CRF
Mat responseDebevec;
Ptr<CalibrateDebevec> calibrateDebevec = createCalibrateDebevec();
calibrateDebevec->process(images, responseDebevec, times);
//图像合并为HDR图像
Mat hdrDebevec;
Ptr<MergeDebevec> mergeDebevec = createMergeDebevec();
mergeDebevec->process(images, hdrDebevec, times, responseDebevec);
imwrite("hdrDebevec.jpg", hdrDebevec);
//色调映射算法
Mat ldrDrago;
Ptr<TonemapDrago> tonemapDrago = createTonemapDrago(1.0, 0.7);
tonemapDrago->process(hdrDebevec, ldrDrago);
ldrDrago = 3 * ldrDrago;
imwrite("TonemapDrago.jpg", ldrDrago * 255);
//色调映射算法
Mat ldrReinhard;
Ptr<TonemapReinhard> tonemapReinhard = createTonemapReinhard(1.5, 0, 0, 0);
tonemapReinhard->process(hdrDebevec, ldrReinhard);
imwrite("TonemapReinhard.jpg", ldrReinhard * 255);
//色调映射算法
Mat ldrMantiuk;
Ptr<TonemapMantiuk> tonemapMantiuk = createTonemapMantiuk(2.2, 0.85, 1.2);
tonemapMantiuk->process(hdrDebevec, ldrMantiuk);
ldrMantiuk = 3 * ldrMantiuk;
imwrite("TonemapMantiuk.jpg", ldrMantiuk * 255);
//图像融合
Mat exposureFusion;
Ptr<MergeMertens> mergeMertens = createMergeMertens();
mergeMertens->process(images, exposureFusion);
imwrite("exposure-fusion.jpg", exposureFusion * 255);
}
/* Placeholder: not implemented yet */
void PictureAlgorithm::test74()
{
}
/* OpenCV self-test program (algorithm functions).
   First published: 2023-09-18 10:17:21.
   (This trailing text was pasted in without comment markers — kept here as a
   comment so the file compiles.) */