Adaptive Image Color Enhancement Algorithm

ImageEnhance.h

#pragma once
#include <iostream>
#include <io.h>
#include <fstream>
#include "opencv2/opencv.hpp"
#include "opencv2/highgui/highgui.hpp"

using namespace cv;
using namespace std;

#define Openmp_Threads1 2  //number of OpenMP threads for the white-balance stage
#define Openmp_Threads2 4  //number of OpenMP threads for the contrast stage

struct ImageEnhanceParam
{
	// if 0: skip enhancement
	int doImageEnhance = 1;

	// recommended 0.01-0.1; larger values brighten whites more
	float ThresholdRatio = 0.1;

	// recommended 0.85-0.95; smaller values reduce the white brightening
	float BrightnessScale = 0.96;

	// recommended 1 or 2; larger values give stronger contrast (window radius)
	int winSize = 1;

	// recommended 5-12; larger values give stronger contrast
	double maxCG = 3.5;
};
void PrintCostTime(const char* str, double& t1, double& t2);
string SplitString(const string& filePath);
void getFiles(string path, vector<string>& files);

//Automatic color equalization (white balance)
Mat PerfectReflectionAlgorithm(Mat& src, const float& ThresholdRatio);
Mat PerfectReflectionAlgorithmFast(Mat& src, const float& ThresholdRatio, const float& BrightnessScale);
Mat PerfectReflectionAlgorithmFast2(Mat& src, const float& ThresholdRatio, const float& BrightnessScale);
Mat AutoColorEqualization(Mat& src, ImageEnhanceParam& IEP);

//Adaptive contrast enhancement
Mat Intergral_2(const Mat& image);
int getVarianceMeanFast(const Mat &_image, Mat &_mean, Mat &_std, int d); //d = winSize: window radius
bool getVarianceMean(Mat& src, Mat& meanDst, Mat& varianceDst, int winSize);
bool adaptContrastEnhancement(Mat& src, Mat& dst, int winSize, double maxCG);
int ACE(const Mat &_image, Mat &_result, int _d, int _Scale, double _MaxCG);
void AutoContrastEnhancement(Mat& src, Mat& dst, ImageEnhanceParam& IEP);

/*
Image enhancement interface: boosts image brightness and contrast.
src: input image
dst: output image
IEP: tunable enhancement parameters, intended to be loaded from a config file
*/
int ImageEnhance(Mat& src, Mat& dst, ImageEnhanceParam& IEP);

ImageEnhance.cpp

#include "ImageEnhance.h"

#define eps 1e-5

void PrintCostTime(const char* str, double& t1, double& t2) {
	double t = (t2 - t1) * 1000 / cv::getTickFrequency();
	printf("%s ===> %.2f ms\n", str, t);
}

string SplitString(const string& filePath)
{
	string c = "\\";
	size_t pos1 = filePath.find_last_of(c);  //npos + 1 wraps to 0, so a bare filename is returned unchanged
	string imgName(filePath.substr(pos1 + 1));
	/*
	char szDrive[_MAX_DRIVE];   //drive
	char szDir[_MAX_DIR];       //directory
	char imgName[_MAX_FNAME];   //file name
	char imgExt[_MAX_EXT];      //extension
	_splitpath_s(filePath, szDrive, szDir, imgName, imgExt); //split the path
	*/
	return imgName;
}

void getFiles(string path, vector<string>& files)
{
	//file handle
	intptr_t hFile = 0;
	//file info
	struct _finddata_t fileinfo;
	string p;
	const char* files_format[2] = { "\\*.jpg" ,"\\*.png" };  //string literals must bind to const char*
	for (int i = 0; i < sizeof(files_format) / sizeof(char*); i++) {
		p.assign(path).append(files_format[i]);
		hFile = _findfirst(p.c_str(), &fileinfo);
		if (hFile != -1)
		{
			do
			{
				//if it is a directory, recurse into it; otherwise add it to the list
				if ((fileinfo.attrib &  _A_SUBDIR))
				{
					if (strcmp(fileinfo.name, ".") != 0 && strcmp(fileinfo.name, "..") != 0)
						getFiles(p.assign(path).append("\\").append(fileinfo.name), files);
				}
				else
				{
					files.push_back(p.assign(path).append("\\").append(fileinfo.name));
				}
			} while (_findnext(hFile, &fileinfo) == 0);
			_findclose(hFile);
		}
	}
}
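
The getFiles helper above relies on the Windows-only <io.h> _findfirst/_findnext API. A portable C++17 sketch of the same directory scan is shown below for reference; the name getFilesPortable is illustrative and not part of the original project.

//Portable sketch of the same directory scan using C++17 std::filesystem.
//getFilesPortable is an illustrative name, not part of the original project.
#include <filesystem>

void getFilesPortable(const string& path, vector<string>& files)
{
	namespace fs = std::filesystem;
	for (const auto& entry : fs::recursive_directory_iterator(path))
	{
		if (!entry.is_regular_file())
			continue;
		const string ext = entry.path().extension().string();
		if (ext == ".jpg" || ext == ".png")
			files.push_back(entry.path().string());
	}
}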

Mat PerfectReflectionAlgorithm(Mat& src, const float& ThresholdRatio) {
	int row = src.rows;
	int col = src.cols;
	Mat dst(row, col, CV_8UC3);
	int HistRGB[767] = { 0 };
	int MaxVal = 0;
//#pragma omp parallel for num_threads(Openmp_Threads1)  //disabled: MaxVal and HistRGB are shared, so this loop is not thread-safe without a reduction
	for (int i = 0; i < row; i++) {
		for (int j = 0; j < col; j++) {
			MaxVal = max(MaxVal, (int)src.at<Vec3b>(i, j)[0]);
			MaxVal = max(MaxVal, (int)src.at<Vec3b>(i, j)[1]);
			MaxVal = max(MaxVal, (int)src.at<Vec3b>(i, j)[2]);
			int sum = src.at<Vec3b>(i, j)[0] + src.at<Vec3b>(i, j)[1] + src.at<Vec3b>(i, j)[2];
			HistRGB[sum]++;
		}
	}
	int Threshold = 0;
	int sum = 0;
	for (int i = 766; i >= 0; i--) {
		sum += HistRGB[i];
		if (sum > row * col * ThresholdRatio) {
			Threshold = i;
			break;
		}
	}
	int AvgB = 0;
	int AvgG = 0;
	int AvgR = 0;
	int cnt = 0;
#pragma omp parallel for num_threads(Openmp_Threads1) reduction(+:AvgB,AvgG,AvgR,cnt)
	for (int i = 0; i < row; i++) {
		for (int j = 0; j < col; j++) {
			int sumP = src.at<Vec3b>(i, j)[0] + src.at<Vec3b>(i, j)[1] + src.at<Vec3b>(i, j)[2];
			if (sumP > Threshold) {
				AvgB += src.at<Vec3b>(i, j)[0];
				AvgG += src.at<Vec3b>(i, j)[1];
				AvgR += src.at<Vec3b>(i, j)[2];
				cnt++;
			}
		}
	}
	if (cnt == 0) {
		return src.clone();  //no reference-white pixels found; skip enhancement
	}
	AvgB /= cnt;
	AvgG /= cnt;
	AvgR /= cnt;
	if (AvgB == 0) AvgB = 1;  //avoid integer division by zero below
	if (AvgG == 0) AvgG = 1;
	if (AvgR == 0) AvgR = 1;
#pragma omp parallel for num_threads(Openmp_Threads1)
	for (int i = 0; i < row; i++) {
		for (int j = 0; j < col; j++) {
			int Blue = src.at<Vec3b>(i, j)[0] * MaxVal / AvgB;
			int Green = src.at<Vec3b>(i, j)[1] * MaxVal / AvgG;
			int Red = src.at<Vec3b>(i, j)[2] * MaxVal / AvgR;
			if (Red > 255) {
				Red = 255;
			}
			else if (Red < 0) {
				Red = 0;
			}
			if (Green > 255) {
				Green = 255;
			}
			else if (Green < 0) {
				Green = 0;
			}
			if (Blue > 255) {
				Blue = 255;
			}
			else if (Blue < 0) {
				Blue = 0;
			}
			dst.at<Vec3b>(i, j)[0] = Blue;
			dst.at<Vec3b>(i, j)[1] = Green;
			dst.at<Vec3b>(i, j)[2] = Red;
		}
	}
	return dst;
}

Mat PerfectReflectionAlgorithmFast(Mat& src, const float& ThresholdRatio, const float& BrightnessScale) {
	int row = src.rows;
	int col = src.cols;
	Mat dst(row, col, CV_8UC3);
	int HistRGB[767] = { 0 };
	int MaxVal = 0;
//#pragma omp parallel for num_threads(Openmp_Threads1)  //disabled: MaxVal and HistRGB are shared, so this loop is not thread-safe without a reduction
	for (int i = 0; i < row; i++) {
		//uchar* uc_pixel = src.data + row * src.step;
		const uchar* uc_pixel1 = src.ptr<uchar>(i);
		for (int j = 0; j < col; j++) {
			MaxVal = max(MaxVal, static_cast<int>(uc_pixel1[0]));
			MaxVal = max(MaxVal, static_cast<int>(uc_pixel1[1]));
			MaxVal = max(MaxVal, static_cast<int>(uc_pixel1[2]));

			int sum = uc_pixel1[0] + uc_pixel1[1] + uc_pixel1[2];
			HistRGB[sum]++;
			uc_pixel1 += 3;
		}
	}
	int Threshold = 0;
	int sum2 = 0;
	for (int i = 766; i >= 0; i--) {
		sum2 += HistRGB[i];
		if (sum2 > row * col * ThresholdRatio) {
			Threshold = i;
			break;
		}
	}
	float AvgB = 0;
	float AvgG = 0;
	float AvgR = 0;
	int pixCount = 0;
#pragma omp parallel for num_threads(Openmp_Threads1) reduction(+:AvgB,AvgG,AvgR,pixCount)
	for (int i = 0; i < row; i++) {
		const uchar* uc_pixel2 = src.ptr<uchar>(i);
		for (int j = 0; j < col; j++) {
			int sumP = uc_pixel2[0] + uc_pixel2[1] + uc_pixel2[2];
			if (sumP > Threshold) {
				AvgB += uc_pixel2[0];
				AvgG += uc_pixel2[1];
				AvgR += uc_pixel2[2];
				pixCount++;
			}
			uc_pixel2 += 3;
		}
	}
	if (pixCount == 0 || MaxVal == 0) {
		dst = src.clone(); // do not process
		return dst;
	}
	AvgB /= pixCount;
	AvgG /= pixCount;
	AvgR /= pixCount;
	AvgB += eps;
	AvgG += eps;
	AvgR += eps;
	//rescale and clamp to 0-255
#pragma omp parallel for num_threads(Openmp_Threads1)
	for (int i = 0; i < row; i++) {
		const uchar* uc_pixel3 = src.ptr<uchar>(i);
		uchar* dst_pixel = dst.ptr<uchar>(i);
		for (int j = 0; j < col; j++) {
			float Blue = BrightnessScale * static_cast<float>(uc_pixel3[0]) * MaxVal / AvgB;
			float Green = BrightnessScale * static_cast<float>(uc_pixel3[1]) * MaxVal / AvgG;
			float Red = BrightnessScale * static_cast<float>(uc_pixel3[2]) * MaxVal / AvgR;

			if (Red > 255) { Red = 255; }
			else if (Red < 0) { Red = 0; }

			if (Green > 255) { Green = 255; }
			else if (Green < 0) { Green = 0; }

			if (Blue > 255) { Blue = 255; }
			else if (Blue < 0) { Blue = 0; }
			dst_pixel[0] = static_cast<uchar>(Blue);
			dst_pixel[1] = static_cast<uchar>(Green);
			dst_pixel[2] = static_cast<uchar>(Red);
			uc_pixel3 += 3;
			dst_pixel += 3;
		}
	}
	return dst;
}

Mat PerfectReflectionAlgorithmFast2(Mat& src, const float& ThresholdRatio, const float& BrightnessScale)
{
	int histRGBSum[255 * 3 + 1] = { 0 };
	cv::Mat dst = cv::Mat::zeros(src.size(), src.type());
	//accumulate the R+G+B histogram and per-channel maxima
	uchar maxValue[3] = { 0 };
//#pragma omp parallel for num_threads(Openmp_Threads1)  //disabled: histRGBSum and maxValue are shared, so this loop is not thread-safe without a reduction
	for (int i = 0; i < src.rows; i++)
	{
		const uchar *ptrSrc = src.ptr<uchar>(i);
		for (int j = 0; j < src.cols; j++)
		{
			int sum = *(ptrSrc + 3 * j) + *(ptrSrc + 3 * j + 1) + *(ptrSrc + 3 * j + 2);
			histRGBSum[sum]++;
			maxValue[0] = std::max(maxValue[0], *(ptrSrc + 3 * j));
			maxValue[1] = std::max(maxValue[1], *(ptrSrc + 3 * j + 1));
			maxValue[2] = std::max(maxValue[2], *(ptrSrc + 3 * j + 2));
		}
	}

	//find the R+G+B value above which roughly ThresholdRatio of all pixels lie
	double sum = 0.0;
	int thresholdValue = 0;
	for (int i = 765; i >= 0; i--)
	{
		sum += histRGBSum[i];
		if (sum > src.rows * src.cols * ThresholdRatio)
		{
			thresholdValue = i;
			break;
		}
	}

	//mean of all pixels whose R+G+B exceeds the threshold
	double avgB = 0.0;
	double avgG = 0.0;
	double avgR = 0.0;
	int pixCount = 0;
	for (int i = 0; i < src.rows; i++)
	{
		const uchar *ptrSrc = src.ptr<uchar>(i);
		for (int j = 0; j < src.cols; j++)
		{
			int sum = *(ptrSrc + 3 * j) + *(ptrSrc + 3 * j + 1) + *(ptrSrc + 3 * j + 2);
			if (sum > thresholdValue)
			{
				avgB += *(ptrSrc + 3 * j);
				avgG += *(ptrSrc + 3 * j + 1);
				avgR += *(ptrSrc + 3 * j + 2);
				pixCount++;
			}
		}
	}
	if (pixCount == 0 || maxValue[0] == 0 || maxValue[1] == 0 || maxValue[2] == 0) {
		dst = src.clone();
		return dst;
	}
	avgB /= pixCount;
	avgG /= pixCount;
	avgR /= pixCount;
	avgB += eps;
	avgG += eps;
	avgR += eps;
	//rescale and clamp to 0-255
#pragma omp parallel for num_threads(Openmp_Threads1)
	for (int i = 0; i < src.rows; i++)
	{
		const uchar *ptrSrc = src.ptr<uchar>(i);
		uchar *ptrDst = dst.ptr<uchar>(i);
		for (int j = 0; j < src.cols; j++)
		{
			double blue = BrightnessScale * (double)*(ptrSrc + 3 * j) / avgB * maxValue[0];
			double green = BrightnessScale * (double)*(ptrSrc + 3 * j + 1) / avgG * maxValue[1];
			double red = BrightnessScale * (double)*(ptrSrc + 3 * j + 2) / avgR * maxValue[2];
			blue = std::min(std::max((double)0, blue), (double)255);
			green = std::min(std::max((double)0, green), (double)255);
			red = std::min(std::max((double)0, red), (double)255);
			*(ptrDst + 3 * j) = (uchar)blue;
			*(ptrDst + 3 * j + 1) = (uchar)green;
			*(ptrDst + 3 * j + 2) = (uchar)red;
		}
	}
	return dst;

}
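
All three PerfectReflectionAlgorithm variants implement the same idea: rank pixels by R+G+B, treat the brightest ThresholdRatio fraction as the white reference, and rescale each channel so that the reference average maps to the global maximum (Fast2 uses per-channel maxima instead of a single global maximum). For comparison, the sketch below expresses the same steps with OpenCV primitives instead of hand-written loops; it assumes a CV_8UC3 BGR input, and the name wbSketch is illustrative only, not part of the original code.

//Sketch of the perfect-reflection white balance using OpenCV primitives.
//Assumes a CV_8UC3 BGR input; wbSketch is an illustrative name only.
static Mat wbSketch(const Mat& src, float ThresholdRatio, float BrightnessScale)
{
	CV_Assert(src.type() == CV_8UC3);
	vector<Mat> bgr;
	split(src, bgr);

	//per-pixel R+G+B, kept in 16 bits to avoid overflow
	Mat sum16;
	add(bgr[0], bgr[1], sum16, noArray(), CV_16U);
	add(sum16, bgr[2], sum16, noArray(), CV_16U);

	//threshold that keeps roughly the brightest ThresholdRatio fraction of pixels
	int hist[766] = { 0 };
	for (int i = 0; i < sum16.rows; i++) {
		const ushort* p = sum16.ptr<ushort>(i);
		for (int j = 0; j < sum16.cols; j++)
			hist[p[j]]++;
	}
	const int need = static_cast<int>(src.total() * ThresholdRatio);
	int acc = 0, thr = 0;
	for (int v = 765; v >= 0; v--) {
		acc += hist[v];
		if (acc > need) { thr = v; break; }
	}

	//average of the near-white pixels and the global maximum over all channels
	Mat mask = sum16 > thr;
	Scalar avg = mean(src, mask);
	double maxVal = 0;
	minMaxLoc(src.reshape(1), 0, &maxVal);

	//rescale each channel; convertTo saturates to [0, 255]
	for (int c = 0; c < 3; c++)
		bgr[c].convertTo(bgr[c], CV_8U, BrightnessScale * maxVal / (avg[c] + 1e-5));
	Mat dst;
	merge(bgr, dst);
	return dst;
}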
Mat AutoColorEqualization(Mat& src, ImageEnhanceParam& IEP)
{
	const float ThresholdRatio = IEP.ThresholdRatio;
	const float BrightnessScale = IEP.BrightnessScale;
	Mat dst = PerfectReflectionAlgorithmFast(src, ThresholdRatio, BrightnessScale);
	//Mat dst = PerfectReflectionAlgorithmFast2(src, ThresholdRatio, BrightnessScale);
	return dst;
}

void AutoContrastEnhancement(Mat& src, Mat& dst, ImageEnhanceParam& IEP)
{
	//method 1
	int winSize = IEP.winSize;
	double maxCG = IEP.maxCG;
	if (!adaptContrastEnhancement(src, dst, winSize, maxCG))
	{
		//cout << "adaptContrastEnhancement Error..." << endl;
		dst = src.clone();
	}
	//method 2 (alternative); note that ACE actually takes five arguments: (image, result, d, Scale, MaxCG)
	//ACE(src, dst);
}

bool getVarianceMean(Mat &src, Mat &meansDst, Mat &varianceDst, int winSize)
{
	// Note: here winSize is the full (odd) window size, unlike getVarianceMeanFast,
	// where the parameter is the window radius. meansDst and varianceDst are expected
	// to be pre-allocated CV_32FC1 matrices of the same size as src.
	if (winSize % 2 == 0)
	{
		cout << "winSize should be an odd number" << endl;
		return false;
	}
	double t1 = cv::getTickCount();
	Mat copyBorder_yChannels;  //padded image
	int copyBorderSize = (winSize - 1) / 2;  //border padding
	copyMakeBorder(src, copyBorder_yChannels, copyBorderSize, copyBorderSize, copyBorderSize, copyBorderSize, BORDER_REFLECT);
	int for_row = copyBorder_yChannels.rows - copyBorderSize;
	int for_col = copyBorder_yChannels.cols - copyBorderSize;
	double t2 = cv::getTickCount();
	PrintCostTime("getVarianceMean padding:", t1, t2);
//#pragma omp parallel for num_threads(Openmp_Threads2)
	for (int i = copyBorderSize; i < for_row; i++)
	{
		float* variance_pixel = varianceDst.ptr<float>(i - copyBorderSize);
		float* mean_pixel = meansDst.ptr<float>(i - copyBorderSize);
		for (int j = copyBorderSize; j < for_col; j++)
		{
			//take one winSize x winSize patch from the padded image
			Mat temp = copyBorder_yChannels(Rect(j - copyBorderSize, i - copyBorderSize, winSize, winSize));
			Scalar  mean;
			Scalar  dev;
			meanStdDev(temp, mean, dev);
			//varianceDst.at<float>(i - copyBorderSize, j - copyBorderSize) = dev.val[0];     //element-wise assignment
			//meansDst.at<float>(i - copyBorderSize, j - copyBorderSize) = mean.val[0];
			*variance_pixel = dev.val[0];
			*mean_pixel = mean.val[0];
			variance_pixel++;
			mean_pixel++;
		}
	}
	return true;
}
int getVarianceMeanFast(const Mat &_image, Mat &_mean, Mat &_std, int d)
{
	if (_image.channels() == 1)
	{
		_mean.create(_image.size(), CV_64FC1);
		_std.create(_image.size(), CV_64FC1);

	}
	else if (_image.channels() == 3)
	{
		_mean.create(_image.size(), CV_64FC3);
		_std.create(_image.size(), CV_64FC3);
	}
	//pad the border
	Mat image_big;
	copyMakeBorder(_image, image_big, d + 1, d + 1, d + 1, d + 1, BORDER_REFLECT_101);
	image_big.convertTo(image_big, _mean.type());
	Mat image_big_2 = image_big.mul(image_big);
	Mat Intergral_image1 = Intergral_2(image_big);
	Mat Intergral_image2 = Intergral_2(image_big_2);

	int N = (2 * d + 1)*(2 * d + 1);
	int c = _image.channels();
	int nr = _image.rows;
	int nc = _image.cols*c;

	//cout << Intergral_image1 << endl;
	//cout << Intergral_image2 << endl;
#pragma omp parallel for num_threads(Openmp_Threads2)
	for (int i = 0; i < nr; i++)
	{
		double* outData1 = _mean.ptr<double>(i);
		double* outData2 = _std.ptr<double>(i);
		double* inDataUp1 = Intergral_image1.ptr<double>(i);
		double* inDataUp2 = Intergral_image2.ptr<double>(i);
		double* inDataDown1 = Intergral_image1.ptr<double>(i + 2 * d + 1);
		double* inDataDown2 = Intergral_image2.ptr<double>(i + 2 * d + 1);
		for (int j = 0; j < nc; j++)
		{
			double sumi1 = inDataDown1[j + (2 * d + 1)*c] + inDataUp1[j] - inDataUp1[j + (2 * d + 1)*c] - inDataDown1[j];
			double sumi2 = inDataDown2[j + (2 * d + 1)*c] + inDataUp2[j] - inDataUp2[j + (2 * d + 1)*c] - inDataDown2[j];
			outData1[j] = sumi1 / N;
			outData2[j] = (sumi2 - sumi1*outData1[j]) / N;
		}
	}
	cv::sqrt(_std, _std);
	return 0;
}
Mat Intergral_2(const Mat& image)
{
	// Inclusive integral (summed-area) image computed per channel on the interleaved
	// data and stored as CV_64F; used by getVarianceMeanFast for O(1) window sums.
	Mat result;
	Mat image_2;
	if (image.channels() == 1)
	{
		image.convertTo(image_2, CV_64FC1);
		result.create(image.size(), CV_64FC1);

	}
	else if (image.channels() == 3)
	{
		image.convertTo(image_2, CV_64FC3);
		result.create(image.size(), CV_64FC3);
	}
	//cout << image_2 << endl;
	int c = image_2.channels();
	int nr = image_2.rows;
	int nc = image_2.cols*c;
	for (int i = 0; i < nr; i++)
	{
		const double* inData = image_2.ptr<double>(i);
		double* outData = result.ptr<double>(i);
		if (i != 0)
		{
			const double* outData_up = result.ptr<double>(i - 1);
			for (int j = 0; j < nc; j++)
			{
				if (j >= c)
				{
					outData[j] = inData[j] + outData_up[j] + outData[j - c] - outData_up[j - c];
				}
				else
				{
					outData[j] = inData[j] + outData_up[j];
				}
			}
		}
		else
		{
			for (int j = 0; j < nc; j++)
			{
				if (j >= c)
				{
					outData[j] = inData[j] + outData[j - c];
				}
				else
				{
					outData[j] = inData[j];
				}
			}
		}
	}
	return result;
}
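
getVarianceMeanFast and Intergral_2 together compute the windowed mean and standard deviation from integral images of I and I*I (mean = S1/N, variance = S2/N - mean^2). The same statistics can be cross-checked with cv::boxFilter, as in the sketch below; localMeanStdSketch is an illustrative name, and the sketch assumes a CV_8UC1 input with window radius d, matching how the fast path is used on the Y plane. boxFilter's default border (BORDER_REFLECT_101) matches the padding used above.

//Sketch: windowed mean/std via normalized box filters, for cross-checking getVarianceMeanFast.
//localMeanStdSketch is an illustrative name; assumes a CV_8UC1 input and window radius d.
static void localMeanStdSketch(const Mat& y, Mat& mean, Mat& stddev, int d)
{
	Mat y64;
	y.convertTo(y64, CV_64F);
	const int k = 2 * d + 1;                              //full window size
	boxFilter(y64, mean, CV_64F, Size(k, k));             //local E[X]
	Mat y2 = y64.mul(y64);
	Mat meanSq;
	boxFilter(y2, meanSq, CV_64F, Size(k, k));            //local E[X^2]
	Mat var = meanSq - mean.mul(mean);                    //Var = E[X^2] - E[X]^2
	var = cv::max(var, 0.0);                              //guard tiny negative rounding errors
	cv::sqrt(var, stddev);
}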

bool adaptContrastEnhancement(Mat &src, Mat &dst, int winSize, double maxCg)
{
	if (!src.data)  //make sure the image was loaded
	{
		//cerr << "load image error...";
		return false;
	}

	Mat ycc;        //convert to YCrCb (imread loads images as BGR)
	cvtColor(src, ycc, COLOR_BGR2YCrCb);

	vector<Mat> channels(3);        //split channels
	split(ycc, channels);

	Mat localMeansMatrix(src.rows, src.cols, CV_64FC1);
	Mat localVarianceMatrix(src.rows, src.cols, CV_64FC1);
	//enhance only the Y (luma) channel
	
	/*if (!getVarianceMean(channels[0], localMeansMatrix, localVarianceMatrix, winSize))
	{
		//cerr << "getVarianceMean Error...";
		cout << "getVarianceMean Error..." << endl;;
		return false;
	}*/
	getVarianceMeanFast(channels[0], localMeansMatrix, localVarianceMatrix, winSize);
	Mat temp = channels[0].clone();

	Scalar  mean;
	Scalar  dev;
	meanStdDev(temp, mean, dev);
	//temp.convertTo(temp, CV_32F, 1 / 255.0);
	float meansGlobal = mean.val[0];
	Mat enhanceMatrix(src.rows, src.cols, CV_8UC1);
#pragma omp parallel for num_threads(Openmp_Threads2)
	for (int i = 0; i < src.rows; i++)  //adapt the gain for every pixel
	{
		double* localVarianceMatrix_pixel = localVarianceMatrix.ptr<double>(i);
		double* localMeansMatrix_pixel = localMeansMatrix.ptr<double>(i);
		uchar* temp_pixel = temp.ptr<uchar>(i);
		uchar* enhanceMatrix_pixel = enhanceMatrix.ptr<uchar>(i);
		for (int j = 0; j < src.cols; j++)
		{
			if (localVarianceMatrix_pixel[j] >= 0.01)
			{
				double cg = 0.2 * meansGlobal / (localVarianceMatrix_pixel[j] + eps);
				double cgs = cg > maxCg ? maxCg : cg;
				cgs = cgs < 1 ? 1 : cgs;

				int e = localMeansMatrix_pixel[j] + cgs* (temp_pixel[j] - localMeansMatrix_pixel[j]);
				if (e > 255) { e = 255; }
				else if (e < 0) { e = 0; }
				enhanceMatrix_pixel[j] = static_cast<uchar>(e);
			}
			else
			{
				enhanceMatrix_pixel[j] = temp_pixel[j];
			}
			//localVarianceMatrix_pixel++;
			//temp_pixel++;
			//enhanceMatrix_pixel++;

			/*if (localVarianceMatrix.at<float>(i, j) >= 0.01)
			{
				float cg = 0.2*meansGlobal / localVarianceMatrix.at<float>(i, j);
				float cgs = cg > maxCg ? maxCg : cg;
				cgs = cgs < 1 ? 1 : cgs;

				int e = localMeansMatrix.at<float>(i, j) + cgs* (temp.at<uchar>(i, j) - localMeansMatrix.at<float>(i, j));
				if (e > 255) { e = 255; }
				else if (e < 0) { e = 0; }
				enhanceMatrix.at<uchar>(i, j) = e;
			}
			else
			{
				enhanceMatrix.at<uchar>(i, j) = temp.at<uchar>(i, j);
			}*/
		}
	}
	//enhanceMatrix.convertTo(enhanceMatrix, CV_8U, 255.0);
	channels[0] = enhanceMatrix;  //merge channels and convert back to BGR
	merge(channels, ycc);
	cvtColor(ycc, dst, COLOR_YCrCb2BGR);
	return true;
}
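
The per-pixel rule in adaptContrastEnhancement is: gain cg = 0.2 * globalMean / (localStd + eps), clamped to the range [1, maxCG], and output = localMean + cg * (pixel - localMean). As a made-up numeric illustration: with a global mean of 120, a local standard deviation of 4 and maxCG = 3.5, cg = 0.2 * 120 / 4 = 6.0, which clamps to 3.5; a pixel of 90 in a neighbourhood whose local mean is 80 then becomes 80 + 3.5 * (90 - 80) = 115. Flat (low-variance) regions therefore receive the strongest amplification, while maxCG caps the boost and keeps noise from blowing up.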

int ACE(const Mat &_image, Mat &_result, int _d, int _Scale, double _MaxCG)
{
	Mat ycc;        //convert to YCrCb (imread loads images as BGR)
	cvtColor(_image, ycc, COLOR_BGR2YCrCb);

	vector<Mat> channels(3);        //split channels
	split(ycc, channels);

	Mat localmean, localstd;
	getVarianceMeanFast(channels[0], localmean, localstd, _d);
	if (channels[0].channels() == 1)
	{
		_result.create(_image.size(), CV_64FC1);
	}
	else if (channels[0].channels() == 3)
	{
		_result.create(_image.size(), CV_64FC3);
	}

	Mat mean_m, std_m;
	meanStdDev(channels[0], mean_m, std_m);
	// channels[0] is the single Y plane, so meanStdDev returns exactly one mean/std pair;
	// indexing std_m beyond element (0, 0) would read out of bounds.
	double stdGlobal = std_m.at<double>(0, 0);

	int c = channels[0].channels();
	int nr = _image.rows;
	int nc = _image.cols*c;
#pragma omp parallel for num_threads(Openmp_Threads2)
	for (int i = 0; i < nr; i++)
	{
		double* meanData = localmean.ptr<double>(i);
		double* stdData = localstd.ptr<double>(i);
		const uchar* imageData = channels[0].ptr<uchar>(i);
		double* outData = _result.ptr<double>(i);
		for (int j = 0; j < nc; j++)
		{
			double CG = stdGlobal / (stdData[j] + eps);  //per-thread local gain; eps guards division by zero
			if (CG > _MaxCG)
				CG = _MaxCG;
			outData[j] = meanData[j] + _Scale*CG*(int(imageData[j]) - meanData[j]);
		}
	}
	_result.convertTo(_result, CV_8UC1);
	channels[0] = _result;  //merge channels and convert back to BGR
	merge(channels, ycc);
	cvtColor(ycc, _result, COLOR_YCrCb2BGR);
	return 0;
}

int ImageEnhance(Mat& src, Mat& dst, ImageEnhanceParam& IEP)
{
	if (src.empty()) {
		//cout << "load image error..." << endl;
		return -1;
	}
	//double sum = 0;
	//for (int i = 0; i < 100; i++) {
	//double t1 = cv::getTickCount();
	Mat dst1 = AutoColorEqualization(src, IEP);  //white-balance stage

	AutoContrastEnhancement(dst1, dst, IEP);     //contrast-enhancement stage
	//double t2 = cv::getTickCount();
	//PrintCostTime("AutoColorEqualization:", t1, t2);
	//double t = (t2 - t1) * 1000 / cv::getTickFrequency();
	//sum += t;
	cv::imwrite("result08.jpg", dst);
	cv::imshow("origin", src);
	cv::imshow("dst1", dst1);
	cv::imshow("result", dst);
	cv::waitKey(0);
	//cv::destroyAllWindows();
	//}
	//sum /= 100;
	//cout << sum <<" ms"<< endl;
	//system("pause");
	return 0;
}

main.cpp

#include "ImageEnhance.h"

int main()
{
	// load the input image
	Mat src = imread("E:\\ImageProcess\\ImageEnhance\\secha\\5_u_1.jpg");
	//Mat src = imread("H:\\ImageProcess\\ImageEnhance\\3.png");
	Mat dst;                        // output Mat for the enhanced image
	//the parameters below are meant to be loaded from a config file
	ImageEnhanceParam IEP;          // initialize the enhancement parameters
	IEP.doImageEnhance = 1;         // if 0: skip enhancement
	IEP.ThresholdRatio = 0.1;       // tunable parameters; the defaults are usually fine
	IEP.BrightnessScale = 0.96;
	IEP.winSize = 1;
	IEP.maxCG = 3.5;

	if (IEP.doImageEnhance == 0) {
		dst = src.clone();   // skip enhancement
	}
	else {
		int ieFlag = ImageEnhance(src, dst, IEP);
		if (ieFlag == -1) {
			cout << "load image error..." << endl;
		}
	}
}
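
Both the header comment and main.cpp note that the ImageEnhanceParam fields are meant to come from a configuration file. A minimal key=value loader might look like the sketch below; the file name enhance.cfg, the key names, and the loadEnhanceParam function are all assumptions made for illustration, not part of the original project.

//Minimal sketch of a key=value config loader for ImageEnhanceParam.
//enhance.cfg, the key names and loadEnhanceParam are illustrative assumptions.
//Expects one "key=value" pair per line, with no spaces around '='.
#include <sstream>
#include <string>

ImageEnhanceParam loadEnhanceParam(const string& cfgPath)
{
	ImageEnhanceParam IEP;              //start from the built-in defaults
	ifstream in(cfgPath);
	string line;
	while (getline(in, line))
	{
		istringstream ls(line);
		string key, value;
		if (!getline(ls, key, '=') || !getline(ls, value))
			continue;                   //skip blank or malformed lines
		if (key == "doImageEnhance")       IEP.doImageEnhance = stoi(value);
		else if (key == "ThresholdRatio")  IEP.ThresholdRatio = stof(value);
		else if (key == "BrightnessScale") IEP.BrightnessScale = stof(value);
		else if (key == "winSize")         IEP.winSize = stoi(value);
		else if (key == "maxCG")           IEP.maxCG = stod(value);
	}
	return IEP;
}

main could then replace the hard-coded assignments with something like ImageEnhanceParam IEP = loadEnhanceParam("enhance.cfg");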