1.修改前的版本:
gamma校正完是否需要将图像归一化到0~255?为什么整幅图像的归一化结果都是0?
// Gamma correction (IplImage version): dst = min-max normalization of src^gamma to [0,255].
// src must be a single-channel 8-bit image; dst must be same size (8U assumed by caller).
void gamma_adjust(IplImage* src, IplImage* dst, const double& gamma)
{
assert(src->depth == IPL_DEPTH_8U && src->nChannels == 1 && gamma > 0);
IplImage* temp = cvCreateImage(cvGetSize(src), IPL_DEPTH_64F, 1);
for (int y = 0; y != src->height; ++y)
{
for (int x = 0; x != src->width; ++x)
{
const uchar* p = (const uchar*)(src->imageData + y*src->widthStep + x);
// BUG FIX: widthStep is measured in BYTES. For a 64F image each element
// is sizeof(double) bytes, so the column offset must be x doubles, not
// x bytes. The original "+ x" byte offset produced overlapping/misaligned
// writes, which is why the normalized result looked like all zeros.
double* q = (double*)(temp->imageData + y*temp->widthStep) + x;
*q = pow((double)*p, gamma);
}
}
double max, min;
cvMinMaxLoc(temp, &min, &max);
if (max > min)
cvConvertScale(temp, dst, 255.0/(max - min), -255.0*min/(max - min));
else
cvSetZero(dst); // constant image: avoid division by zero
cvReleaseImage(&temp); // fix memory leak (the original also leaked an unused temp2)
}
2.修改后的版本:
用指针访问矩阵元素的方法比较高效,但是并不安全,出错的原因可能就在此。所以,改为用宏定义访问每个像元CV_MAT_ELEM( matrix, elemtype, row, col )
// Gamma correction (CvMat version): dst = min-max normalization of src^gamma to [0,255].
// Uses the bounds-checked CV_MAT_ELEM macros for element access.
// src must be a single-channel 8-bit image; dst must be same size (8U assumed by caller).
void gamma_adjust(IplImage* src, IplImage* dst, const float& gamma)
{
assert(src->depth == IPL_DEPTH_8U && src->nChannels == 1 && gamma > 0);
CvMat* temp = cvCreateMat(src->height, src->width, CV_32FC1);
CvMat* temp2 = cvCreateMat(src->height, src->width, CV_32FC1);
cvConvertScale(src, temp); // 8U -> 32F copy of the source
for (int y = 0; y != src->height; ++y)
{
for (int x = 0; x != src->width; ++x)
{
float p = CV_MAT_ELEM(*temp, float, y, x);
*((float*)CV_MAT_ELEM_PTR(*temp2, y, x)) = pow(p, gamma);
}
}
// BUG FIX: min/max were commented out but still referenced (compile error),
// and the original normalized temp (the un-gamma'd copy) instead of temp2.
double max, min;
cvMinMaxLoc(temp2, &min, &max);
if (max > min)
cvConvertScale(temp2, dst, 255.0/(max - min), -255.0*min/(max - min));
else
cvSetZero(dst); // constant image: avoid division by zero
cvReleaseMat(&temp);
cvReleaseMat(&temp2);
}
gamma校正完是否需要将图像归一化到0~255?为什么整幅图像的归一化结果都是0?
// Gamma correction (IplImage version): dst = min-max normalization of src^gamma to [0,255].
// src must be a single-channel 8-bit image; dst must be same size (8U assumed by caller).
void gamma_adjust(IplImage* src, IplImage* dst, const double& gamma)
{
assert(src->depth == IPL_DEPTH_8U && src->nChannels == 1 && gamma > 0);
IplImage* temp = cvCreateImage(cvGetSize(src), IPL_DEPTH_64F, 1);
for (int y = 0; y != src->height; ++y)
{
for (int x = 0; x != src->width; ++x)
{
const uchar* p = (const uchar*)(src->imageData + y*src->widthStep + x);
// BUG FIX: widthStep is measured in BYTES. For a 64F image each element
// is sizeof(double) bytes, so the column offset must be x doubles, not
// x bytes. The original "+ x" byte offset produced overlapping/misaligned
// writes, which is why the normalized result looked like all zeros.
double* q = (double*)(temp->imageData + y*temp->widthStep) + x;
*q = pow((double)*p, gamma);
}
}
double max, min;
cvMinMaxLoc(temp, &min, &max);
if (max > min)
cvConvertScale(temp, dst, 255.0/(max - min), -255.0*min/(max - min));
else
cvSetZero(dst); // constant image: avoid division by zero
cvReleaseImage(&temp); // fix memory leak (the original also leaked an unused temp2)
}
2.修改后的版本:
用指针访问矩阵元素的方法比较高效,但是并不安全,出错的原因可能就在此。所以,改为用宏定义访问每个像元CV_MAT_ELEM( matrix, elemtype, row, col )
// Gamma correction (CvMat version): dst = min-max normalization of src^gamma to [0,255].
// Uses the bounds-checked CV_MAT_ELEM macros for element access.
// src must be a single-channel 8-bit image; dst must be same size (8U assumed by caller).
void gamma_adjust(IplImage* src, IplImage* dst, const float& gamma)
{
assert(src->depth == IPL_DEPTH_8U && src->nChannels == 1 && gamma > 0);
CvMat* temp = cvCreateMat(src->height, src->width, CV_32FC1);
CvMat* temp2 = cvCreateMat(src->height, src->width, CV_32FC1);
cvConvertScale(src, temp); // 8U -> 32F copy of the source
for (int y = 0; y != src->height; ++y)
{
for (int x = 0; x != src->width; ++x)
{
float p = CV_MAT_ELEM(*temp, float, y, x);
*((float*)CV_MAT_ELEM_PTR(*temp2, y, x)) = pow(p, gamma);
}
}
// BUG FIX: min/max were commented out but still referenced (compile error),
// and the original normalized temp (the un-gamma'd copy) instead of temp2.
double max, min;
cvMinMaxLoc(temp2, &min, &max);
if (max > min)
cvConvertScale(temp2, dst, 255.0/(max - min), -255.0*min/(max - min));
else
cvSetZero(dst); // constant image: avoid division by zero
cvReleaseMat(&temp);
cvReleaseMat(&temp2);
}