虽然 fusion 会先产生一个 20bit 的容器,但随后经过 LTM 压缩到 12bit。这并不是把 20bit 线性地压成 12bit,而是通过 curve 按不同亮度区间分段压暗:低暗区域基本不被压暗,中高亮度区域会被压暗一些,超出 12bit 范围的部分压暗的倍数会更大。
// Fuse a long-exposure and a short-exposure 12-bit RAW frame into one HDR image.
//
// Per-pixel pipeline:
//  * Long pixel saturated (> kSatThresh): substitute the short pixel, expanded
//    by the exposure ratio into a ~20-bit domain, then compressed back down.
//  * Long pixel in the mid range (> kMidThresh): attenuate it slightly.
//  * Otherwise (dark region): keep the long pixel untouched.
//  * In the transition band (kBlendLow, kSatThresh) the long and short pixels
//    are alpha-blended so the branch switch does not leave a hard seam.
//
// Results are written to E:/test/hdr/result/ as 16-bit PNG/TIFF plus 8-bit
// JPEG previews of the fused, long and short images.
void hdr_fus()
{
    constexpr int kWidth  = 2592;
    constexpr int kHeight = 1944;
    constexpr int kSatThresh = 4040;   // long-exposure values above this are treated as clipped
    constexpr int kBlendLow  = 3800;   // lower edge of the long/short blending band
    constexpr int kMidThresh = 2000;   // above this, mid-tones get a mild roll-off
    constexpr float kHdrRatio = 6.0f;  // exposure ratio: short * 6 ~ long in linear light
    // Linear factor mapping the full 20-bit range onto 12 bits.
    const double scale = static_cast<double>((1 << 12) - 1) / ((1 << 20) - 1);

    // Open the long- and short-exposure RAW frames (16 bits per pixel on disk).
    std::ifstream file_long("E:/test/hdr/long_2592x1944_12_0.raw", std::ios::binary);
    std::ifstream file_short("E:/test/hdr/short_2592x1944_12_0.raw", std::ios::binary);
    if (!file_long || !file_short)
    {
        std::cerr << "hdr_fus: failed to open input RAW files\n";
        return;
    }

    std::vector<uint16_t> pixels_long(static_cast<size_t>(kWidth) * kHeight);
    std::vector<uint16_t> pixels_short(pixels_long.size());
    file_long.read(reinterpret_cast<char*>(pixels_long.data()),
                   static_cast<std::streamsize>(pixels_long.size() * sizeof(uint16_t)));
    file_short.read(reinterpret_cast<char*>(pixels_short.data()),
                    static_cast<std::streamsize>(pixels_short.size() * sizeof(uint16_t)));
    if (!file_long || !file_short)
    {
        std::cerr << "hdr_fus: input RAW files are truncated\n";
        return;
    }

    // Wrap the pixel buffers without copying; the vectors outlive these Mats.
    cv::Mat longExposure(kHeight, kWidth, CV_16UC1, pixels_long.data());
    cv::Mat shortExposure(kHeight, kWidth, CV_16UC1, pixels_short.data());

    // Float accumulator for the expanded ~20-bit values, plus the fused outputs.
    cv::Mat hdrImage_20bit(kHeight, kWidth, CV_32F);
    cv::Mat hdrImage = cv::Mat::zeros(longExposure.size(), CV_16UC1);
    cv::Mat hdrImage_20_12 = cv::Mat::zeros(longExposure.size(), CV_16UC1);

    for (int y = 0; y < longExposure.rows; y++)
    {
        for (int x = 0; x < longExposure.cols; x++)
        {
            const uint16_t longPixel = longExposure.at<uint16_t>(y, x);
            const uint16_t shortPixel = shortExposure.at<uint16_t>(y, x);
            float pixel20bit;

            if (longPixel > kSatThresh)
            {
                // Long exposure clipped: take the short exposure, expanded by
                // the exposure ratio into the 20-bit domain.
                hdrImage.at<uint16_t>(y, x) = shortPixel;
                hdrImage_20bit.at<float>(y, x) = shortPixel * kHdrRatio;
                pixel20bit = hdrImage_20bit.at<float>(y, x);
                // Fixed: the original `4096 < v < (1<<16)-1` chained comparison
                // compared a bool to an int, and `1<<16 -1` parsed as 1<<15.
                if (pixel20bit > 4096.0f && pixel20bit < static_cast<float>((1 << 16) - 1))
                {
                    // Compress the expanded value back toward the 12-bit range.
                    hdrImage.at<uint16_t>(y, x) = static_cast<uint16_t>(pixel20bit / 3.0f);
                    hdrImage_20_12.at<uint16_t>(y, x) = static_cast<uint16_t>(pixel20bit);
                }
            }
            else
            {
                if (longPixel > kMidThresh)
                {
                    // Mid-tones get a mild curve-like attenuation.
                    hdrImage_20bit.at<float>(y, x) = longPixel / 1.3f;
                    hdrImage.at<uint16_t>(y, x) =
                        static_cast<uint16_t>(hdrImage_20bit.at<float>(y, x));
                }
                else
                {
                    // Dark region: pass the long exposure through untouched.
                    hdrImage.at<uint16_t>(y, x) = longPixel;
                    hdrImage_20bit.at<float>(y, x) = longPixel;
                }
                // Linearly map the 20-bit value down to 12 bits.
                pixel20bit = hdrImage_20bit.at<float>(y, x);
                hdrImage_20_12.at<uint16_t>(y, x) =
                    static_cast<uint16_t>(pixel20bit * scale);
            }

            // Transition band: blend long and short so there is no hard seam
            // where the saturation branch switches on.
            if (longPixel < kSatThresh && longPixel > kBlendLow)
            {
                const float distance = std::abs(longPixel - static_cast<float>(kBlendLow));
                // Band width kSatThresh - kBlendLow (240) keeps weight in [0, 1].
                const float weight =
                    std::min(distance / static_cast<float>(kSatThresh - kBlendLow), 1.0f);
                hdrImage.at<uint16_t>(y, x) = static_cast<uint16_t>(
                    weight * shortPixel + (1.0f - weight) * longPixel);
                hdrImage_20bit.at<float>(y, x) =
                    weight * shortPixel * kHdrRatio + (1.0f - weight) * longPixel;
                pixel20bit = hdrImage_20bit.at<float>(y, x);
                hdrImage_20_12.at<uint16_t>(y, x) =
                    static_cast<uint16_t>(pixel20bit * scale);
            }
        }
    }

    // Save the fused 16-bit result plus 8-bit previews for visual inspection.
    cv::imwrite("E:/test/hdr/result/hdr_image.PNG", hdrImage);
    cv::imwrite("E:/test/hdr/result/hdr_image.TIFF", hdrImage);
    cv::Mat hdrImage8bit, longExposure_hdrImage8bit, shortExposure_hdrImage8bit;
    cv::normalize(hdrImage, hdrImage8bit, 0, 255, cv::NORM_MINMAX, CV_8U);
    cv::normalize(longExposure, longExposure_hdrImage8bit, 0, 255, cv::NORM_MINMAX, CV_8U);
    cv::normalize(shortExposure, shortExposure_hdrImage8bit, 0, 255, cv::NORM_MINMAX, CV_8U);
    cv::imwrite("E:/test/hdr/result/hdr_image_after.jpg", hdrImage8bit);
    cv::imwrite("E:/test/hdr/result/longExposure.jpg", longExposure_hdrImage8bit);
    cv::imwrite("E:/test/hdr/result/shortExposure.jpg", shortExposure_hdrImage8bit);
}
这篇文章只讲述了过曝区域用 (se * hdr_ratio) / curve_value 来处理,还没有说明运动区域该怎么办。
按照这里的算法,运动区域直接采用 se,应该是因为 se 的 shutter(曝光时间)更短、运动模糊更小。可以取前后两帧的信息,比较相同位置像素的 value_diff:若差值很大,则判定为运动区域并改用 se;具体实现和论证还需要进一步测试验证。