Low-Light Enhancement Using a Dehazing Algorithm

Reference: [Paper Reading] (11) The ACE algorithm and the dark channel prior image dehazing algorithm (Rizzi | Kaiming He) - CSDN blog

// https://zhuanlan.zhihu.com/p/500023711?utm_id=0
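
The idea implemented below (the function is named dong_enhance, presumably after Dong et al.'s dehazing-based enhancement): invert the low-light image (255 - I), which makes it look like a hazy image; run dark channel prior dehazing on it, i.e. estimate the atmospheric light A and the transmission t(x) = 1 - w * min over a window and over channels of I_c(y) / A_c, and recover J(x) = (I(x) - A) / t(x) + A; finally invert J back to obtain the enhanced result.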

#include <opencv2/opencv.hpp>   // already pulls in core, imgproc and highgui
#include <iostream>
#include <vector>
#include <list>      // details_type below is a std::list
#include <string>
#include <cassert>   // assert() is used in dong_enhance
using namespace cv;
using namespace std;

namespace {
    void cv_show(const cv::Mat& one_image, const char* info = "") {
        cv::imshow(info, one_image);
        cv::waitKey(0);
        cv::destroyAllWindows();
    }

    bool cv_write(const cv::Mat& source, const std::string save_path) {
        return cv::imwrite(save_path, source, std::vector<int>({ cv::IMWRITE_PNG_COMPRESSION, 0 }));
    }

    cv::Mat make_pad(const cv::Mat& one_image, const int pad_H, const int pad_W) {
        cv::Mat padded_image;
        cv::copyMakeBorder(one_image, padded_image, pad_H, pad_H, pad_W, pad_W, cv::BORDER_REFLECT);
        return padded_image;
    }
}

// Compute the dark channel: per-pixel minimum over B, G, R, then a windowed minimum
template<typename T>
cv::Mat compute_dark_channel(const cv::Mat& origin, const int H, const int W, const int radius = 7) {
    const T* const ori_ptr = origin.ptr<T>();
    // Decide the element type: uchar for 8-bit input, float otherwise
    const int TYPE = origin.type() == CV_8UC3 ? CV_8UC1 : CV_32FC1;
    // [1] Take the per-pixel minimum over the B, G, R channels
    cv::Mat min_bgr(H, W, TYPE);
    T* const min_bgr_ptr = min_bgr.ptr<T>();
    const int length = H * W;
    for (int i = 0; i < length; ++i) {
        const int p = 3 * i;
        min_bgr_ptr[i] = std::min(ori_ptr[p], std::min(ori_ptr[p + 1], ori_ptr[p + 2]));
    }
    // [2] For every pixel of min_bgr, take the minimum over a (2*radius+1)^2 window.
    // The square-window minimum is separable: a horizontal min pass followed by a
    // vertical min pass gives the same result at a much lower cost per pixel.
    // First pad the image so the window never runs off the border
    auto pad_min_bgr = make_pad(min_bgr, radius, radius);
    const int H2 = H + 2 * radius;
    const int W2 = W + 2 * radius;
    // Buffer for the result of the first (horizontal) min pass
    cv::Mat temp(H2, W, TYPE);
    T* const temp_ptr = temp.ptr<T>();
    int cnt = 0;
    // First pass: horizontal minima over all H2 padded rows
    for (int i = 0; i < H2; ++i) {
        T* const row_ptr = pad_min_bgr.ptr<T>() + i * W2 + radius;
        for (int j = 0; j < W; ++j) {
            T min_value = 255;
            for (int k = -radius; k <= radius; ++k)
                min_value = std::min(min_value, row_ptr[j + k]);
            temp_ptr[cnt++] = min_value;
        }
    }
    // Release the padded buffer early
    pad_min_bgr.release();
    // Second pass: vertical minima
    for (int j = 0; j < W; ++j) {
        for (int i = 0; i < H; ++i) {
            T min_value = 255;
            const int offset = (radius + i) * W + j;
            for (int k = -radius; k <= radius; ++k)
                min_value = std::min(min_value, temp_ptr[offset + k * W]);
            min_bgr_ptr[i * W + j] = min_value;  // write the result back into min_bgr to save memory
        }
    }
    return min_bgr;
}


using details_type = std::list< std::pair<std::string, cv::Mat> >;

details_type dong_enhance(
    const cv::Mat& low_light,
    const int radius = 3,
    const int A_pixels = 100,
    const float weight = 0.8,
    const float border = 0.5,
    const bool denoise = false) {
    details_type collections;
    // Basic image information
    const int H = low_light.rows;
    const int W = low_light.cols;
    const int C = low_light.channels();
    const int length = H * W;
    const int total_length = length * C;
    assert(low_light.type() == CV_8UC3 && "Only CV_8UC3 (BGR) input is supported!");

    // [1] Invert the input: 255 - low_light
    cv::Mat inverse(H, W, low_light.type());
    uchar* const inv_ptr = inverse.ptr<uchar>();
    for (int i = 0; i < total_length; ++i)
        inv_ptr[i] = 255 - low_light.data[i];
    collections.emplace_back("inverse", inverse);

    // [2] Dehaze the inverted image: first compute its dark channel
    const auto dark_channel = compute_dark_channel<uchar>(inverse, H, W, radius);
    collections.emplace_back("dark_channel", dark_channel);

    // [3] Take the A_pixels pixels with the largest dark-channel values; among them, pick the
    // pixel of the (inverted) hazy image with the largest B+G+R sum as the estimate of the
    // global atmospheric light A for the three channels
    std::vector< std::vector<int> > book(256);
    const uchar* const dark_ptr = dark_channel.ptr<uchar>();
    for (int i = 0; i < length; ++i)
        book[dark_ptr[i]].emplace_back(i);
    int cnt = 0;
    std::vector<int> index(A_pixels);  // indices of the A_pixels largest dark-channel values
    for (int i = 255; i >= 0; --i) {
        const int _size = book[i].size(); // size() is O(1)
        for (int t = 0; t < _size && cnt < A_pixels; ++t)
            index[cnt++] = book[i][t];
    }
    int max_bgr_sum = 0;  // among these A_pixels candidates, find the pixel of the inverted image with the largest B+G+R sum
    int max_index = -1;
    for (int i = 0; i < A_pixels; ++i) {
        const int p = 3 * index[i];
        const int cur_sum = inv_ptr[p] + inv_ptr[p + 1] + inv_ptr[p + 2];
        if (cur_sum > max_bgr_sum) {
            max_bgr_sum = cur_sum;
            max_index = index[i];
        }
    }
    // max_index is now the position of the pixel whose B+G+R sum is largest
    int A[3] = { inv_ptr[max_index * 3], inv_ptr[max_index * 3 + 1], inv_ptr[max_index * 3 + 2] };
    std::cout << "三个通道的全局大气光估计值 " << A[0] << ", " << A[1] << ", " << A[2] << "\n";

    // [4] Estimate the transmission t
    // Build a float image in which every channel is divided by the corresponding atmospheric light A_c
    cv::Mat R_A(H, W, CV_32FC3);
    float* const R_A_ptr = R_A.ptr<float>();
    for (int i = 0; i < length; ++i) {
        const int p = 3 * i;
        for (int c = 0; c < 3; ++c)
            R_A_ptr[p + c] = inv_ptr[p + c] * 1.f / A[c];
    }
    // The dark channel of this ratio image gives the transmission (R_A becomes CV_32FC1 here; the variable is reused to save memory)
    R_A = compute_dark_channel<float>(R_A, H, W, radius);
    float* t_ptr = R_A.ptr<float>();

    // Optional tweak of the transmission so that low-t (originally darker) regions get an even
    // stronger boost. As written, the early "return x;" keeps discount() an identity mapping;
    // remove it to enable the quadratic 2*x*x branch for x < border.
    auto discount = [border](const float x) ->float {
        return x;
        // if (x >= 0 && x < border) return 2 * x * x;   // unreachable unless the return above is removed
    };
    for (int i = 0; i < length; ++i)
        t_ptr[i] = discount(1.f - weight * t_ptr[i]);
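    // This is the standard dark-channel-prior transmission estimate
    // t(x) = 1 - omega * min over the window and the channels of I_c(y) / A_c   (omega = weight here).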

    // Recover J(x) = (I(x) - A) / t(x) + A, then invert back
    cv::Mat result(H, W, CV_8UC3);
    uchar* const res_ptr = result.ptr<uchar>();
    for (int i = 0; i < length; ++i) {
        const int p = 3 * i;
        for (int c = 0; c < 3; ++c) {
            const float J = (inv_ptr[p + c] - A[c]) * 1.f / t_ptr[i] + A[c];  // can overshoot, hence the saturate_cast below
            res_ptr[p + c] = cv::saturate_cast<uchar>(J);
        }
    }
    // Optional denoising
    if (denoise) {
        cv::fastNlMeansDenoisingColored(result, result, 5, 5, 5, 15);
    }
    collections.emplace_back("dehazed", result.clone());

    // Invert back to obtain the enhanced low-light result
    for (int i = 0; i < total_length; ++i)
        res_ptr[i] = 255 - res_ptr[i];
    collections.emplace_back("enhanced", result.clone());
    return collections;
}


int main() {
    // Read the input image and bail out if the path is wrong
    cv::Mat low_light = cv::imread("D:\\opencv\\Project\\test\\x64\\Debug\\5.bmp"); // "../datasets/LOL/775.png"
    if (low_light.empty()) {
        std::cout << "Failed to read the input image!\n";
        return -1;
    }

    cv_show(low_light);

    // Run the enhancement
    auto collections = dong_enhance(low_light, 3, 100, 0.8, 0.5, false);

    // Show and save every intermediate result
    for (const auto& item : collections) {
        cv_show(item.second, item.first.c_str());
        cv_write(item.second, "D:\\opencv\\Project\\test\\x64\\Debug\\output\\" + item.first + ".png");
    }

    return 0;
}
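
For reference, a compact NumPy/OpenCV sketch of the same invert -> dehaze (dark channel prior) -> invert pipeline. This is a minimal re-implementation for illustration, not the author's code: the function and parameter names are made up, and a floor of 0.1 is put on the transmission to avoid division blow-ups (the C++ version above relies on saturate_cast instead).

import cv2
import numpy as np

def dark_channel(img, radius=7):
    # per-pixel minimum over B, G, R, then a (2*radius+1)^2 windowed minimum (erosion)
    min_bgr = img.min(axis=2)
    kernel = np.ones((2 * radius + 1, 2 * radius + 1), np.uint8)
    return cv2.erode(min_bgr, kernel)

def enhance_low_light(low_light, radius=7, a_pixels=100, weight=0.8):
    inv = 255 - low_light                           # treat the inverted image as "hazy"
    dark = dark_channel(inv, radius)
    # atmospheric light A: among the a_pixels brightest dark-channel pixels,
    # take the one whose B+G+R sum in the inverted image is largest
    idx = np.argsort(dark.ravel())[-a_pixels:]
    cand = inv.reshape(-1, 3)[idx].astype(np.int32)
    A = cand[cand.sum(axis=1).argmax()].astype(np.float32)
    # transmission t(x) = 1 - weight * dark_channel(inv / A), floored at 0.1
    t = 1.0 - weight * dark_channel(inv.astype(np.float32) / A, radius)
    t = np.maximum(t, 0.1)
    # recover J(x) = (I(x) - A) / t(x) + A and invert back
    J = (inv.astype(np.float32) - A) / t[..., None] + A
    return 255 - np.clip(J, 0, 255).astype(np.uint8)

# enhanced = enhance_low_light(cv2.imread("5.bmp"))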

Another Python implementation (not verified):

Author: 不拉的彼得
Link: https://www.zhihu.com/question/410539161/answer/2483999907
Source: Zhihu
Copyright belongs to the author. Contact the author for permission before commercial reuse; cite the source for non-commercial reuse.
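
Rough outline of what the script below does: estimate a per-pixel illumination map T (the max over R, G, B, smoothed by solving a weighted sparse linear system in tsmooth), build a weight map W = T ** mu, find an exposure ratio k that maximizes the entropy of the under-exposed pixels after the brightness transform applyK, and fuse fused = I * W + applyK(I, k) * (1 - W). The camera-response defaults a = -0.3293, b = 1.1258 suggest Ying et al.'s exposure-fusion enhancement, but treat that attribution as an assumption.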

# coding:utf-8
'''
Image HDR, mainly for low-light enhancement, implemented in Python.
Key point: only the under-lit regions are brightened; well-lit regions are barely affected.
'''

from datetime import datetime
import numpy as np
import cv2
from math import ceil
from scipy.sparse import spdiags
#from scipy.misc import imresize
from scipy.optimize import fminbound
from scipy.stats import entropy
from scipy.sparse.linalg import spsolve

filepath = './image/'

from PIL import Image
def scipy_misc_imresize(arr, size, interp='bilinear', mode=None):
    #print(arr.dtype)
    #exit()
    #arr = np.uint8(arr)
    im = Image.fromarray(arr, mode=mode)
    ts = type(size)
    if np.issubdtype(ts, np.signedinteger):
        percent = size / 100.0
        size = tuple((np.array(im.size)*percent).astype(int))
    elif np.issubdtype(type(size), np.floating):
        size = tuple((np.array(im.size)*size).astype(int))
    else:
        size = (size[1], size[0])
    func = {'nearest': 0, 'lanczos': 1, 'bilinear': 2, 'bicubic': 3, 'cubic': 3}
    imnew = im.resize(size, resample=func[interp])  # delegate to PIL's resize
    return np.array(imnew)

def computeTextureWeights(fin, sigma, sharpness):
    # print(fin)
    # fin = fin / 255.0

    dt0_v = np.diff(fin, 1, 0)  # vertical forward differences
    dt0_v = np.concatenate((dt0_v, fin[:1, :] - fin[-1:, :]), axis=0)  # wrap around: row 0 minus the last row

    dt0_h = np.diff(fin, 1, 1)  # horizontal forward differences
    dt0_h = np.concatenate((dt0_h, fin[:, :1] - fin[:, -1:]), axis=1)  # wrap around: column 0 minus the last column

    gauker_h = cv2.filter2D(dt0_h, -1, np.ones((1, sigma)), borderType=cv2.BORDER_CONSTANT)
    gauker_v = cv2.filter2D(dt0_v, -1, np.ones((sigma, 1)), borderType=cv2.BORDER_CONSTANT)
    # cv2.filter2D reflects at the border by default, unlike MATLAB's filter2 (zero padding); hence BORDER_CONSTANT above

    W_h = 1.0 / (abs(gauker_h) * abs(dt0_h) + sharpness)
    W_v = 1.0 / (abs(gauker_v) * abs(dt0_v) + sharpness)

    return W_h, W_v
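
# Interpretation (not in the original post): these are relative-total-variation style weights,
# W = 1 / (|windowed gradient sum| * |gradient| + sharpness). Texture gives oscillating gradients
# whose windowed sum is small, so its weight stays large and it gets smoothed away, while
# consistent structural edges get a small weight and are preserved.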


def convertCol(tmp):  # flatten column-wise into a column vector: [[1, 2, 3], [4, 5, 6], [7, 8, 9]] -> [1 4 7 2 5 8 3 6 9]^T
    return np.reshape(tmp.T, (tmp.shape[0] * tmp.shape[1], 1))


def solveLinearEquation(IN, wx, wy, lambd):
    print('IN', IN.shape)
    r, c, ch = IN.shape[0], IN.shape[1], 1
    k = r * c
    dx = -lambd * convertCol(wx)  # flatten column-wise into one column
    dy = -lambd * convertCol(wy)
    tempx = np.concatenate((wx[:, -1:], wx[:, 0:-1]), 1)  # move the last column in front of the first
    tempy = np.concatenate((wy[-1:, :], wy[0:-1, :]), 0)  # move the last row in front of the first
    dxa = -lambd * convertCol(tempx)
    dya = -lambd * convertCol(tempy)
    tempx = np.concatenate((wx[:, -1:], np.zeros((r, c - 1))), 1)  # last column of wx in the first column, zeros elsewhere
    tempy = np.concatenate((wy[-1:, :], np.zeros((r - 1, c))), 0)  # last row of wy in the first row, zeros elsewhere
    dxd1 = -lambd * convertCol(tempx)
    dyd1 = -lambd * convertCol(tempy)
    wx[:, -1:] = 0  # zero out the last column
    wy[-1:, :] = 0  # zero out the last row
    dxd2 = -lambd * convertCol(wx)
    dyd2 = -lambd * convertCol(wy)

    Ax = spdiags(np.concatenate((dxd1, dxd2), 1).T, np.array([-k + r, -r]), k, k)
    Ay = spdiags(np.concatenate((dyd1, dyd2), 1).T, np.array([-r + 1, -1]), k, k)
    # Note: scipy's spdiags reads the diagonals from the rows of the data array, whereas MATLAB reads them as columns

    D = 1 - (dx + dy + dxa + dya)
    A = (Ax + Ay) + (Ax + Ay).T + spdiags(D.T, np.array([0]), k, k)

    A = A / 1000.0  # marked "needs revision" by the original author; note it cancels with the OUT / 1000 below

    matCol = convertCol(IN)
    print('spsolve start', str(datetime.now()))
    OUT = spsolve(A, matCol, permc_spec="MMD_AT_PLUS_A")
    print('spsolve done', str(datetime.now()))
    OUT = OUT / 1000  # undoes the A / 1000.0 scaling above
    OUT = np.reshape(OUT, (c, r)).T
    return OUT
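
# Reading of the code above (stated as an interpretation, not from the original post):
# solveLinearEquation assembles a sparse system (Id + lambd * L_w) s = IN, where L_w is a
# weighted Laplacian built from the texture weights wx, wy (with wrap-around neighbours),
# and solves it with spsolve; the solution s is the smoothed illumination map used by tsmooth.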


def tsmooth(I, lambd=0.5, sigma=5, sharpness=0.001):
    # print(I.shape)
    wx, wy = computeTextureWeights(I, sigma, sharpness)
    S = solveLinearEquation(I, wx, wy, lambd)
    return S


def rgb2gm(I):
    print('I', I.shape)
    # I = np.maximum(I, 0.0)
    if I.ndim == 3 and I.shape[2] == 3:  # guard against 2-D (already grayscale) input
        I = np.power(np.multiply(np.multiply(I[:, :, 0], I[:, :, 1]), I[:, :, 2]), (1.0 / 3))
    return I


def YisBad(Y, isBad):  # the original author notes this should be made more efficient (now unused)
    return Y[isBad >= 1]
    # Z = []
    # [rows, cols] = Y.shape
    # for i in range(rows):
    #     for j in range(cols):
    #         if isBad[i, j] >= 122:
    #             Z.append(Y[i, j])
    # return np.array([Z]).T


def applyK(I, k, a=-0.3293, b=1.1258):
    # print(type(I))
    if not isinstance(I, np.ndarray):  # the original compared type(I) against a string, which never matches
        I = np.array(I)
    # print(type(I))
    beta = np.exp((1 - (k ** a)) * b)
    gamma = (k ** a)
    BTF = np.power(I, gamma) * beta
    # try:
    #    BTF = (I ** gamma) * beta
    # except:
    #    print('gamma:', gamma, '---beta:', beta)
    #    BTF = I
    return BTF
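
# Note: applyK is the brightness transform function of a camera response model:
# applyK(I, k) = beta * I**gamma with gamma = k**a and beta = exp(b * (1 - k**a)),
# i.e. it simulates changing the exposure by the ratio k. The defaults a=-0.3293, b=1.1258
# look like the fitted parameters from Ying et al.'s exposure-fusion enhancement; treat that
# attribution as an assumption.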


def maxEntropyEnhance(I, isBad, mink=1, maxk=10):
    # Y = rgb2gm(np.real(np.maximum(imresize(I, (50, 50), interp='bicubic') / 255.0, 0)))
    #Y = scipy_misc_imresize(I, (50, 50), interp='bicubic') / 255.0
    #------------------replace-----------------------#
    # I is already scaled to [0, 1] here; casting it straight to uint8 would destroy the data,
    # so scale to [0, 255] before building the PIL image and back to [0, 1] afterwards
    Y = np.array(Image.fromarray(np.uint8(I * 255)).resize((50, 50), Image.BICUBIC)) / 255.0
    Y = rgb2gm(Y)
    # bicubic matches the original scipy imresize most closely
    # Y = rgb2gm(np.real(np.maximum(cv2.resize(I, (50, 50), interpolation=cv2.INTER_LANCZOS4  ), 0)))
    # INTER_AREA is the closest cv2 equivalent
    # import matplotlib.pyplot as plt
    # plt.imshow(Y, cmap='gray');plt.show()

    print('isBad', isBad.shape)
    #isBad = scipy_misc_imresize(isBad.astype(int), (50, 50), interp='nearest')
    #------------------replace-----------------------#
    # keep the mask binary: resize with nearest-neighbour, as the original imresize call did
    isBad = np.array(Image.fromarray(np.uint8(isBad)).resize((50, 50), Image.NEAREST))
    print('isBad', isBad.shape)

    # plt.imshow(isBad, cmap='gray');plt.show()

    # Keep only the Y values where isBad is true, as a flat vector
    # Y = YisBad(Y, isBad)  # older, slower version of the same thing
    Y = Y[isBad >= 1]

    # Y = sorted(Y)

    print('-entropy(Y)', -entropy(Y))

    def f(k):
        return -entropy(applyK(Y, k))

    # opt_k = mink
    # k = mink
    # minF = f(k)
    # while k<= maxk:
    #     k+=0.0001
    #     if f(k)<minF:
    #         minF = f(k)
    #         opt_k = k
    opt_k = fminbound(f, mink, maxk)
    print('opt_k:', opt_k)
    # opt_k = 5.363584
    # opt_k = 0.499993757705
    # original author's note: opt_k looked wrong even though fminbound and f are correct,
    # probably because Y differs from the MATLAB version (different resize behaviour)
    J = applyK(I, opt_k) - 0.01
    return J
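
# Summary: maxEntropyEnhance downsamples to 50x50, keeps only the under-exposed pixels selected
# by isBad, and searches k in [mink, maxk] (fminbound) for the exposure ratio whose
# brightness-transformed result has maximum entropy; that k is then applied to the
# full-resolution image.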


def HDR2dark(I, t_our, W):  # darken over-exposed regions (currently unused below)
    W = 1 - W
    I3 = I * W
    isBad = t_our > 0.8
    J3 = maxEntropyEnhance(I, isBad, 0.1, 0.5)  # optimal k and the re-exposed image
    J3 = J3 * (1 - W)  # re-exposed image * weight
    fused = I3 + J3  # fused result
    return fused  # the original returned I here, which made the function a no-op


def oneHDR(I, mu=0.5, a=-0.3293, b=1.1258):
    # mu: exponent applied to the illumination map T; larger mu means stronger enhancement
    I = I / 255.0
    t_b = I[:, :, 0]  # brightness map L: per-pixel maximum over the colour channels
    for i in range(I.shape[2] - 1):  # handles inputs whose channel count is not exactly 3
        t_b = np.maximum(t_b, I[:, :, i + 1])
    # t_b2 = cv2.resize(t_b, (0, 0), fx=0.5, fy=0.5)
    print('t_b', t_b.shape)
    # t_b2 = misc.imresize(t_b, (ceil(t_b.shape[0] / 2), ceil(t_b.shape[1] / 2)),interp='bicubic')
    # print('t_b2', t_b2.shape)
    # t_b2 = t_b / 255.0

    #t_b2 = scipy_misc_imresize(t_b, (256, 256), interp='bicubic', mode='F')  # / 255
    #------------------replace-----------------------#
    # PIL cannot build an image from a float64 array, so cast to float32 (mode 'F') first
    t_b2 = np.array(Image.fromarray(t_b.astype(np.float32), mode='F').resize((256, 256), Image.BICUBIC))
    t_our = tsmooth(t_b2)  # solve for the illumination map T (grayscale)
    print('t_our before resize', t_our.shape)
    #t_our = scipy_misc_imresize(t_our, t_b.shape, interp='bicubic', mode='F')  # / 255
    #------------------replace-----------------------#
    t_our = np.array(Image.fromarray(t_our.astype(np.float32), mode='F').resize((t_b.shape[1], t_b.shape[0]), Image.BICUBIC))
    print('t_our after resize', t_our.shape)

    # W: weight matrix, and I2
    # Illumination map (grayscale) -> illumination map (3 channels): replicate the gray values across R, G, B
    # built with I.shape so it always matches the input size
    t = np.ndarray(I.shape)
    for ii in range(I.shape[2]):
        t[:, :, ii] = t_our
    print('t', t.shape)

    W = t ** mu  # weight of the original image (3-D array)

    cv2.imwrite(filepath + 'W.jpg', W * 255)
    cv2.imwrite(filepath + '1-W.jpg', (1 - W) * 255)
    cv2.imwrite(filepath + 't.jpg', t * 255)
    cv2.imwrite(filepath + '1-t.jpg', (1 - t) * 255)

    print('W', W.shape)
    # Darkening variant (disabled):
    # isBad = t_our > 0.8  # over-exposed pixels
    # I = maxEntropyEnhance(I, isBad)  # optimal k and the re-exposed image
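    # Fusion below: fused = I * W + applyK(I, k_opt) * (1 - W) with W = T ** mu, so well-lit
    # pixels (T close to 1) keep the original while under-lit pixels take mostly the
    # entropy-optimal re-exposed image J.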
    I2 = I * W  # original image * weight
    # exposure ratio -> k -> J
    isBad = t_our < 0.5  # under-exposed pixels
    J = maxEntropyEnhance(I, isBad)  # optimal k and the re-exposed image
    J2 = J * (1 - W)  # re-exposed image * (1 - weight)
    fused = I2 + J2  # enhanced result
    # save intermediate results
    cv2.imwrite(filepath + 'I2.jpg', I2 * 255.0)
    cv2.imwrite(filepath + 'J2.jpg', J2 * 255.0)
    # Darkening variant (disabled):
    # fused = HDR2dark(fused, t_our, W)
    return fused
    # return res

def test():
    inputImg = cv2.imread(filepath + '344.jpg')
    outputImg = oneHDR(inputImg)
    # outputImg = outputImg * 255.0
    cv2.imwrite(filepath + '1out.bmp', outputImg * 255)
    print("HDR完成,已保存到本地")
    print('程序结束', str(datetime.now()))

    cv2.imshow('inputImg', inputImg)
    cv2.imshow('outputImg', outputImg)
    # print(inputImg.dtype,outputImg.dtype)
    # outputImg = outputImg.astype(int)
    # print(inputImg.dtype, outputImg.dtype)
    # compare = np.concatenate((inputImg,outputImg),axis=1)
    # cv2.imshow('compare', compare)
    cv2.waitKey(0)
    cv2.destroyAllWindows()


def test2():
    A = np.array([1, 2, 3, 4])
    B = np.array([1, 0, 0, 1])
    print(A[B>0])


if __name__ == '__main__':
    print('started', str(datetime.now()))
    test()
