HDR算法Python实现:Github。先看结果:
使用三脚架用自己手机分别设置-3、0、1 EV拍摄的输入图像:
输出结果(左:0ev,右:HDR结果):
main.py:
from image_io import load_img, save_img
import os
from exposure_fusion import exposure_fusion

input_path = 'data/case5'  # directory holding the differently-exposed inputs
save_img_name = ''.join(['hdr_output_', os.path.split(input_path)[-1], '.png'])  # output file name
nlev = 9  # pyramid levels; too few causes visible banding

# Sort the directory listing: os.listdir order is arbitrary, and a
# deterministic exposure order keeps runs reproducible.
input_path_list = [os.path.join(input_path, name)
                   for name in sorted(os.listdir(input_path))]
I = load_img(input_path_list)
R = exposure_fusion(I, [1, 1, 1, nlev])
save_img(R * 255, save_img_name)
image_io.py:
import cv2
import numpy as np
import math
def load_img(imgs_path_list):
    """Read each image path and return a list of float arrays scaled to [0, 1].

    Raises FileNotFoundError for unreadable paths: cv2.imread returns None
    instead of raising, which would otherwise surface later as a cryptic
    TypeError on `None / 255`.
    """
    imgs_arr = []
    for img_path in imgs_path_list:
        img_arr = cv2.imread(img_path)
        if img_arr is None:
            raise FileNotFoundError('cannot read image: %s' % img_path)
        imgs_arr.append(img_arr / 255)
    return imgs_arr
def save_img(img_arr, save_path):
    """Write an image to disk, clamping to the valid 8-bit range first.

    Exposure fusion can produce values slightly outside [0, 255]; clipping
    avoids wrap-around artifacts when cv2 casts to uint8.
    """
    cv2.imwrite(save_path, np.clip(img_arr, 0, 255))
def offset_process(imgs_arr, nlev):
    """Reflect-pad image borders so every pyramid level has even dimensions.

    Pads each image in-place inside imgs_arr and returns it along with the
    per-side padding amounts [top, bottom, left, right] so the caller can
    crop the padding back off after reconstruction.
    """
    h, w = np.shape(imgs_arr[0])[:2]
    factor = 2 ** (nlev - 1)
    # smallest dimensions >= original that are divisible by 2**(nlev-1)
    target_h = math.ceil(h / factor) * factor
    target_w = math.ceil(w / factor) * factor
    top = (target_h - h) // 2
    bottom = (target_h - h) - top
    left = (target_w - w) // 2
    right = (target_w - w) - left
    for idx in range(len(imgs_arr)):
        imgs_arr[idx] = cv2.copyMakeBorder(
            imgs_arr[idx], top, bottom, left, right, cv2.BORDER_REFLECT)
    return imgs_arr, [top, bottom, left, right]
exposure_fusion.py:
import numpy as np
from gaussian_pyramid import gaussian_pyramid
from laplacian_pyramid import laplacian_pyramid
from image_io import offset_process
from reconstruct_laplacian_pyramid import reconstruct_laplacian_pyramid
from cv2_process import rgb2gray, imfilter
import math
def exposure_fusion(imgs_arr, param):
    """Fuse differently-exposed images (Mertens et al. exposure fusion).

    param = [contrast_exp, saturation_exp, well_exposedness_exp, nlev].
    An exponent of 0 disables that quality measure; other positive values
    weight its influence. (The original only honored exponents equal to 1,
    where `** 1` was a no-op; this generalizes backward-compatibly.)
    Returns the fused image at the original resolution.
    """
    contrast_parm, sat_parm, wexp_parm, nlev = param
    size = np.shape(imgs_arr)
    N, r, c = size[0], size[1], size[2]
    # cap the level count so the coarsest level is at least one pixel
    max_lev = math.floor(math.log2(min(r, c)))
    if nlev > max_lev:
        nlev = max_lev
    # pad borders so every pyramid level downsamples to even dimensions
    imgs_arr, offset = offset_process(imgs_arr, nlev)
    size = np.shape(imgs_arr)
    r, c = size[1], size[2]
    # per-image scalar weight maps, one per pixel
    W = np.ones([N, r, c])
    if contrast_parm > 0:
        W = W * contrast(imgs_arr, size) ** contrast_parm
    if sat_parm > 0:
        W = W * saturation(imgs_arr, size) ** sat_parm
    if wexp_parm > 0:
        W = W * well_exposedness(imgs_arr, size) ** wexp_parm
    # normalize so the N weights sum to 1 at each pixel (epsilon avoids /0)
    W = W + 1e-12
    W = W / np.sum(W, axis=0, keepdims=True)
    # blend Laplacian pyramids of the inputs using Gaussian pyramids of weights
    pyr = gaussian_pyramid(np.zeros([r, c, 3]), nlev)
    for i in range(N):
        pyrW = gaussian_pyramid(W[i, :, :], nlev)
        pyrI = laplacian_pyramid(imgs_arr[i], nlev)
        for l in range(nlev):
            # broadcast the single-channel weight over the 3 color channels
            pyr[l] = pyr[l] + np.expand_dims(pyrW[l], axis=-1) * pyrI[l]
    R = reconstruct_laplacian_pyramid(pyr)
    # crop off the padding added by offset_process
    R = R[offset[0]:r - offset[1], offset[2]:c - offset[3], :]
    return R
def contrast(I, size):
    """Contrast measure: magnitude of the Laplacian filter on the gray image.

    I is a list of N float images; size is (N, h, w, ...). Returns an
    (N, h, w) array of per-pixel contrast weights.
    """
    h = np.array([[0, 1, 0], [1, -4, 1], [0, 1, 0]])  # discrete Laplacian kernel
    N = size[0]
    C = np.zeros([N, size[1], size[2]])
    for i in range(N):
        span = I[i].max() - I[i].min()
        if span == 0:
            # flat image: zero contrast everywhere; avoid division by zero
            continue
        # rescale to 0-255 because rgb2gray divides by 255 again
        temp = 255 * I[i] / span
        mono = rgb2gray(temp.astype('float32'))
        C[i, :, :] = np.abs(imfilter(mono, h))
    return C
def saturation(I, size):
    """Saturation measure: per-pixel standard deviation across B, G, R.

    I is a list of N float images; size is (N, h, w, ...). Returns an
    (N, h, w) array of per-pixel saturation weights.
    """
    n_imgs = size[0]
    C = np.zeros([n_imgs, size[1], size[2]])
    for idx in range(n_imgs):
        b = I[idx][:, :, 0]
        g = I[idx][:, :, 1]
        r = I[idx][:, :, 2]
        mean = (b + g + r) / 3
        # population standard deviation of the three channel values
        variance = ((r - mean) ** 2 + (g - mean) ** 2 + (b - mean) ** 2) / 3
        C[idx, :, :] = np.sqrt(variance)
    return C
def well_exposedness(I, size):
    """Well-exposedness: Gaussian weight favoring channel values near 0.5.

    Implements exp(-(v - 0.5)^2 / (2 * sig^2)) per channel and multiplies the
    three channel weights, per Mertens et al. The original had misplaced
    parentheses and computed exp(-0.5*v - 0.5)**2 / sig**2, which rewards
    dark pixels instead of mid-tones.
    """
    N = size[0]
    C = np.zeros([N, size[1], size[2]])
    sig = 0.2
    for i in range(N):
        gauss = np.exp(-0.5 * (I[i] - 0.5) ** 2 / sig ** 2)
        # product over the B, G, R channel weights
        C[i, :, :] = gauss[:, :, 0] * gauss[:, :, 1] * gauss[:, :, 2]
    return C
gaussian_pyramid.py:
from cv2_process import downsample
def gaussian_pyramid(I, nlev):
    """Build an nlev-level pyramid: level 0 is I, each next level is halved."""
    levels = [I]
    current = I
    for _ in range(nlev - 1):
        current = downsample(current)
        levels.append(current)
    return levels
laplacian_pyramid.py:
from cv2_process import downsample, upsample
def laplacian_pyramid(I, nlev):
    """Laplacian pyramid: per-level band-pass residuals, coarsest level last."""
    pyr = []
    current = I
    for _ in range(nlev - 1):
        smaller = downsample(current)
        # residual = detail lost by the down/up-sampling round trip
        pyr.append(current - upsample(smaller))
        current = smaller
    pyr.append(current)  # coarsest level is stored as-is
    return pyr
reconstruct_laplacian_pyramid.py:
from cv2_process import upsample
def reconstruct_laplacian_pyramid(pyr):
    """Collapse a Laplacian pyramid back into a full-resolution image."""
    levels = len(pyr)
    R = pyr[-1]
    # walk from the coarsest level back up, adding each band's detail
    for lvl in reversed(range(levels - 1)):
        R = pyr[lvl] + upsample(R)
    return R
cv2_process.py:
import cv2
def downsample(I):
    """Shrink the image to half size in each dimension."""
    half = cv2.resize(I, dsize=(0, 0), fx=0.5, fy=0.5)
    return half
def upsample(I):
    """Enlarge the image to double size in each dimension."""
    doubled = cv2.resize(I, dsize=(0, 0), fx=2, fy=2)
    return doubled
def rgb2gray(I):
    """Convert a BGR image (0-255 range) to grayscale scaled down to [0, 1]."""
    gray = cv2.cvtColor(I, cv2.COLOR_BGR2GRAY)
    return gray / 255
def imfilter(I, h):
    """Correlate image I with kernel h, keeping the input depth (ddepth=-1)."""
    filtered = cv2.filter2D(I, -1, h)
    return filtered
def contrast_clahe(I):
    """Apply CLAHE to each channel independently (unused by the fusion path)."""
    clahe = cv2.createCLAHE(clipLimit=1.0, tileGridSize=(8, 8))
    equalized = [clahe.apply(channel) for channel in cv2.split(I)]
    return cv2.merge(equalized)
其他结果1:
输入:
输出:
其他结果2:
输入:
输出:
参考:
[1] Mertens T, Kautz J, Van Reeth F. Exposure fusion[C]//15th Pacific Conference on Computer Graphics and Applications (PG’07). IEEE, 2007: 382-390.
[2] https://github.com/Mericam/exposure-fusion