Fast Guided Filter: Code Implementation and Testing

Formula Derivation

Reference: https://blog.csdn.net/bby1987/article/details/128138418

(A single post seems to have a length limit, so the theory and formula derivation are covered in a separate post.)
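
For quick reference, here are the core relations the code below implements (a condensed restatement of the standard guided filter of He et al., not a substitute for the linked derivation). Within each window $\omega_k$ the output is a local linear transform of the guidance $I$:

$$q_i = a_k I_i + b_k, \quad i \in \omega_k, \qquad a_k = \frac{\operatorname{cov}_{\omega_k}(I, P)}{\operatorname{var}_{\omega_k}(I) + \epsilon}, \qquad b_k = \bar{P}_k - a_k \bar{I}_k.$$

For a 3-channel guidance image, $a_k$ becomes a 3-vector,

$$a_k = (\Sigma_k + \epsilon U)^{-1} \operatorname{cov}_{\omega_k}(I, P),$$

where $\Sigma_k$ is the 3x3 covariance matrix of the guidance inside $\omega_k$ and $U$ is the identity; guide_filter_color below inverts this symmetric matrix in closed form via cofactors. The "fast" variant evaluates all window statistics on images downsampled by step, then upsamples the averaged coefficients $\bar{a}_k$, $\bar{b}_k$ back to full resolution before forming $q = \bar{a} I + \bar{b}$.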

Code Implementation

Algorithm Code

The code below is saved as fastguidedfilter.py.
Note in particular that guide_filter_color is many times more computationally expensive than guide_filter_gray, since it inverts a 3x3 covariance matrix per pixel and loops over the input channels (a rough timing sketch follows the test script further below).

# -*- coding: utf-8 -*-
import cv2
import numpy as np


def boxfilter(image, radius):
    ksize = (2 * radius + 1, 2 * radius + 1)
    filtered_image = cv2.boxFilter(image, -1, ksize,
                                   borderType=cv2.BORDER_REPLICATE)
    return filtered_image


def guide_filter_gray(I, P, radius, step, eps):
    """
    Fast guide filter for gray-scale guidance image

    Parameters
    ----------
    I: gray-scale guidance image (single channel)
    P: input image, may be gray-scale or color
    radius: radius for box-filter
    step: step for down sample
    eps: regularization factor
    """
    # check parameters
    I = np.squeeze(I)
    P = np.squeeze(P)
    if I.ndim != 2:
        raise ValueError("guidance image must be gray-scale.")

    # cache original data type
    original_data_type = P.dtype

    # change data type to float32
    I = np.float32(I)
    P = np.float32(P)

    # initialize output
    result = P.copy()

    # if P is a color image, repeat I 3 times along the channel dim,
    # because an ndarray can NOT be broadcast from [H, W] to [H, W, C]
    has_extra_channels = P.ndim == 3 and P.shape[2] > 3
    if P.ndim == 3 and P.shape[2] >= 3:
        I = np.expand_dims(I, axis=2).repeat(3, axis=2)
        if has_extra_channels:
            # only the first 3 channels are filtered; extras (e.g. alpha)
            # are passed through untouched via `result`
            P = P[..., :3]

    # down sample
    height, width = I.shape[:2]
    down_size = (width // step, height // step)
    I_down = cv2.resize(I, dsize=down_size, fx=None, fy=None,
                        interpolation=cv2.INTER_NEAREST)
    P_down = cv2.resize(P, dsize=down_size, fx=None, fy=None,
                        interpolation=cv2.INTER_NEAREST)
    radius_down = max(radius // step, 1)  # keep at least a 3x3 box after down-sampling

    # guide filter
    mean_I = boxfilter(I_down, radius_down)
    mean_P = boxfilter(P_down, radius_down)
    corr_I = boxfilter(I_down * I_down, radius_down)
    corr_IP = boxfilter(I_down * P_down, radius_down)

    var_I = corr_I - mean_I * mean_I
    cov_IP = corr_IP - mean_I * mean_P

    a = cov_IP / (var_I + eps)
    b = mean_P - a * mean_I

    mean_a = boxfilter(a, radius_down)
    mean_b = boxfilter(b, radius_down)

    # up sample
    mean_a_up = cv2.resize(mean_a, dsize=(width, height), fx=None,
                           fy=None, interpolation=cv2.INTER_LINEAR)
    mean_b_up = cv2.resize(mean_b, dsize=(width, height), fx=None,
                           fy=None, interpolation=cv2.INTER_LINEAR)

    # linear filter model
    gf_result = mean_a_up * I + mean_b_up
    if has_extra_channels:
        result[..., :3] = gf_result
    else:
        result = gf_result

    # post process data type
    if original_data_type == np.uint8:
        result = np.clip(np.round(result), 0, 255).astype(np.uint8)
    return result


def guide_filter_color(I, P, radius, step, eps):
    """
    Fast guide filter for color guidance image

    Parameters
    ----------
    I: color guidance image (3 channels)
    P: input image, may be gray-scale or color
    radius: radius for box-filter
    step: step for down sample
    eps: regularization factor
    """
    # check parameters
    I = np.squeeze(I)
    P = np.squeeze(P)
    if I.ndim != 3 or I.shape[2] != 3:
        raise ValueError("guidance image must have 3 channels.")

    # cache original data type
    original_data_type = P.dtype

    # change data type to float32
    I = np.float32(I)
    P = np.float32(P)

    # initialize result
    result = P.copy()
    if result.ndim == 2:
        result = np.expand_dims(result, axis=2)

    # down sample
    height, width = I.shape[:2]
    down_size = (width // step, height // step)
    I_down = cv2.resize(I, dsize=down_size, fx=None, fy=None,
                        interpolation=cv2.INTER_NEAREST)
    P_down = cv2.resize(P, dsize=down_size, fx=None, fy=None,
                        interpolation=cv2.INTER_NEAREST)
    radius_down = max(radius // step, 1)  # keep at least a 3x3 box after down-sampling

    # guide filter - processing guidance image I
    mean_I = boxfilter(I_down, radius_down)

    var_I_00 = boxfilter(I_down[..., 0] * I_down[..., 0], radius_down) - \
               mean_I[..., 0] * mean_I[..., 0] + eps
    var_I_11 = boxfilter(I_down[..., 1] * I_down[..., 1], radius_down) - \
               mean_I[..., 1] * mean_I[..., 1] + eps
    var_I_22 = boxfilter(I_down[..., 2] * I_down[..., 2], radius_down) - \
               mean_I[..., 2] * mean_I[..., 2] + eps
    var_I_01 = boxfilter(I_down[..., 0] * I_down[..., 1], radius_down) - \
               mean_I[..., 0] * mean_I[..., 1]
    var_I_02 = boxfilter(I_down[..., 0] * I_down[..., 2], radius_down) - \
               mean_I[..., 0] * mean_I[..., 2]
    var_I_12 = boxfilter(I_down[..., 1] * I_down[..., 2], radius_down) - \
               mean_I[..., 1] * mean_I[..., 2]

    inv_00 = var_I_11 * var_I_22 - var_I_12 * var_I_12
    inv_11 = var_I_00 * var_I_22 - var_I_02 * var_I_02
    inv_22 = var_I_00 * var_I_11 - var_I_01 * var_I_01
    inv_01 = var_I_02 * var_I_12 - var_I_01 * var_I_22
    inv_02 = var_I_01 * var_I_12 - var_I_02 * var_I_11
    inv_12 = var_I_02 * var_I_01 - var_I_00 * var_I_12

    # NOTE: det can approach zero in perfectly flat regions when eps == 0
    det = var_I_00 * inv_00 + var_I_01 * inv_01 + var_I_02 * inv_02

    inv_00 = inv_00 / det
    inv_11 = inv_11 / det
    inv_22 = inv_22 / det
    inv_01 = inv_01 / det
    inv_02 = inv_02 / det
    inv_12 = inv_12 / det

    # guide filter - filter input image P for every single channel
    mean_P = boxfilter(P_down, radius_down)
    if mean_P.ndim == 2:
        mean_P = np.expand_dims(mean_P, axis=2)
        P_down = np.expand_dims(P_down, axis=2)

    channels = min(3, mean_P.shape[2])
    for ch in range(channels):
        mean_P_channel = mean_P[..., ch:ch + 1]
        P_channel = P_down[..., ch:ch + 1]
        mean_Ip = boxfilter(I_down * P_channel, radius_down)
        cov_Ip = mean_Ip - mean_I * mean_P_channel

        a0 = inv_00 * cov_Ip[..., 0] + inv_01 * cov_Ip[..., 1] + \
             inv_02 * cov_Ip[..., 2]
        a1 = inv_01 * cov_Ip[..., 0] + inv_11 * cov_Ip[..., 1] + \
             inv_12 * cov_Ip[..., 2]
        a2 = inv_02 * cov_Ip[..., 0] + inv_12 * cov_Ip[..., 1] + \
             inv_22 * cov_Ip[..., 2]
        b = mean_P[..., ch] - a0 * mean_I[..., 0] - a1 * mean_I[..., 1] - \
            a2 * mean_I[..., 2]
        a = np.concatenate((a0[..., np.newaxis], a1[..., np.newaxis],
                            a2[..., np.newaxis]), axis=2)

        mean_a = boxfilter(a, radius_down)
        mean_b = boxfilter(b, radius_down)

        mean_a_up = cv2.resize(mean_a, dsize=(width, height), fx=None,
                               fy=None, interpolation=cv2.INTER_LINEAR)
        mean_b_up = cv2.resize(mean_b, dsize=(width, height), fx=None,
                               fy=None, interpolation=cv2.INTER_LINEAR)
        gf_one_channel = np.sum(mean_a_up * I, axis=2) + mean_b_up
        result[..., ch] = gf_one_channel

    # post process data type
    result = np.squeeze(result)
    if original_data_type == np.uint8:
        result = np.clip(np.round(result), 0, 255).astype(np.uint8)
    return result
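
As a quick correctness check before using the functions, the sketch below compares guide_filter_gray against OpenCV's reference guided filter. This is an assumption-laden example: it assumes opencv-contrib-python is installed (cv2.ximgproc.guidedFilter lives in the contrib modules), and "test.png" is a placeholder file name. Since the fast variant subsamples by step, only rough agreement with the full-resolution reference is expected.

# -*- coding: utf-8 -*-
# Sanity-check sketch: compare the fast filter with OpenCV's reference
# guided filter (requires opencv-contrib-python; "test.png" is a placeholder)
import cv2
import numpy as np
from fastguidedfilter import guide_filter_gray

image = cv2.imread("test.png", cv2.IMREAD_GRAYSCALE)
radius, step, eps = 16, 4, 0.02 * 0.02 * 255 * 255

fast = guide_filter_gray(image, image, radius, step, eps)
ref = cv2.ximgproc.guidedFilter(image, image, radius, eps)

# the fast variant subsamples, so expect small but non-zero differences
diff = np.abs(fast.astype(np.float32) - ref.astype(np.float32))
print("mean abs diff: %.3f, max abs diff: %.3f" % (diff.mean(), diff.max()))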

Application Examples

Denoising

The test image is 00116.png from the open-source FFHQ dataset; it contains the following elements:

  • portrait / face
  • chroma (color) noise
  • checked pattern and plush texture on the clothing
  • color banding
  • flat regions: wall, window, face, etc.
  • high-gradient edges: window frames, face contour, clothing edges, etc.

(The maximum allowed upload is a 5 MB image, so the results were converted to JPEG afterwards and may show some compression artifacts.)

  • The RGB-series outputs:
    Top row, left to right: original; guidance = gray, input = gray; guidance = gray, input = RGB
    Bottom row, left to right: guidance = RGB, input = gray; guidance = RGB, input = RGB; per-channel RGB filtering
    (match the output file names to the test script below)
    [image]

  • The YUV-series outputs:
    Top row, left to right: original; YUV with only the Y channel filtered
    Bottom row, left to right: guidance = YUV, input = YUV; per-channel YUV filtering
    [image]

Conclusions:

  • The guided filter indeed preserves edges well while denoising: the window frames, face contours, and clothing textures show no obvious blur.

  • Per-channel YUV filtering (bottom right of the YUV figure) is the best overall trade-off between quality and computation.

  • Guidance = RGB with input = RGB is very expensive; its result is good, but it shows no clear advantage over per-channel YUV filtering (see the timing sketch after the test script below).

  • A multi-channel guidance image (RGB or YUV) preserves edges better than a gray one. In particular, guidance = gray with input = RGB causes a severe color cast (look at the color banding in the top right of the RGB figure).

  • Per-channel RGB filtering, or filtering only the Y channel of YUV, cannot suppress chroma noise.

The test script used for all of the above:

# -*- coding: utf-8 -*-
import cv2
from fastguidedfilter import guide_filter_gray
from fastguidedfilter import guide_filter_color


def rgb_gray_guidance():
    scale = 255
    step = 4
    radius = 16
    eps = 0.02 * 0.02 * scale * scale

    image = cv2.imread(r"00116.png", cv2.IMREAD_UNCHANGED)

    I = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    P_gray = I.copy()
    P_color = image.copy()

    gray_scale_gf = guide_filter_gray(I, P_gray, radius, step, eps)
    color_gf = guide_filter_gray(I, P_color, radius, step, eps)

    cv2.imwrite("gray_guidance_gray_input.png", gray_scale_gf)
    cv2.imwrite("gray_guidance_rgb_input.png", color_gf)


def rgb_color_guidance():
    scale = 255
    step = 4
    radius = 16
    eps = 0.02 * 0.02 * scale * scale

    image = cv2.imread(r"00116.png", cv2.IMREAD_UNCHANGED)

    I = image.copy()
    P_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    P_color = image.copy()

    gray_scale_gf = guide_filter_color(I, P_gray, radius, step, eps)
    color_gf = guide_filter_color(I, P_color, radius, step, eps)

    cv2.imwrite("rgb_guidance_gray_input.png", gray_scale_gf)
    cv2.imwrite("rgb_guidance_rgb_input.png", color_gf)


def rgb_channel_wise():
    scale = 255
    step = 4
    radius = 16
    eps = 0.02 * 0.02 * scale * scale

    image = cv2.imread(r"00116.png", cv2.IMREAD_UNCHANGED)

    for ch in range(3):
        channel = image[..., ch]
        gf_channel = guide_filter_gray(channel, channel, radius, step, eps)
        image[..., ch] = gf_channel

    cv2.imwrite("rgb_channel_wise.png", image)


def yuv_y_channel():
    scale = 255
    step = 4
    radius = 16
    eps = 0.02 * 0.02 * scale * scale

    image = cv2.imread(r"00116.png", cv2.IMREAD_UNCHANGED)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2YUV)
    Y = image[..., 0]

    Y_gf = guide_filter_gray(Y, Y, radius, step, eps)

    image[..., 0] = Y_gf
    image = cv2.cvtColor(image, cv2.COLOR_YUV2BGR)
    cv2.imwrite("yuv_y_channel.png", image)


def yuv_3_channels():
    scale = 255
    step = 4
    radius = 16
    eps = 0.02 * 0.02 * scale * scale

    image = cv2.imread(r"00116.png", cv2.IMREAD_UNCHANGED)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2YUV)

    image = guide_filter_color(image, image, radius, step, eps)

    image = cv2.cvtColor(image, cv2.COLOR_YUV2BGR)
    cv2.imwrite("yuv_3_channels.png", image)


def yuv_channel_wise():
    scale = 255
    step = 4
    radius = 16
    eps = 0.02 * 0.02 * scale * scale

    image = cv2.imread(r"00116.png", cv2.IMREAD_UNCHANGED)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2YUV)

    for ch in range(3):
        channel = image[..., ch]
        gf_channel = guide_filter_gray(channel, channel, radius, step, eps)
        image[..., ch] = gf_channel

    image = cv2.cvtColor(image, cv2.COLOR_YUV2BGR)
    cv2.imwrite("yuv_channel_wise.png", image)


if __name__ == '__main__':
    rgb_gray_guidance()
    rgb_color_guidance()
    rgb_channel_wise()
    yuv_y_channel()
    yuv_3_channels()
    yuv_channel_wise()
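
To put numbers behind the cost remarks above, here is a rough timing sketch (it reuses the same 00116.png test image; exact figures depend on image size and hardware):

# -*- coding: utf-8 -*-
# Timing sketch: relative cost of guide_filter_gray vs guide_filter_color
import time
import cv2
from fastguidedfilter import guide_filter_gray, guide_filter_color

image = cv2.imread("00116.png", cv2.IMREAD_UNCHANGED)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
radius, step, eps = 16, 4, 0.02 * 0.02 * 255 * 255

runs = 5
t0 = time.perf_counter()
for _ in range(runs):
    guide_filter_gray(gray, image, radius, step, eps)
t1 = time.perf_counter()
for _ in range(runs):
    guide_filter_color(image, image, radius, step, eps)
t2 = time.perf_counter()

print("guide_filter_gray : %.3f s/run" % ((t1 - t0) / runs))
print("guide_filter_color: %.3f s/run" % ((t2 - t1) / runs))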

Skin Smoothing

Edge-preserving filters are well suited to skin smoothing: they erase fine detail textures, especially in flat regions, while keeping large edge structures, and skin is a relatively flat region.
Note that to change the smoothing strength, adjusting the regularization parameter eps is more appropriate than adjusting the radius: a larger eps pushes a toward 0 and b toward the local mean, so the output approaches a plain box average everywhere, whereas the radius mainly changes the neighborhood size.

The test image this time is 00276.png, also from the open-source FFHQ dataset.
Results:
Top row, left to right: original; eps = 0.02 * 0.02
Bottom row, left to right: eps = 0.04 * 0.04; eps = 0.08 * 0.08
[image]

The code:

# -*- coding: utf-8 -*-
import cv2
import numpy as np
from fastguidedfilter import guide_filter_color


def smooth_skin():
    xmin, ymin, xmax, ymax = 1400, 0, 3200, 2000
    scale = 255
    step = 4
    radius = 16

    image = cv2.imread(r"00276.png", cv2.IMREAD_UNCHANGED)

    eps = 0.02 * 0.02 * scale * scale
    gf_002 = guide_filter_color(image, image, radius, step, eps)

    eps = 0.04 * 0.04 * scale * scale
    gf_004 = guide_filter_color(image, image, radius, step, eps)

    eps = 0.08 * 0.08 * scale * scale
    gf_008 = guide_filter_color(image, image, radius, step, eps)

    image = image[ymin:ymax, xmin:xmax]
    gf_002 = gf_002[ymin:ymax, xmin:xmax]
    gf_004 = gf_004[ymin:ymax, xmin:xmax]
    gf_008 = gf_008[ymin:ymax, xmin:xmax]

    image_up = np.concatenate([image, gf_002], axis=1)
    image_down = np.concatenate([gf_004, gf_008], axis=1)
    image_all = np.concatenate([image_up, image_down], axis=0)
    cv2.imwrite(r'./smooth_skin.jpg', image_all)


if __name__ == '__main__':
    smooth_skin()

Image Fusion - Brightness Adjustment

Sometimes after image fusion the overall brightness of the result has shifted and needs to be pulled back; the guided filter can do this.
In the example below, the goal is to blend a roller-shutter texture (top left) into an image of Lan Ling Wang (top right). Direct multiplicative fusion gives the bottom-left image, whose overall brightness has dropped. Running the guided filter with the bottom-left (fused) image as guidance, the top-right (original) image as input, a very large radius, and a tiny eps yields the bottom-right result: the texture is blended in, yet the overall brightness no longer shifts. Intuitively, with eps near 0 and windows this large, a and b become nearly global constants, so the output is roughly a global affine remap of the fused image toward the input's brightness, which keeps the fused texture intact.
[image]
The code is below.
Note: the radius must be very large (a small radius destroys the texture structure), and eps must be very small, even 0 (a large eps causes blur).

# -*- coding: utf-8 -*-
import cv2
import numpy as np
from fastguidedfilter import guide_filter_color


def make_data():
    scale = 255.
    step = 4
    radius = 1000
    eps = 0

    image = cv2.imread(r'lanlingwang.png')
    image = cv2.resize(image, dsize=None, fx=0.5, fy=0.5,
                       interpolation=cv2.INTER_LINEAR)
    height, width = image.shape[:2]
    texture = cv2.imread(r'texture.jpg')
    texture = cv2.resize(texture, dsize=(width, height),
                         interpolation=cv2.INTER_LINEAR)
    image_up = np.concatenate([texture, image], axis=1)

    image = image / scale
    texture = texture / scale
    fused_image = image * texture

    gf = guide_filter_color(fused_image, image, radius, step, eps)

    fused_image = np.clip(np.round(fused_image * 255), 0, 255).astype(np.uint8)
    gf = np.clip(np.round(gf * 255), 0, 255).astype(np.uint8)

    image_down = np.concatenate([fused_image, gf], axis=1)
    image_all = np.concatenate([image_up, image_down], axis=0)
    cv2.imwrite(r'./make_data.jpg', image_all)


if __name__ == '__main__':
    make_data()
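
To verify that the overall brightness is actually restored, one can compare global channel means before and after. This is a hypothetical check, reusing the same inputs as make_data:

# -*- coding: utf-8 -*-
# Hypothetical brightness check: compare global means of the original input,
# the multiplicative fusion, and the guided-filter output
import cv2
import numpy as np
from fastguidedfilter import guide_filter_color

image = cv2.imread("lanlingwang.png").astype(np.float32) / 255.
texture = cv2.imread("texture.jpg")
texture = cv2.resize(texture, (image.shape[1], image.shape[0]))
texture = texture.astype(np.float32) / 255.

fused = image * texture
gf = guide_filter_color(fused, image, radius=1000, step=4, eps=0)

print("input mean: %.4f" % image.mean())   # reference brightness
print("fused mean: %.4f" % fused.mean())   # noticeably darker
print("gf mean   : %.4f" % gf.mean())      # should be close to the input mean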
