Specular Highlight Detection

A Python implementation of the specular highlight detection part of the paper "Automatic segmentation and inpainting of specular highlights for endoscopic imaging".

The code is still rough but can be run as-is; I will tidy it up and add more comments when I have time.

import cv2
import numpy as np
import matplotlib.pyplot as plt
import scipy.ndimage

from skimage import measure, filters

img = cv2.imread('../data/fazhi/95.jpg')
w = img.shape[0]  # number of rows (image height)
h = img.shape[1]  # number of columns (image width)

# Module 1: absolute thresholding.
# OpenCV loads images in BGR order, so index the channels accordingly.
cR = img[:, :, 2]
cG = img[:, :, 1]
cB = img[:, :, 0]

# Grey-level (luminance) channel.
cE = 0.2989 * cR + 0.5870 * cG + 0.1140 * cB


def calc_module1_specular_mask(cE, cG, cB, T1):
    # The original MATLAB code uses the global 95th percentile of each channel
    # (prctile(c(:), 95)); here every pixel is simply scaled by 0.95 instead.
    p95_cG = cG * 0.95
    p95_cE = cE * 0.95
    p95_cB = cB * 0.95
    rGE = p95_cG / (p95_cE + 1e-6)  # colour-balance ratios (small epsilon avoids division by zero)
    rBE = p95_cB / (p95_cE + 1e-6)
    img_new = np.zeros((w, h, 1))
    for i in range(0, w):
        for j in range(0, h):
            # A pixel is specular if all three channels exceed their (ratio-adjusted) threshold T1.
            if all([(cG[i][j] > rGE[i][j] * T1), (cB[i][j] > rBE[i][j] * T1), (cE[i][j] > T1)]):
                img_new[i][j] = 255
            else:
                img_new[i][j] = 0

    return img_new


module1_specular_mask = calc_module1_specular_mask(cE, cG, cB, T1=240)
cv2.imwrite('../data/New/95-model1.jpg',module1_specular_mask)
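Because numerator and denominator are both scaled by 0.95, the factor cancels and rGE/rBE reduce to cG/cE and cB/cE; the original prctile calls point to a global 95th percentile per channel instead. A vectorised sketch of that percentile-based reading (the function name is my own and it has not been checked against the paper's code):

def calc_module1_specular_mask_percentile(cE, cG, cB, T1):
    # Global 95th percentiles of each channel, mirroring the MATLAB prctile(c(:), 95) calls.
    p95_cG = np.percentile(cG, 95)
    p95_cE = np.percentile(cE, 95)
    p95_cB = np.percentile(cB, 95)
    rGE = p95_cG / p95_cE  # scalar colour-balance ratios
    rBE = p95_cB / p95_cE
    # Vectorised version of the same three-channel test.
    mask = (cG > rGE * T1) & (cB > rBE * T1) & (cE > T1)
    return (mask.astype(np.uint8) * 255)[:, :, np.newaxis]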


# cv2.imshow('s',img)


def calc_centroid_color_info(specular_mask_T2_abs, img):
    # Grow the specular mask with two different kernel sizes; the difference of the
    # two dilations is a ring of surrounding (non-specular) pixels around each
    # highlight, whose mean colour is used as the "centroid colour" for filling.
    kernel = np.ones((2, 2), np.uint8)
    dilated_mask_1 = cv2.dilate(specular_mask_T2_abs, kernel, iterations=1)
    kernel = np.ones((4, 4), np.uint8)
    dilated_mask_2 = cv2.dilate(specular_mask_T2_abs, kernel, iterations=1)
    centroid_color_area = dilated_mask_2 - dilated_mask_1
    labeled_area = measure.label(centroid_color_area > 0)  # connected-component labelling
    num_region = labeled_area.max()
    centroid_color_info = []
    for i in range(1, num_region + 1):
        info = {}
        row_index, col_index = np.where(labeled_area == i)
        info['centroid_row'] = np.mean(row_index)
        info['centroid_col'] = np.mean(col_index)
        info['centroid_color'] = np.mean(img[row_index, col_index, :], axis=0)  # per-channel mean
        centroid_color_info.append(info)

    return centroid_color_info


def calc_distance(x, y, x1, y1):
    distance_to_centroid = np.sqrt((x - x1) ** 2 + (y - y1) ** 2)
    return distance_to_centroid


def find_the_nearest_region(centroid_color_info, pixel_row, pixel_col):
    # Return the ring region whose centroid is closest to the given pixel.
    num_region = len(centroid_color_info)
    nearest_region_index = 0
    nearest_distance = 1e6
    for j in range(num_region):
        distance_to_centroid = calc_distance(pixel_row, pixel_col, centroid_color_info[j]['centroid_row'],
                                             centroid_color_info[j]['centroid_col'])
        if distance_to_centroid < nearest_distance:
            nearest_distance = distance_to_centroid
            nearest_region_index = j

    nearest_region = centroid_color_info[nearest_region_index]
    return nearest_region


def filling_image_using_centroid_color(specular_mask_T2_abs, img):
    # Fill every possible specular pixel with the colour of the nearest ring region.
    centroid_color_info = calc_centroid_color_info(specular_mask_T2_abs, img)
    specular_mask_2d = specular_mask_T2_abs.reshape(specular_mask_T2_abs.shape[0], specular_mask_T2_abs.shape[1])
    row_index, col_index = np.where(specular_mask_2d > 0)
    num_possible_specular_points = len(row_index)
    filled_img = img.copy()  # do not overwrite the original image
    for i in range(num_possible_specular_points):
        # Look up the nearest centroid colour for every specular point and fill it.
        nearest_region = find_the_nearest_region(centroid_color_info, row_index[i], col_index[i])
        filled_img[row_index[i], col_index[i], :] = nearest_region['centroid_color']
    return filled_img


def contrast_coeffcient(c):
    # Contrast coefficient t_c = mean(c) / (mean(c) + std(c)) for one colour channel.
    mean_c = np.mean(c)
    std_c = np.std(c)
    return mean_c / (mean_c + std_c)


def calc_modul2_specular_mask(filled_img, T2_rel, cR, cG, cB):
    # Median-filter each channel of the filled image to get a smooth background estimate.
    # (filled_img is BGR, so channel 2 is red and channel 0 is blue.)
    fR = cv2.medianBlur(filled_img[:, :, 2], 31).astype(np.float64)
    fG = cv2.medianBlur(filled_img[:, :, 1], 31).astype(np.float64)
    fB = cv2.medianBlur(filled_img[:, :, 0], 31).astype(np.float64)

    # Guard against division by zero, as in the MATLAB fR(fR < eps) = 1e7.
    eps = 2.2204e-16
    fR[fR < eps] = 1e7
    fG[fG < eps] = 1e7
    fB[fB < eps] = 1e7

    tR = contrast_coeffcient(cR)
    tG = contrast_coeffcient(cG)
    tB = contrast_coeffcient(cB)

    # A pixel is specular if any channel is much brighter than its filtered background.
    max_img = np.stack(((tR * cR / fR), (tG * cG / fG), (tB * cB / fB)), axis=2)
    e_max = np.amax(max_img, 2)
    module2_specular_mask = e_max > T2_rel
    return module2_specular_mask


# Module 2: relative thresholding. A lower absolute threshold (T1 = 190) first
# selects candidate highlights, which are then filled with the surrounding colour.
specular_mask_T2_abs = calc_module1_specular_mask(cE, cG, cB, T1=190)

filled_img = filling_image_using_centroid_color(specular_mask_T2_abs, img)
plt.imshow(cv2.cvtColor(filled_img, cv2.COLOR_BGR2RGB))  # convert BGR -> RGB for matplotlib
plt.show()
module2_specular_mask = calc_modul2_specular_mask(filled_img, T2_rel=1.2, cR=cR, cG=cG, cB=cB)
# module2_specular_mask = np.array(module2_specular_mask)

final_mask = np.zeros((w, h, 1))
for i in range(0, w):
    for j in range(0, h):
        if module2_specular_mask[i][j] == True or module1_specular_mask[i][j] == 255:
            final_mask[i][j][0] = 255
        else:
            final_mask[i][j][0] = 0
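
The double loop above builds the union of the Module 1 and Module 2 masks; on larger frames an equivalent vectorised version (same result, plain NumPy broadcasting) is much faster:

# Equivalent vectorised union of the two masks (same result as the loop above).
final_mask = (np.logical_or(module2_specular_mask, module1_specular_mask[:, :, 0] == 255)
              .astype(np.float64) * 255)[:, :, np.newaxis]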

N_min = 5000
T3 = 5


def postprocessing(final_mask, cE, N_min, T3):
    # Erode the mask slightly, then discard connected regions that are too large
    # (at least N_min pixels) to be specular highlights. T3 is kept for
    # compatibility with the paper's parameter list but is not used here.
    kernel = np.ones((3, 3), np.uint8)
    final_mask = cv2.erode(final_mask, kernel, iterations=1)
    labeled_area = measure.label(final_mask)
    num_region = np.max(labeled_area)
    post_specular_mask = final_mask
    for i in range(1, num_region + 1):
        index = np.where(labeled_area == i)
        if len(index[0]) >= N_min:  # region size in pixels
            post_specular_mask[index] = 0

    return post_specular_mask


mask = postprocessing(final_mask, cE, N_min=3000, T3=5)

img_gray = cv2.imread('../data/fazhi/95.jpg', cv2.IMREAD_GRAYSCALE)
# Otsu thresholding: exploit the grey-level difference between the target regions and
# the background, treat the image as two classes of regions (target and background)
# with different grey levels, and pick a suitable global threshold automatically.
thresh = filters.threshold_otsu(img_gray)

# Binarise the grey image with the Otsu threshold.
TTTT = np.zeros((w, h))
dst = (img_gray >= thresh) * 255.0

# Keep only detections that are also bright according to the Otsu binarisation.
for i in range(0, w):
    for j in range(0, h):
        if mask[i][j] > 0 and dst[i][j] > 0:
            TTTT[i][j] = 255

image2 = np.concatenate([TTTT, mask, dst], axis=1)
#cv2.imwrite(r"D:\code dp\YXTX\data\New\TTTT2.jpg", TTTT)
plt.set_cmap("binary")
# plt.imshow(TTTT)
# plt.imshow(mask)
# plt.imshow(dst)
plt.imshow(image2)

plt.show()
cv2.imwrite('../data/New/95_mask.jpg',TTTT)
cv2.imwrite('../data/New/95_mask-image2.jpg',image2)
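
For a quick visual check it helps to paint the final mask back onto the original frame; the overlay colour and output path below are just examples:

# Optional sanity check: mark detected highlight pixels in red on the original frame.
overlay = img.copy()
overlay[TTTT > 0] = (0, 0, 255)  # pure red in BGR
cv2.imwrite('../data/New/95_overlay.jpg', overlay)  # example output path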

# cv2.imshow('d', module2_specular_mask)
# cv2.waitKey()
# module2_specular_mask = module2_specular_mask*255
# cv2.imshow('6',filled_img)
# plt.imshow(module2_specular_mask)
# #plt.set_camp('binary')
# plt.show()


### inpainting

decay_win_size = 10
decay_cof = 20


def InpainttingArnold2010(mask, img, decay_win_size, decay_cof):
    # Fill the masked pixels with the nearest ring colour, smooth the filled image,
    # and blend it back into the original frame inside a slightly enlarged mask.
    filled_img = filling_image_using_centroid_color(mask, img)
    cv2.imwrite('../data/New/95filled.jpg', filled_img)
    sig = 8
    gaussian_filtered_img = cv2.GaussianBlur(filled_img, (3, 3), sig, sigmaY=sig)
    cv2.imshow('s', gaussian_filtered_img)

    # MATLAB reference: mx = imfilter(double(specular_mask), ones(decay_win_size)/decay_cof)
    mask_bin = (mask > 0).astype(np.float64)  # work with a 0/1 mask, as in the MATLAB code
    mx = scipy.ndimage.convolve(mask_bin, np.ones((decay_win_size, decay_win_size)) / decay_cof,
                                mode='nearest')
    cv2.imshow('mx', mx)
    cv2.waitKey()
    mx = mx + mask_bin
    mx = (mx > 1) * 1  # binary blending weight: 1 inside the (grown) specular mask
    mx = mx.reshape(w, h, 1)
    inpainted_img = mx * gaussian_filtered_img + (1 - mx) * img
    return inpainted_img.astype(np.uint8)


inpainted_img = InpainttingArnold2010(TTTT, img, decay_win_size, decay_cof)
#plt.imshow(inpainted_img)
cv2.imwrite('../data/New/95inpaint.jpg',inpainted_img)
plt.show()
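
As a quick baseline to compare against the Arnold-style decay blending above, OpenCV's built-in Telea inpainting can be run on the same final mask. This is not part of the paper's method, and the output path is only an example:

# Baseline comparison: OpenCV's built-in Telea inpainting on the same mask.
inpaint_mask = (TTTT > 0).astype(np.uint8) * 255  # cv2.inpaint expects an 8-bit mask
telea_img = cv2.inpaint(img, inpaint_mask, 5, cv2.INPAINT_TELEA)  # radius 5 px, Telea method
cv2.imwrite('../data/New/95_inpaint_telea.jpg', telea_img)  # example output path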

I also added a fixed-threshold mask of my own and took the union with the detection result.

import cv2
import numpy as np
import matplotlib.pyplot as plt

from skimage import measure, filters

img = cv2.imread('../data/fazhi/8.jpg')
w = img.shape[0]  # number of rows (image height)
h = img.shape[1]  # number of columns (image width)

# Module 1 (same as above), channels indexed in OpenCV's BGR order.
cR = img[:, :, 2]
cG = img[:, :, 1]
cB = img[:, :, 0]

cE = 0.2989 * cR + 0.5870 * cG + 0.1140 * cB


def calc_module1_specular_mask(cE, cG, cB, T1):
    # Per-pixel 0.95 scaling stands in for the original prctile(c(:), 95) values.
    p95_cG = cG * 0.95
    p95_cE = cE * 0.95
    p95_cB = cB * 0.95
    rGE = p95_cG / (p95_cE + 1e-6)
    rBE = p95_cB / (p95_cE + 1e-6)
    img_new = np.zeros((w, h, 1))
    for i in range(0, w):
        for j in range(0, h):
            if all([(cG[i][j] > rGE[i][j] * T1), (cB[i][j] > rBE[i][j] * T1), (cE[i][j] > T1)]):
                img_new[i][j] = 255
            else:
                img_new[i][j] = 0

    return img_new


module1_specular_mask = calc_module1_specular_mask(cE, cG, cB, T1=240)
cv2.imwrite('../data/New/8-model1.jpg',module1_specular_mask)


# cv2.imshow('s',img)


def calc_centroid_color_info(specular_mask_T2_abs, img):
    # Ring of surrounding pixels: difference of two dilations of the specular mask.
    kernel = np.ones((4, 4), np.uint8)
    dilated_mask_1 = cv2.dilate(specular_mask_T2_abs, kernel, iterations=1)
    kernel = np.ones((6, 6), np.uint8)
    dilated_mask_2 = cv2.dilate(specular_mask_T2_abs, kernel, iterations=1)
    centroid_color_area = dilated_mask_2 - dilated_mask_1
    labeled_area = measure.label(centroid_color_area > 0)  # connected-component labelling
    num_region = labeled_area.max()
    centroid_color_info = []
    for i in range(1, num_region + 1):
        info = {}
        row_index, col_index = np.where(labeled_area == i)
        info['centroid_row'] = np.mean(row_index)
        info['centroid_col'] = np.mean(col_index)
        info['centroid_color'] = np.mean(img[row_index, col_index, :], axis=0)  # per-channel mean
        centroid_color_info.append(info)

    return centroid_color_info


def calc_distance(x, y, x1, y1):
    distance_to_centroid = np.sqrt((x - x1) ** 2 + (y - y1) ** 2)
    return distance_to_centroid


def find_the_nearest_region(centroid_color_info, pixel_row, pixel_col):
    num_region = len(centroid_color_info)
    nearest_region_index = 0
    nearest_distance = 1e6
    for j in range(num_region):
        distance_to_centroid = calc_distance(pixel_row, pixel_col, centroid_color_info[j]['centroid_row'],
                                             centroid_color_info[j]['centroid_col'])
        if distance_to_centroid < nearest_distance:
            nearest_distance = distance_to_centroid
            nearest_region_index = j

    nearest_region = centroid_color_info[nearest_region_index]
    return nearest_region


def filling_image_using_centroid_color(specular_mask_T2_abs, img):
    # Fill every candidate specular pixel with the colour of the nearest ring region.
    centroid_color_info = calc_centroid_color_info(specular_mask_T2_abs, img)
    specular_mask_2d = specular_mask_T2_abs.reshape(specular_mask_T2_abs.shape[0], specular_mask_T2_abs.shape[1])
    row_index, col_index = np.where(specular_mask_2d > 0)
    num_possible_specular_points = len(row_index)
    filled_img = img.copy()  # keep the original image untouched
    for i in range(num_possible_specular_points):
        nearest_region = find_the_nearest_region(centroid_color_info, row_index[i], col_index[i])
        filled_img[row_index[i], col_index[i], :] = nearest_region['centroid_color']
    return filled_img


def contrast_coeffcient(c):
    mean_c = np.mean(c)
    std_c = np.std(c)
    return mean_c / (mean_c + std_c)


def calc_modul2_specular_mask(filled_img, T2_rel, cR, cG, cB):
    # Median-filtered background estimate of each channel (BGR order).
    fR = cv2.medianBlur(filled_img[:, :, 2], 31).astype(np.float64)
    fG = cv2.medianBlur(filled_img[:, :, 1], 31).astype(np.float64)
    fB = cv2.medianBlur(filled_img[:, :, 0], 31).astype(np.float64)

    # Guard against division by zero (MATLAB: fR(fR < eps) = 1e7).
    eps = 2.2204e-16
    fR[fR < eps] = 1e7
    fG[fG < eps] = 1e7
    fB[fB < eps] = 1e7

    tR = contrast_coeffcient(cR)
    tG = contrast_coeffcient(cG)
    tB = contrast_coeffcient(cB)

    max_img = np.stack(((tR * cR / fR), (tG * cG / fG), (tB * cB / fB)), axis=2)
    e_max = np.amax(max_img, 2)
    module2_specular_mask = e_max > T2_rel
    return module2_specular_mask


# Module 2: relative thresholding, as in the first script.
specular_mask_T2_abs = calc_module1_specular_mask(cE, cG, cB, T1=190)

filled_img = filling_image_using_centroid_color(specular_mask_T2_abs, img)
#plt.imshow(filled_img)
plt.show()
module2_specular_mask = calc_modul2_specular_mask(filled_img, T2_rel=1.2, cR=cR, cG=cG, cB=cB)
# module2_specular_mask = np.array(module2_specular_mask)

final_mask = np.zeros((w, h, 1))
for i in range(0, w):
    for j in range(0, h):
        if module2_specular_mask[i][j] == True or module1_specular_mask[i][j] == 255:
            final_mask[i][j][0] = 255
        else:
            final_mask[i][j][0] = 0

N_min = 5000
T3 = 5


def postprocessing(final_mask, cE, N_min, T3):
    # Erode slightly, then drop connected regions larger than N_min pixels.
    kernel = np.ones((3, 3), np.uint8)
    final_mask = cv2.erode(final_mask, kernel, iterations=1)
    labeled_area = measure.label(final_mask)
    num_region = np.max(labeled_area)
    post_specular_mask = final_mask
    for i in range(1, num_region + 1):
        index = np.where(labeled_area == i)
        if len(index[0]) >= N_min:  # region size in pixels
            post_specular_mask[index] = 0

    return post_specular_mask


mask = postprocessing(final_mask, cE, N_min=3000, T3=5)

img_gray = cv2.imread('../data/fazhi/8.jpg', cv2.IMREAD_GRAYSCALE)
# Otsu thresholding: pick a global threshold from the grey-level difference between
# the target regions and the background.
thresh = filters.threshold_otsu(img_gray)

# Binarise the grey image with the Otsu threshold.
TTTT = np.zeros((w, h))
dst = (img_gray >= thresh) * 255.0

for i in range(0, w):
    for j in range(0, h):
        if mask[i][j] > 0 and dst[i][j] > 0:
            TTTT[i][j] = 255


## Fixed global threshold on the blurred grey image

gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

blurred = cv2.GaussianBlur(gray, (11, 11), 0)

# threshold the image to reveal light regions in the
# blurred image

#y = blurred[:,:,2]/(blurred[:,:,1] + blurred[:,:,2] + blurred[:,:,3])

th = cv2.threshold(blurred, 200, 255, cv2.THRESH_BINARY)[1]


# Union: keep a pixel if either the detection mask or the fixed-threshold mask marks it.
for i in range(0, w):
    for j in range(0, h):
        if TTTT[i][j] > 0 or th[i][j] > 0:
            TTTT[i][j] = 255




image2 = np.concatenate([TTTT, mask, dst], axis=1)
#cv2.imwrite(r"D:\code dp\YXTX\data\New\TTTT2.jpg", TTTT)
plt.set_cmap("binary")
# plt.imshow(TTTT)
# plt.imshow(mask)
# plt.imshow(dst)
#plt.imshow(image2)

plt.show()
cv2.imwrite('../data/New/8_mask+uzhi.jpg',TTTT)