图像拼接代码
import os
import shutil
import tqdm
import cv2
import matplotlib.pyplot as plt
# Output root for the per-image folders and the concatenated comparison figures.
save_path = 'C:/Users/mingyu/Documents/AAAA/20231108data/saliency map/concate_cod/'
# Root of the validation datasets; each dataset is expected to contain RGB/, Edge/ and GT/ subfolders.
datasets_root = 'D:/DataSet/Datasets/Val/'
# Root holding each method's predictions, laid out as <method>/<dataset>/<image>.png.
method_path = 'D:/DataSet/Datasets/'
# Alternative dataset/method lists kept for quickly switching experiments.
# datasets = ['DUT-RGBD', 'LFSD', 'NJU2K', 'NLPR', 'SIP', 'SSD', 'STERE']
# datasets = ['LFSD', 'NJU2K', 'NLPR', 'SIP', 'SSD', 'STERE']
datasets = ['CAMO', 'CHAMELEON', 'COD10K']
# datasets = ['NJU2K']
# datasets = ['SIP', 'SSD', 'STERE']
# method_ls = ['Ours', 'C2DFNet', 'CATNet', 'CAVER', 'CIRNet', 'CPNet', 'DCMF', 'HiDANet', 'HRTransNet', 'MVSalNet',
# 'PICR-Net',
# 'RD3D+', 'SPNet', 'SPSN', 'SwinNet', 'VST']
method_ls = ['FEDER', 'FPNet', 'FSPNet']
# Hide every axis spine globally so the figure shows only the images and titles.
plt.rcParams['axes.spines.right'] = False
plt.rcParams['axes.spines.top'] = False
plt.rcParams['axes.spines.left'] = False
plt.rcParams['axes.spines.bottom'] = False
# For every dataset: copy each image's inputs/predictions into a per-image folder,
# then build a single-row comparison figure: RGB | Edge | GT | one column per method.
for dataset in datasets:
    print(f'\n正在处理{dataset}...')
    if not os.path.exists(save_path + dataset):
        os.makedirs(save_path + dataset)
    # Image stems are taken from the RGB folder; the Edge/GT/method folders
    # are assumed to contain files with the same stems.
    image_names = sorted(i.split('.')[0] for i in os.listdir(f'{datasets_root}{dataset}/RGB'))
    for image in tqdm.tqdm(image_names):
        # Map display label -> source file path for this image.
        method_img_dict = {}
        for method in method_ls:
            method_img_dict[method] = f'{method_path + method}/{dataset}/{image}.png'
        rgb_path = f'{datasets_root + dataset}/RGB/{image}.jpg'
        depth_path = f'{datasets_root + dataset}/Edge/{image}.png'
        gt_path = f'{datasets_root + dataset}/GT/{image}.png'
        # Create the per-image folder and copy every source image into it.
        image_dest_path = save_path + dataset + '/' + image
        os.makedirs(image_dest_path, exist_ok=True)
        method_img_dict['RGB'] = rgb_path
        method_img_dict['Edge'] = depth_path
        method_img_dict['GT'] = gt_path
        for key in method_img_dict.keys():
            # NOTE(review): the RGB source is a .jpg but is copied under a .png
            # name; shutil.copy keeps the bytes, only the extension is misleading.
            shutil.copy(method_img_dict[key], image_dest_path + f'/{key}.png')
        # --- Build the concatenated figure ---
        len_method = len(method_ls) + 3  # 3 fixed panels (RGB, Edge, GT) + one per method
        plt.figure(0, (6, 2), dpi=600)
        plt.text(0.5, 0.0, dataset + ' ' + image + '.jpg'), plt.xticks([]), plt.yticks([])
        ready_list = ['RGB', 'Edge', 'GT']
        gt_shape = ()
        for step, i_method in enumerate(ready_list):
            imread = cv2.imread(method_img_dict[i_method])
            if i_method == 'RGB':
                # OpenCV loads BGR; convert so matplotlib shows true colors.
                imread = cv2.cvtColor(imread, cv2.COLOR_BGR2RGB)
            if i_method == 'GT':
                # Remember the GT size so every prediction can be resized to match.
                gt_shape = imread.shape
            plt.subplot(1, len_method, step + 1), plt.imshow(imread)
            plt.title(i_method), plt.xticks([]), plt.yticks([])
        for step, i_method in enumerate(method_ls):
            cv__imread = cv2.imread(method_img_dict[i_method])
            # cv2.resize takes (width, height), i.e. (shape[1], shape[0]).
            cv__imread = cv2.resize(cv__imread, (gt_shape[1], gt_shape[0]))
            # Method panels start at subplot index 4, after RGB/Edge/GT.
            plt.subplot(1, len_method, step + 4), plt.imshow(cv__imread)
            plt.title(i_method), plt.xticks([]), plt.yticks([])
        concate_result_path = save_path + dataset + '/A_concate/'
        os.makedirs(concate_result_path, exist_ok=True)
        plt.savefig(concate_result_path + image + '.jpg', bbox_inches='tight', pad_inches=0.2)
        # Close the (reused) figure 0 so the next image starts from a blank canvas.
        plt.close()
图像拼接效果
![](https://img-blog.csdnimg.cn/direct/eead13cf887e4cea8f2f9d0931eb81d4.jpeg)
![](https://img-blog.csdnimg.cn/direct/445f7436864d4042a41bf3163690bf25.jpeg)
![](https://img-blog.csdnimg.cn/direct/0613d2d0b4d044fe89a6cbdc32f2a929.jpeg)
图像挑选代码
import os
import shutil
import tqdm
import numpy as np
from PIL import Image
import decimal
result_num = 10  # How many best and how many worst results to keep per dataset.
# Save location produced by the image-concatenation script (reused here as input/output root).
save_path = 'C:/Users/mingyu/Documents/AAAA/20231108data/saliency map/concate2/'
# Root of the RGB-D SOD test datasets; each contains RGB/, depth/ and GT/ subfolders.
datasets_root = 'D:/DataSet/SOD/RGB-D SOD/test_data/'
# Root holding the compared methods' saliency maps, laid out as <method>/<dataset>/<image>.png.
method_path = 'C:/Users/mingyu/Documents/AAAA/20231108data/saliency map/duibi/'
datasets = ['DUT-RGBD', 'LFSD', 'NJU2K', 'NLPR', 'SIP', 'SSD', 'STERE']
# datasets = ['DUT-RGBD', 'NJU2K', 'NLPR', 'SIP', 'SSD', 'STERE']
# datasets = ['LFSD']
# datasets = ['SIP', 'SSD', 'STERE']
# 'Ours' must be first; all other entries are the comparison methods.
method_ls = ['Ours', 'C2DFNet', 'CATNet', 'CAVER', 'CIRNet', 'CPNet', 'DCMF', 'HiDANet', 'HRTransNet', 'MVSalNet',
'PICR-Net',
'RD3D+', 'SPNet', 'SPSN', 'SwinNet', 'VST']
def calc_mae(img1_path, img2_path):
    """Compute the mean absolute error (MAE) between two images.

    Both images are opened with PIL and converted to grayscale ('L');
    the second image is resized to the first image's size before the
    pixel-wise comparison, so ``img1_path`` acts as the reference
    (typically the ground truth).

    :param img1_path: path to the reference image (defines the size)
    :param img2_path: path to the image being evaluated
    :return: MAE as a ``decimal.Decimal`` rounded half-up to 3 decimals
    """
    assert os.path.exists(img1_path)
    assert os.path.exists(img2_path)
    img1, img2 = Image.open(img1_path).convert('L'), Image.open(img2_path).convert('L')
    # PIL's .size is (width, height), which is exactly what .resize expects.
    size_gt = img1.size
    img2 = img2.resize(size_gt)
    # Normalize pixel values to [0, 1] before averaging absolute differences.
    array1, array2 = np.array(img1) / 255., np.array(img2) / 255.
    mae = np.mean(np.abs(array1 - array2))
    # Round half-up to 3 decimals; built-in round() would use banker's rounding.
    mae_3 = decimal.Decimal(mae).quantize(decimal.Decimal('0.000'), rounding=decimal.ROUND_HALF_UP)
    return mae_3
assert result_num > 0 and isinstance(result_num, int), "结果数应为正整数"
# For every dataset: score each image by MAE (Ours vs. the mean of all other
# methods), rank the images three ways, write the top-N names to text files,
# and copy the corresponding pre-concatenated figures into summary folders.
for dataset in datasets:
    print(f'\n正在处理{dataset}...')
    if not os.path.exists(save_path + dataset):
        os.makedirs(save_path + dataset)
    # Image stems come from the RGB folder; GT and method maps share the stems.
    image_names = sorted(i.split('.')[0] for i in os.listdir(f'{datasets_root}{dataset}/RGB'))
    # {'<image>.jpg': [ours_mae, mean MAE of the other methods, ours - others]}
    mae_dataset_detail = {}
    for image in tqdm.tqdm(image_names):
        method_img_dict = {}
        for method in method_ls:
            method_img_dict[method] = f'{method_path + method}/{dataset}/{image}.png'
        gt_path = f'{datasets_root + dataset}/GT/{image}.png'
        # MAE of our prediction against GT.
        mae_ours = calc_mae(gt_path, method_img_dict['Ours'])
        # MAE of every comparison method against GT.
        mae_methods_ls = []
        for i_method in method_img_dict:
            if i_method == 'Ours':
                continue
            mae_methods_ls.append(calc_mae(gt_path, method_img_dict[i_method]))
        mae_methods_mean = decimal.Decimal(np.mean(mae_methods_ls)).quantize(decimal.Decimal('0.000'),
                                                                             rounding=decimal.ROUND_HALF_UP)
        mae_dataset_detail[f'{image}.jpg'] = [float(mae_ours), float(mae_methods_mean),
                                              float(mae_ours - mae_methods_mean)]
    # Rank images three ways.
    subtractive_grade_dict = dict(
        sorted(mae_dataset_detail.items(), key=lambda item: item[1][2]))  # biggest advantage over others first
    subtractive_grade_dict2 = dict(
        sorted(mae_dataset_detail.items(), key=lambda item: item[1][0]))  # lowest Ours MAE first
    our_grade_dict = dict(
        sorted(mae_dataset_detail.items(), key=lambda item: item[1][0], reverse=True))  # highest Ours MAE first (our failures)
    grade_image_ranking = [i for i in subtractive_grade_dict.keys()]
    grade_image_ranking2 = [i for i in subtractive_grade_dict2.keys()]
    our_worse_image_ranking = [i for i in our_grade_dict.keys()]
    better_dest_path = save_path + dataset + '/'
    worse_dest_path = save_path + dataset + '/'
    os.makedirs(better_dest_path, exist_ok=True)
    os.makedirs(worse_dest_path, exist_ok=True)
    # Write the top-N names of each ranking next to the dataset folder.
    with open(f'{better_dest_path}000_duibi_better_names.txt', 'w') as f:
        for img_name in grade_image_ranking[0:result_num]:
            f.write(f'{img_name}\n')
    with open(f'{better_dest_path}000_better_names.txt', 'w') as f:
        for img_name in grade_image_ranking2[0:result_num]:
            f.write(f'{img_name}\n')
    with open(f'{worse_dest_path}000_worse_names.txt', 'w') as f:
        for img_name in our_worse_image_ranking[0:result_num]:
            f.write(f'{img_name}\n')
    # If concatenated figures exist for this dataset, copy the top-ranked
    # ones into cross-dataset summary folders, prefixed with their rank.
    if os.path.exists(f'{save_path}/{dataset}/A_concate/'):
        dest_path = f'{save_path}/0000_duibi_better_image/'
        os.makedirs(dest_path, exist_ok=True)
        for step, better_concate_img in enumerate(grade_image_ranking[0:result_num]):
            shutil.copy(f'{save_path}/{dataset}/A_concate/{better_concate_img}',
                        f'{dest_path}duibi ranking-{step + 1}_{better_concate_img}')
        dest_path3 = f'{save_path}/0000_better_image/'
        os.makedirs(dest_path3, exist_ok=True)
        for step, better_concate_img3 in enumerate(grade_image_ranking2[0:result_num]):
            shutil.copy(f'{save_path}/{dataset}/A_concate/{better_concate_img3}',
                        f'{dest_path3}better ranking-{step + 1}_{better_concate_img3}')
        dest_path2 = f'{save_path}/0000_worse_image/'
        os.makedirs(dest_path2, exist_ok=True)
        for step, worse_concate_img in enumerate(our_worse_image_ranking[0:result_num]):
            shutil.copy(f'{save_path}/{dataset}/A_concate/{worse_concate_img}',
                        f'{dest_path2}worse ranking-{step + 1}_{worse_concate_img}')
图像挑选结果
![](https://img-blog.csdnimg.cn/direct/1fff6160e1b3425a942ae1cb3023c02a.jpeg)
![](https://img-blog.csdnimg.cn/direct/6519f08c52ab4e9594ab42da86ee5f19.jpeg)
![](https://img-blog.csdnimg.cn/direct/f5514c2dd25f4962a29434c1fdcacb9a.jpeg)