# Export a training split as resized PNGs: reads the image->mask-list mapping
# from image2label_train.json, resizes every image and mask to 256x256 with
# nearest-neighbour interpolation (preserves label values), and writes them
# under OUT_IMG_DIR / OUT_MASK_DIR/<image-name>/.
OUT_IMG_DIR = '/media/wagnchogn/data_2tb/path_sam/_0418_code/bash_code/sam_med_2d/sh/test_data_conesp/train_img'
OUT_MASK_DIR = '/media/wagnchogn/data_2tb/path_sam/_0418_code/bash_code/sam_med_2d/sh/test_data_conesp/train_mask'
TARGET_SIZE = (256, 256)

train_dict = load_json(os.path.join(data_route, dataset, 'image2label_train.json'))
print(f"check {dataset} train files")
os.makedirs(OUT_IMG_DIR, exist_ok=True)

for img_path, mask_files in train_dict.items():
    # Use the file stem rather than a hard-coded path-component index
    # (original did split('/')[9], which only works for one directory depth).
    # NOTE(review): assumes component 9 was the file name — confirm with data layout.
    img_name = os.path.splitext(os.path.basename(img_path))[0]

    ori_np_img = cv2.imread(img_path)
    if ori_np_img is None:
        # Skip unreadable/missing images instead of crashing in cv2.resize
        # (masks already got this guard; images did not).
        print(f"Mask files not exits. \033[1;31m{img_path}\033[0m")
        continue
    ori_np_img = cv2.resize(ori_np_img, TARGET_SIZE, interpolation=cv2.INTER_NEAREST)
    cv2.imwrite(os.path.join(OUT_IMG_DIR, img_name + '.png'), ori_np_img)

    # One sub-directory per image for its masks; created once, not per mask.
    mask_dir = os.path.join(OUT_MASK_DIR, img_name)
    os.makedirs(mask_dir, exist_ok=True)

    for mask_file in mask_files:
        ori_np_mask = cv2.imread(mask_file, 0)  # 0 = load as single-channel grayscale
        if ori_np_mask is None:
            print(f"Mask files not exits. \033[1;31m{mask_file}\033[0m")
            continue
        mask_name = os.path.splitext(os.path.basename(mask_file))[0]
        # Normalise 0/255 binary masks to 0/1 label values before saving.
        if ori_np_mask.max() == 255:
            ori_np_mask = ori_np_mask / 255
        ori_np_mask = cv2.resize(ori_np_mask, TARGET_SIZE, interpolation=cv2.INTER_NEAREST)
        cv2.imwrite(os.path.join(mask_dir, mask_name + '.png'), ori_np_mask)
# Note: all outputs are resized to 256x256.
# (Scrape artifact — original footer: "latest recommended article published 2024-10-10 09:39:14")