1. During environment setup you will often hit the situation shown below, which usually means the batchgeneratorsv2 package failed to install.
Solution: create a fresh environment, download nnunet.zip, and run
pip install -e .
It is best to install through a mirror for speed, like this:
pip install -e . -i https://pypi.tuna.tsinghua.edu.cn/simple
The installation then completes without issues.
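To quickly confirm that the editable install succeeded and actually pulled in batchgeneratorsv2, a small check like the following sketch can be run (the PyPI distribution names are my assumption):

import importlib.metadata

# Sketch: report the installed versions of the packages this guide relies on.
for pkg in ("nnunetv2", "batchgeneratorsv2"):
    try:
        print(pkg, importlib.metadata.version(pkg))
    except importlib.metadata.PackageNotFoundError:
        print(pkg, "is NOT installed")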
2. During training you may get the error RuntimeError: One or more background workers are no longer alive. Exiting. Please check the print statements above for the actual error message. This is usually caused by running out of disk space; expanding the disk fixes it.
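Before launching a long run it is worth checking how much space is left on the data disk. A minimal sketch (the mount point /root/autodl-tmp is just my setup, adjust as needed):

import shutil

# Sketch: print the free space on the disk that holds nnUNet_raw / preprocessed / results.
total, used, free = shutil.disk_usage("/root/autodl-tmp")
print(f"free: {free / 1024**3:.1f} GiB of {total / 1024**3:.1f} GiB")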
3. My raw data are PNG files. Before training, these 2D images have to be converted into 3D data, which in practice means 3D volumes whose z-axis has size 1.
So create a new project with a script 2DDataProcessTo3D.py:
import os
import random
from tqdm import tqdm
import SimpleITK as sitk
import cv2
import numpy as np


def SplitDataset(img_path, train_percent=0.9):
    # Randomly split the image file names into a training and a test list.
    data = os.listdir(img_path)
    train_images = []
    test_images = []
    num = len(data)
    train_num = int(num * train_percent)
    indexes = list(range(num))
    train = random.sample(indexes, train_num)
    for i in indexes:
        if i in train:
            train_images.append(data[i])
        else:
            test_images.append(data[i])
    return train_images, test_images


def conver(img_path, save_dir, mask_path=None, select_condition=None, mode="train"):
    # Convert 2D PNG images (and optional masks) into single-slice 3D NIfTI volumes
    # laid out in the nnU-Net raw-data folder structure.
    os.makedirs(save_dir, exist_ok=True)
    if mode == "train":
        savepath_img = os.path.join(save_dir, 'imagesTr')
        savepath_mask = os.path.join(save_dir, 'labelsTr')
    elif mode == "test":
        savepath_img = os.path.join(save_dir, 'imagesTs')
        savepath_mask = os.path.join(save_dir, 'labelsTs')
    os.makedirs(savepath_img, exist_ok=True)
    if mask_path is not None:
        os.makedirs(savepath_mask, exist_ok=True)
    ImgList = os.listdir(img_path)
    with tqdm(ImgList, desc="conver") as pbar:
        for name in pbar:
            # Only process the files selected for this split.
            if select_condition is not None and name not in select_condition:
                continue
            Img = cv2.imread(os.path.join(img_path, name))
            if mask_path is not None:
                Mask = cv2.imread(os.path.join(mask_path, name), 0)
                Mask = (Mask / 255).astype(np.uint8)  # map 0/255 mask values to 0/1 labels
                if Img.shape[:2] != Mask.shape:
                    Mask = cv2.resize(Mask, (Img.shape[1], Img.shape[0]))
            # Split the image into its three channels, each as a (1, H, W) volume.
            Img_Transposed = np.transpose(Img, (2, 0, 1))
            Img_0 = Img_Transposed[0].reshape(1, Img_Transposed[0].shape[0], Img_Transposed[0].shape[1])
            Img_1 = Img_Transposed[1].reshape(1, Img_Transposed[1].shape[0], Img_Transposed[1].shape[1])
            Img_2 = Img_Transposed[2].reshape(1, Img_Transposed[2].shape[0], Img_Transposed[2].shape[1])
            if mask_path is not None:
                Mask = Mask.reshape(1, Mask.shape[0], Mask.shape[1])
            # nnU-Net expects one file per channel, suffixed _0000/_0001/_0002.
            Img_0_name = name.split('.')[0] + '_0000.nii.gz'
            Img_1_name = name.split('.')[0] + '_0001.nii.gz'
            Img_2_name = name.split('.')[0] + '_0002.nii.gz'
            if mask_path is not None:
                Mask_name = name.split('.')[0] + '.nii.gz'
            Img_0_nii = sitk.GetImageFromArray(Img_0)
            Img_1_nii = sitk.GetImageFromArray(Img_1)
            Img_2_nii = sitk.GetImageFromArray(Img_2)
            if mask_path is not None:
                Mask_nii = sitk.GetImageFromArray(Mask)
            sitk.WriteImage(Img_0_nii, os.path.join(savepath_img, Img_0_name))
            sitk.WriteImage(Img_1_nii, os.path.join(savepath_img, Img_1_name))
            sitk.WriteImage(Img_2_nii, os.path.join(savepath_img, Img_2_name))
            if mask_path is not None:
                sitk.WriteImage(Mask_nii, os.path.join(savepath_mask, Mask_name))


if __name__ == "__main__":
    train_percent = 1
    img_path = r".../img"
    mask_path = r".../mask"
    output_folder = r"./dataset"
    os.makedirs(output_folder, exist_ok=True)
    train_images, test_images = SplitDataset(img_path, train_percent)
    conver(img_path, output_folder, mask_path, train_images, mode="train")
    conver(img_path, output_folder, mask_path, test_images, mode="test")
Save the output to /root/autodl-tmp/nnUNet/nnUNet-master/DATASET/nnUNet_raw/nnUNet_raw_data/Dataset600_Liver.
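nnU-Net also expects a dataset.json next to imagesTr/labelsTr. A minimal hand-written sketch for this dataset (channel and label names are my assumptions for a 3-channel PNG split into _0000/_0001/_0002 and a binary liver mask):

import json
import os

# Sketch: write a minimal nnU-Net v2 style dataset.json for Dataset600_Liver.
dataset_dir = r"/root/autodl-tmp/nnUNet/nnUNet-master/DATASET/nnUNet_raw/nnUNet_raw_data/Dataset600_Liver"
dataset_json = {
    "channel_names": {"0": "R", "1": "G", "2": "B"},   # one entry per _000X channel
    "labels": {"background": 0, "liver": 1},           # mask values after the /255 scaling above
    "numTraining": len(os.listdir(os.path.join(dataset_dir, "labelsTr"))),  # one label file per case
    "file_ending": ".nii.gz",
}
with open(os.path.join(dataset_dir, "dataset.json"), "w") as f:
    json.dump(dataset_json, f, indent=4)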
4. When training the 2D dataset, nnU-Net automatically runs inference at the end and produces the segmentation images of the test set along with the evaluation metrics; I added specificity and sensitivity to those metrics. The final numbers are written to /nnUNet-master/DATASET/nnUNet_trained_models/Dataset600_Liver/nnUNetTrainer__nnUNetPlans__2d/fold_2/validation/summary.json. The concrete changes, all in /nnUNet-master/nnunetv2/evaluation/evaluate_predictions.py, are as follows.
Update the compute_tp_fp_fn_tn function:
def compute_tp_fp_fn_tn(mask_ref: np.ndarray, mask_pred: np.ndarray, ignore_mask: np.ndarray = None):
    if ignore_mask is None:
        use_mask = np.ones_like(mask_ref, dtype=bool)
    else:
        use_mask = ~ignore_mask
    tp = np.sum((mask_ref & mask_pred) & use_mask)
    fp = np.sum(((~mask_ref) & mask_pred) & use_mask)
    fn = np.sum((mask_ref & (~mask_pred)) & use_mask)
    tn = np.sum(((~mask_ref) & (~mask_pred)) & use_mask)
    # return TP, FP, FN, TN
    return tp, fp, fn, tn
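As a quick sanity check of the counting logic, the function can be run on tiny boolean masks (my own toy example, not part of the nnU-Net code):

import numpy as np

# Toy 2x2 reference/prediction masks: expect TP = FP = FN = TN = 1.
mask_ref = np.array([[True, True], [False, False]])
mask_pred = np.array([[True, False], [True, False]])
tp, fp, fn, tn = compute_tp_fp_fn_tn(mask_ref, mask_pred)
print(tp, fp, fn, tn)                    # 1 1 1 1
print("Sensitivity:", tp / (tp + fn))    # TP / (TP + FN) = 0.5
print("Specificity:", tn / (tn + fp))    # TN / (TN + FP) = 0.5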
Update the compute_metrics function:
def compute_metrics(reference_file: str, prediction_file: str, image_reader_writer: BaseReaderWriter,
                    labels_or_regions: Union[List[int], List[Union[int, Tuple[int, ...]]]],
                    ignore_label: int = None) -> dict:
    # read the reference and prediction segmentations
    seg_ref, seg_ref_dict = image_reader_writer.read_seg(reference_file)
    seg_pred, seg_pred_dict = image_reader_writer.read_seg(prediction_file)
    ignore_mask = seg_ref == ignore_label if ignore_label is not None else None
    results = {}
    results['reference_file'] = reference_file
    results['prediction_file'] = prediction_file
    results['metrics'] = {}
    # compute the metrics for every label or region
    for r in labels_or_regions:
        results['metrics'][r] = {}
        mask_ref = region_or_label_to_mask(seg_ref, r)
        mask_pred = region_or_label_to_mask(seg_pred, r)
        tp, fp, fn, tn = compute_tp_fp_fn_tn(mask_ref, mask_pred, ignore_mask)
        # Dice and IoU
        if tp + fp + fn == 0:
            results['metrics'][r]['Dice'] = np.nan
            results['metrics'][r]['IoU'] = np.nan
        else:
            results['metrics'][r]['Dice'] = 2 * tp / (2 * tp + fp + fn)
            results['metrics'][r]['IoU'] = tp / (tp + fp + fn)
        # sensitivity and specificity
        sensitivity = tp / (tp + fn) if (tp + fn) > 0 else np.nan
        specificity = tn / (tn + fp) if (tn + fp) > 0 else np.nan
        # store everything in the results dict
        results['metrics'][r]['FP'] = fp
        results['metrics'][r]['TP'] = tp
        results['metrics'][r]['FN'] = fn
        results['metrics'][r]['TN'] = tn
        results['metrics'][r]['n_pred'] = fp + tp
        results['metrics'][r]['n_ref'] = fn + tp
        results['metrics'][r]['Sensitivity'] = sensitivity
        results['metrics'][r]['Specificity'] = specificity
    return results
With these changes, the specificity and sensitivity metrics are reported along with the predictions.
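To pull the new numbers out of summary.json afterwards, a small reader sketch can be used (that the per-label averages sit under a "mean" key is my assumption based on the summary.json my run produced; adjust if your file differs):

import json

# Sketch: print Dice / Sensitivity / Specificity averaged over the validation cases.
summary_path = ("/nnUNet-master/DATASET/nnUNet_trained_models/Dataset600_Liver/"
                "nnUNetTrainer__nnUNetPlans__2d/fold_2/validation/summary.json")
with open(summary_path) as f:
    summary = json.load(f)
for label, metrics in summary["mean"].items():   # key layout is an assumption
    print(label, "Dice:", metrics.get("Dice"),
          "Sensitivity:", metrics.get("Sensitivity"),
          "Specificity:", metrics.get("Specificity"))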