nnunet详细预处理过程

crop部分
重采样和归一化

重采样 代码部分整理

# -*- coding: utf-8 -*-
# @Time : 2022/8/24 13:43
# @Author :ly
# @Site : 
# @File : resmaple.py
# @Software: PyCharm
import SimpleITK
import numpy as np


### 获得重采样的目标值
def get_target_spacing(spacings, sizes):
    """Compute the dataset-wide target spacing for resampling (nnU-Net heuristic).

    The target is the per-axis median spacing across all cases. If the coarsest
    axis is strongly anisotropic (spacing more than 3x the other axes) while
    also having comparatively few voxels, that axis is relaxed to the 10th
    percentile of its spacings so it is not over-resampled.

    :param spacings: per-case spacing vectors, stackable with np.vstack
    :param sizes: per-case size vectors, stackable with np.vstack
    :return: per-axis target spacing as a numpy array
    """
    all_spacings = np.vstack(spacings)
    all_sizes = np.vstack(sizes)

    target = np.percentile(all_spacings, 50, 0)
    target_size = np.percentile(all_sizes, 50, 0)

    worst_axis = np.argmax(target)
    remaining_axes = [ax for ax in range(len(target)) if ax != worst_axis]
    remaining_spacings = [target[ax] for ax in remaining_axes]
    remaining_sizes = [target_size[ax] for ax in remaining_axes]

    # Anisotropy handling: the axis only counts as anisotropic when its
    # spacing is much coarser AND it holds far fewer voxels than the others.
    spacing_is_aniso = target[worst_axis] > (3 * max(remaining_spacings))
    voxels_are_aniso = target_size[worst_axis] * 3 < min(remaining_sizes)

    if spacing_is_aniso and voxels_are_aniso:
        worst_axis_spacings = all_spacings[:, worst_axis]
        relaxed_spacing = np.percentile(worst_axis_spacings, 10)
        # Don't let this axis end up with a finer spacing than the other axes.
        if relaxed_spacing < max(remaining_spacings):
            relaxed_spacing = max(max(remaining_spacings), relaxed_spacing) + 1e-5
        target[worst_axis] = relaxed_spacing
    return target


from collections import OrderedDict
from skimage.transform import resize
from scipy.ndimage.interpolation import map_coordinates
from batchgenerators.augmentations.utils import resize_segmentation
def resample_data_or_seg(data, new_shape, is_seg, axis=None, order=3, do_separate_z=False, order_z=0):
    """
    Resample a channel-first 4D array to a new spatial shape.

    When do_separate_z is True, slices are first resized in-plane with spline
    order ``order``, then interpolated along the anisotropic axis with the
    (usually lower) order ``order_z`` — this avoids smearing along a coarse z.

    :param data: 4D numpy array, shape (c, x, y, z)
    :param new_shape: target spatial shape (3 entries)
    :param is_seg: if True, use resize_segmentation (label-preserving) instead of skimage resize
    :param axis: sequence containing the single anisotropic axis index; only used when do_separate_z
    :param order: spline interpolation order for the in-plane resize
    :param do_separate_z: resample the anisotropic axis separately (see above)
    :param order_z: interpolation order along the anisotropic axis; only applies if do_separate_z is True
    :return: resampled array cast back to the input dtype, or the input unchanged if shapes already match
    """
    assert len(data.shape) == 4, "data must be (c, x, y, z)"
    if is_seg:
        # resize_segmentation handles labels safely; it takes no extra kwargs.
        resize_fn = resize_segmentation
        kwargs = OrderedDict()
    else:
        resize_fn = resize
        kwargs = {'mode': 'edge', 'anti_aliasing': False}
    dtype_data = data.dtype
    # Spatial shape only (drop the channel axis).
    shape = np.array(data[0].shape)
    new_shape = np.array(new_shape)
    if np.any(shape != new_shape):
        data = data.astype(float)
        if do_separate_z:
            print("separate z, order in z is", order_z, "order inplane is", order)
            assert len(axis) == 1, "only one anisotropic axis supported"
            axis = axis[0]
            # In-plane target shape: the two axes other than the anisotropic one.
            if axis == 0:
                new_shape_2d = new_shape[1:]
            elif axis == 1:
                new_shape_2d = new_shape[[0, 2]]
            else:
                new_shape_2d = new_shape[:-1]

            reshaped_final_data = []
            for c in range(data.shape[0]):
                reshaped_data = []
                # Resize every slice perpendicular to the anisotropic axis.
                for slice_id in range(shape[axis]):
                    if axis == 0:
                        reshaped_data.append(resize_fn(data[c, slice_id], new_shape_2d, order, **kwargs))
                    elif axis == 1:
                        reshaped_data.append(resize_fn(data[c, :, slice_id], new_shape_2d, order, **kwargs))
                    else:
                        reshaped_data.append(resize_fn(data[c, :, :, slice_id], new_shape_2d, order,
                                                       **kwargs))
                reshaped_data = np.stack(reshaped_data, axis)
                if shape[axis] != new_shape[axis]:
                    # The anisotropic axis still has its original length; interpolate
                    # it to new_shape[axis] via map_coordinates. In-plane axes are
                    # already at target size, so their scale factors are 1.

                    # The following few lines are blatantly copied and modified from sklearn's resize()
                    rows, cols, dim = new_shape[0], new_shape[1], new_shape[2]
                    orig_rows, orig_cols, orig_dim = reshaped_data.shape

                    row_scale = float(orig_rows) / rows
                    col_scale = float(orig_cols) / cols
                    dim_scale = float(orig_dim) / dim

                    # Sample at pixel centers (the +0.5/-0.5 shift).
                    map_rows, map_cols, map_dims = np.mgrid[:rows, :cols, :dim]
                    map_rows = row_scale * (map_rows + 0.5) - 0.5
                    map_cols = col_scale * (map_cols + 0.5) - 0.5
                    map_dims = dim_scale * (map_dims + 0.5) - 0.5

                    coord_map = np.array([map_rows, map_cols, map_dims])
                    if not is_seg or order_z == 0:
                        # Images, or segmentations with order 0 (nearest): direct
                        # interpolation cannot invent labels, so it is safe here.
                        reshaped_final_data.append(map_coordinates(reshaped_data, coord_map, order=order_z,
                                                                   mode='nearest')[None])
                    else:
                        # Segmentation with order_z > 0: interpolate each label's
                        # binary mask separately so labels never blend into
                        # meaningless intermediate values.
                        unique_labels = np.unique(reshaped_data)
                        reshaped = np.zeros(new_shape, dtype=dtype_data)

                        for i, cl in enumerate(unique_labels):
                            reshaped_multihot = np.round(
                                map_coordinates((reshaped_data == cl).astype(float), coord_map, order=order_z,
                                                mode='nearest'))
                            reshaped[reshaped_multihot > 0.5] = cl
                        reshaped_final_data.append(reshaped[None])
                else:
                    reshaped_final_data.append(reshaped_data[None])
            reshaped_final_data = np.vstack(reshaped_final_data)
        else:
            # Isotropic case: one 3D resize per channel.
            print("no separate z, order", order)
            reshaped = []
            for c in range(data.shape[0]):
                reshaped.append(resize_fn(data[c], new_shape, order, **kwargs)[None])
            reshaped_final_data = np.vstack(reshaped)
        return reshaped_final_data.astype(dtype_data)
    else:
        print("no resampling necessary")
        return data
def get_lowres_axis(new_spacing):
    """Return the index (or indices) of the axis with the coarsest spacing.

    An axis whose spacing equals the maximum divides the maximum to exactly 1;
    all such axes are returned, so ties yield multiple indices.
    """
    spacing_arr = np.array(new_spacing)
    ratio = max(new_spacing) / spacing_arr
    return np.where(ratio == 1)[0]



# --- Demo: load a NIfTI volume and resample it with resample_data_or_seg ---
nii = r"X:\nnunetfinal\dataset\input\hippocampus_009.nii.gz"  # hard-coded local path
data = SimpleITK.ReadImage(nii)
ct_array = SimpleITK.GetArrayFromImage(data)  # SimpleITK yields a (z, y, x) numpy array
ct_array =ct_array[np.newaxis,...]  # add a channel axis -> (1, z, y, x)
new_shape = np.array([32 ,55 ,36])  # target spatial shape
axis = np.array([1])  # anisotropic axis index — presumably chosen for this dataset; verify
print(ct_array.shape)
data_reshaped = resample_data_or_seg(ct_array, new_shape, False, axis, order =3, do_separate_z=True,order_z=0)

实际在进行推理的时候我们没有必要这么麻烦,只要使用 zoom 就可以完成这个操作。

# Inference-time shortcut: derive the zoom factor from old/new spacing and let
# scipy.ndimage.zoom do the whole resampling in one call.
# NOTE(review): `imgs`, `spacing`, `new_spacing`, `order` and `zoom` are not
# defined here — this is an illustrative fragment, not runnable code.
new_shape = np.round(np.array(imgs.shape) * np.array(spacing) / np.array(new_spacing))
#true_spacing = spacing * imgs.shape / new_shape
resize_factor = new_shape / imgs.shape
# order = 1 -> bilinear interpolation
# order = 0 -> nearest-neighbour interpolation
# order = 3 -> spline interpolation (the default)
imgs = zoom(imgs, resize_factor, mode='nearest', order=order)

scipy.ndimage.zoom(input, zoom, output=None, order=3, mode='constant', cval=0.0, prefilter=True)
scipy.ndimage.zoom(

input,

#array—输入多维矩阵

zoom,

#float/sequence—沿轴的缩放系数,如果是浮点型,表示每个轴的缩放是相同的,如果是序列,zoom应包含每个轴的缩放值;

output=None,

#array or dtype—放置输出的数组,或返回数组的dtype,默认情况下,将创建与输入相同的dtype数据

order=3,

#int—样条插值的阶数,默认为3,顺序必须在0-5范围内;

mode='constant',

#{'reflect', 'constant', 'nearest', 'mirror', 'wrap'}—mode参数确定输入数组如何扩展到其边界之外。 默认为"constant";

cval=0.0,

#scalar—如果模式为“constant”,则填充输入的过去边缘的值, 默认值为0.0。

prefilter=True)

#bool—确定在插值之前是否使用spline_filter对输入数组进行预过滤。 默认值为True,如果order> 1,将创建一个过滤值的临时float64数组。如果将此值设置为False,如果order> 1,输出将略微模糊,除非输入是预 过滤的,即它是调用的结果 原始输入上的spline_filter

  • 2
    点赞
  • 2
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值