LITS Challenge Liver Segmentation

This post walks through the liver segmentation pipeline used for the LITS challenge: the configuration, CRF-based post-processing and its parameter optimisation, the experiment data and validation steps for the individual stages, and the use of numpy_data_layer for data handling.

config

import logging

# Logging level
log_level = logging.INFO

logfile = 'output.txt'

# Number of CPUs used for parallel processing
N_PROC = 14

# Maximum number of iterations before optimisation is stopped
MAX_N_IT = -1


# Image/Seg shape
slice_shape = (388,388)

# Initial Parameters
params_initial_liver = [\
3,  # pos_x_std
0.75,  # pos_y_std
3,  # pos_z_std
60,  # bilateral_x_std
15,  # bilateral_y_std
15,  # bilateral_z_std
20.0, # bilateral_intensity_std
0.75,  # pos_w
1.0   # bilateral_w  # we fix this one during optimization
]

params_initial_lesion = [\
3.0,  # pos_x_std
3.0,  # pos_y_std
3.0,  # pos_z_std
60.0,  # bilateral_x_std
60.0,  # bilateral_y_std
60.0,  # bilateral_z_std
20.0, # bilateral_intensity_std
3.0,  # pos_w
10.0   # bilateral_w  # we fix this one during optimization
]


### CHOOSE LIVER OR LESION
params_initial = params_initial_liver
target_label = 1


# Fixed CRF Parameters
max_iterations = 20
dynamic_z = False
ignore_memory = True




###########################
##### 3DIRCA DATASET ######
###########################


test_set=[
(82, '/home/guest/training/volume-82.npy', '/home/guest/training/segmentation-82.npy') ,
(74, '/home/guest/training/volume-74.npy', '/home/guest/training/segmentation-74.npy') ,
(125, '/home/guest/training/volume-125.npy', '/home/guest/training/segmentation-125.npy') ,
(11, '/home/guest/training/volume-11.npy', '/home/guest/training/segmentation-11.npy') ,
(89, '/home/guest/training/volume-89.npy', '/home/guest/training/segmentation-89.npy') ,
(78, '/home/guest/training/volume-78.npy', '/home/guest/training/segmentation-78.npy') ,
(64, '/home/guest/training/volume-64.npy', '/home/guest/training/segmentation-64.npy') ,
(126, '/home/guest/training/volume-126.npy', '/home/guest/training/segmentation-126.npy') ,
(129, '/home/guest/training/volume-129.npy', '/home/guest/training/segmentation-129.npy') ,
(114, '/home/guest/training/volume-114.npy', '/home/guest/training/segmentation-114.npy') ,
(37, '/home/guest/training/volume-37.npy', '/home/guest/training/segmentation-37.npy') ,
(25, '/home/guest/training/volume-25.npy', '/home/guest/training/segmentation-25.npy') ,
(85, '/home/guest/training/volume-85.npy', '/home/guest/training/segmentation-85.npy') ,
(80, '/home/guest/training/volume-80.npy', '/home/guest/training/segmentation-80.npy') ,
(27, '/home/guest/training/volume-27.npy', '/home/guest/training/segmentation-27.npy') ,
(18, '/home/guest/training/volume-18.npy', '/home/guest/training/segmentation-18.npy') ,
(69, '/home/guest/training/volume-69.npy', '/home/guest/training/segmentation-69.npy') ,
(40, '/home/guest/training/volume-40.npy', '/home/guest/training/segmentation-40.npy') ,
(61, '/home/guest/training/volume-61.npy', '/home/guest/training/segmentation-61.npy') ,
(117, '/home/guest/training/volume-117.npy', '/home/guest/training/segmentation-117.npy') ,
(44, '/home/guest/training/volume-44.npy', '/home/guest/training/segmentation-44.npy') ,
(26, '/home/guest/training/volume-26.npy', '/home/guest/training/segmentation-26.npy') ,
(91, '/home/guest/training/volume-91.npy', '/home/guest/training/segmentation-91.npy') ,
(65, '/home/guest/training/volume-65.npy', '/home/guest/training/segmentation-65.npy') ,
(55, '/home/guest/training/volume-55.npy', '/home/guest/training/segmentation-55.npy') ,
(5, '/home/guest/training/volume-5.npy', '/home/guest/training/segmentation-5.npy') ,
(77, '/home/guest/training/volume-77.npy', '/home/guest/training/segmentation-77.npy') ,
(12, '/home/guest/training/volume-12.npy', '/home/guest/training/segmentation-12.npy') ,
(28, '/home/guest/training/volume-28.npy', '/home/guest/training/segmentation-28.npy') ,
(6, '/home/guest/training/volume-6.npy', '/home/guest/training/segmentation-6.npy') ,
(79, '/home/guest/training/volume-79.npy', '/home/guest/training/segmentation-79.npy') ,
(84, '/home/guest/training/volume-84.npy', '/home/guest/training/segmentation-84.npy') ,
(103, '/home/guest/training/volume-103.npy', '/home/guest/training/segmentation-103.npy') ,
(101, '/home/guest/training/volume-101.npy', '/home/guest/training/segmentation-101.npy') ,
(106, '/home/guest/training/volume-106.npy', '/home/guest/training/segmentation-106.npy') ,
(59, '/home/guest/training/volume-59.npy', '/home/guest/training/segmentation-59.npy') ,
(45, '/home/guest/training/volume-45.npy', '/home/guest/training/segmentation-45.npy') ,
(53, '/home/guest/training/volume-53.npy', '/home/guest/training/segmentation-53.npy') ,
(41, '/home/guest/training/volume-41.npy', '/home/guest/training/segmentation-41.npy') ,
(121, '/home/guest/training/volume-121.npy', '/home/guest/training/segmentation-121.npy')]


# Select dataset
#dataset = [irca_train_fold1, irca_test_fold1,\
#		irca_train_fold2, irca_test_fold2,\
#		irca_train_fold3, irca_test_fold3,\
#		irca_train_fold4, irca_test_fold4]
#
# Test dataset
dataset = test_set

CRF Optimization

#! /usr/bin/env python


import numpy as np

import logging
import config

from denseinference import CRFProcessor

from multiprocessing import Pool, Manager

import nibabel as nib

import scipy.misc
import os

import medpy.metric

# global list for volumes
volumes = []

# best results so far
best_dice = -1
best_params = None

n_iterations = 0

IMG_DTYPE = np.float
SEG_DTYPE = np.uint8


def to_scale(img, shape=None):
    if shape is None:
        shape = config.slice_shape

    height, width = shape
    if img.dtype == SEG_DTYPE:
        return scipy.misc.imresize(img, (height, width), interp="nearest").astype(SEG_DTYPE)
    elif img.dtype == IMG_DTYPE:
        factor = 256.0 / np.max(img)
        return (scipy.misc.imresize(img, (height, width), interp="nearest") / factor).astype(IMG_DTYPE)
    else:
        raise TypeError(
            'Error. To scale the image array, its type must be np.uint8 or np.float64. (' + str(img.dtype) + ')')
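
# Note: scipy.misc.imresize was deprecated in SciPy 1.0 and removed in 1.3.
# A minimal sketch (an assumption, not part of the original pipeline) of the
# nearest-neighbour resize call above, using skimage instead:
def resize_nearest(img, shape):
    from skimage.transform import resize
    # order=0 selects nearest-neighbour interpolation, matching interp="nearest"
    return resize(img, shape, order=0, preserve_range=True,
                  anti_aliasing=False).astype(img.dtype)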


def norm_hounsfield_dyn(arr, c_min=0.1, c_max=0.3):
    """ Converts from hounsfield units to float64 image with range 0.0 to 1.0 """
    # calc min and max
    min, max = np.amin(arr), np.amax(arr)
    if min <= 0:
        arr = np.clip(arr, min * c_min, max * c_max)
        # right shift to zero
        arr = np.abs(min * c_min) + arr
    else:
        arr = np.clip(arr, min, max * c_max)
        # left shift to zero
        arr = arr - min
    # normalization
    norm_fac = np.amax(arr)
    if norm_fac != 0:
        norm = np.divide(
            np.multiply(arr, 255),
            np.amax(arr))
    else:  # don't divide by 0
        norm = np.multiply(arr, 255)

    norm = np.clip(np.multiply(norm, 0.00390625), 0, 1)
    return norm
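
# Quick sanity check, a sketch with assumed HU values (air, water, liver, bone):
# >>> norm_hounsfield_dyn(np.array([[-1000.0, 0.0], [60.0, 400.0]]))
# every value is windowed, shifted to zero and scaled into [0, 1]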


def histeq_processor(img):
    """Histogram equalization"""“”“直方图均衡”“”
    nbr_bins = 256
    # get image histogram
    imhist, bins = np.histogram(img.flatten(), nbr_bins, normed=True)
    cdf = imhist.cumsum()  # cumulative distribution function
    cdf = 255 * cdf / cdf[-1]  # normalize
    # use linear interpolation of cdf to find new pixel values
    original_shape = img.shape
    img = np.interp(img.flatten(), bins[:-1], cdf)
    img = img / 256.0
    return img.reshape(original_shape)


def process_img_label(imgvol, segvol):
    """
    Process a given image volume and its label volume and return both as new copies
    :param imgvol:
    :param segvol:
    :return:
    """
    imgvol_downscaled = np.zeros((config.slice_shape[0], config.slice_shape[1], imgvol.shape[2]))
    segvol_downscaled = np.zeros((config.slice_shape[0], config.slice_shape[1], imgvol.shape[2]))
    imgvol[imgvol > 1200] = 0

    for i in range(imgvol.shape[2]):
        # Get the current slice, normalize and downscale
        slice = np.copy(imgvol[:, :, i])
        slice = norm_hounsfield_dyn(slice)
        slice = to_scale(slice, config.slice_shape)
        slice = histeq_processor(slice)
        imgvol_downscaled[:, :, i] = slice
        # downscale the label slice for the CRF
        segvol_downscaled[:, :, i] = to_scale(segvol[:, :, i], config.slice_shape)

    return [imgvol_downscaled, segvol_downscaled]
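
# E.g. a (512, 512, N) LITS CT volume and its label volume both come back
# resampled in-plane to config.slice_shape (388, 388), one slice at a time.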


def crf_worker(img, label, probvol, crfsettings):
    """
    Worker function for Parallel CRF Processing of multiple Volumes
    :param img:
    :param label:
    :param probvol:
    :param crfsettings:
    :return:  dice
    """
    pro = CRFProcessor.CRF3DProcessor(**crfsettings)
    # print "started crf"#打印“开始CRF”
    # print np.min(img), np.max(img)  #打印np.min(IMG),np.max(IMG)
    result = pro.set_data_and_run(img, probvol)
    # print np.unique(result)  #打印np.unique(结果)
    # print "done with crf"#打印“慢性肾功能衰竭做”
    _dice = medpy.metric.dc(result == 1, label == config.target_label)
    print "Dice of single volume: " + str(_dice)

    # not sure if that's necessary
    del pro

    return _dice


def run_crf(params, grad):
    """
    Objective function for the CRF parameter search.
    :param params: flat list of the nine CRF parameters (see params_initial in config)
    :param grad: gradient array required by the NLopt objective signature (unused)
    :return: average dice score over all volumes
    """
    global best_dice, best_params, volumes, n_iterations

    n_iterations += 1
    # Stupid NLopt: it always wants a grad, even for algorithms that don't use
    # gradients. If grad is not empty, something is wrong.

    pos_x_std, pos_y_std, pos_z_std, bilateral_x_std, bilateral_y_std, bilateral_z_std, bilateral_intensity_std, pos_w, bilateral_w = params

    # 	logging.info("=======================")
    # 	logging.info("Running CRF with the following parameters:")
    # 	logging.info("pos x std: " + str(pos_x_std))
    # 	logging.info("pos y std: " + str(pos_y_std))
    # 	logging.info("pos z std: " + str(pos_z_std))
    # 	logging.info("pos w: " + str(pos_w))
    # 	logging.info("bilateral x std: " + str(bilateral_x_std))
    # 	logging.info("bilateral y std: " + str(bilateral_y_std))
    # 	logging.info("bilateral z std: " + str(bilateral_z_std))
    # 	logging.info("bilateral intensity std: " + str(bilateral_intensity_std))
    # 	logging.info("bilateral w: " + str(bilateral_w))

    # Assemble the CRF settings for this iteration
    crfsettings = dict(max_iterations=config.max_iterations,
                       pos_x_std=pos_x_std,
                       pos_y_std=pos_y_std,
                       pos_z_std=pos_z_std,
                       pos_w=pos_w,
                       bilateral_x_std=bilateral_x_std,
                       bilateral_y_std=bilateral_y_std,
                       bilateral_z_std=bilateral_z_std,
                       bilateral_intensity_std=bilateral_intensity_std,
                       bilateral_w=bilateral_w,

                       dynamic_z=config.dynamic_z,
                       ignore_memory=config.ignore_memory)

    # list of dice scores
    dices = []
    # list of async results
    results = []

    pool = Pool(processes=config.N_PROC)

    # start async workers
    for img, label, voxelsize, prob in volumes:
        # Normalize z std according to volume's voxel slice spacing
        copy_crfsettings = dict(crfsettings)
        copy_crfsettings['pos_z_std'] *= voxelsize[2]  # z std grows with larger spacing between slices
        # bug fix: pass the per-volume copy, not the unscaled crfsettings
        results.append(pool.apply_async(crf_worker, (img, label, prob, copy_crfsettings)))
    # dices.append(crf_worker(img, label, prob, copy_crfsettings))

    # get results
    for p in results:
        dices.append(p.get())

    pool.close()

    dice_average = np.average(dices)

    logging.info("-----------------------")
    logging.info("Iteration迭代 : " + str(n_iterations))
    logging.info("Best avg dice was最好的平均值是: " + str(best_dice))
    logging.info("   with best params : " + str(best_params))
    logging.info("Current avg dice is当前平均骰子是: " + str(dice_average))
    logging.info("   with current params :" + str(params))
    logging.info("=======================")

    if dice_average >= best_dice:
        best_params = params
        best_dice = dice_average
        print 'FOUND BETTER PARAMS'

    return dice_average
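
The script defines run_crf with the (params, grad) signature that NLopt's objective callback expects, but the driver itself is not shown. Below is a minimal sketch of one, under two loud assumptions: the network's probability maps are stored as 'probabilities-<id>.npy' next to the volumes (hypothetical naming), and voxel spacing is hard-coded because the .npy files carry no header.

if __name__ == '__main__':
    import nlopt  # derivative-free optimiser, matching the unused grad argument

    logging.basicConfig(filename=config.logfile, level=config.log_level)

    # Load and preprocess every volume once; run_crf reuses the global list.
    for case_id, vol_path, seg_path in config.dataset:
        img, seg = process_img_label(np.load(vol_path), np.load(seg_path))
        prob = np.load(vol_path.replace('volume', 'probabilities'))  # hypothetical path
        voxelsize = (1.0, 1.0, 1.0)  # assumption: real spacing comes from the original NIfTI header
        volumes.append((img, seg, voxelsize, prob))

    # BOBYQA is gradient-free, so the grad passed to run_crf stays empty.
    opt = nlopt.opt(nlopt.LN_BOBYQA, len(config.params_initial))
    opt.set_max_objective(run_crf)
    if config.MAX_N_IT > 0:
        opt.set_maxeval(config.MAX_N_IT)
    best_found = opt.optimize(list(config.params_initial))
    logging.info("Optimisation finished, best params: " + str(best_found))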

Experiment Data

import logging

# Logging level
log_level = logging.WARNING

# Takes only the first n volumes. Useful to create small datasets fast
max_volumes = -1


# Pre-write processing
# Processors applied to images/segmentations right before persisting them to database (after augmentation...etc)
# A processor takes 2 images img and seg, and returns a tuple (img,seg)
# Available processors:
#  - processors.zoomliver_UNET_processor
#  - processors.plain_UNET_processor
#  - processors.histeq_processor
#  - processors.liveronly_label_processor
from numpy_data_layer import processors
processors_list = [processors.plain_UNET_processor]
# Step 1
#processors_list = [processors.histeq_processor, processors.plain_UNET_processor, processors.liveronly_label_processor]
#processors_list = [processors.histeq_processor, processors.plain_UNET_processor][1:]
# Step 2
#processors_list = [processors.remove_non_liver, processors.zoomliver_UNET_processor]
#processors_list = [processors.histeq_processor]
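
# A minimal sketch of a custom processor honouring the contract above
# (hypothetical, not one of the shipped processors): it merges the lesion
# label into the liver label so only liver-vs-background remains.
import numpy as np
def merge_lesion_into_liver_processor(img, seg):
    seg = np.copy(seg)
    seg[seg == 2] = 1  # labels order below: tissue=0, liver=1, lesion=2
    return (img, seg)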

# Shuffle slices and their augmentations globally across the database
# You might want to set this to False if dataset = test_set
shuffle_slices = True

# Augmentation factor
augmentation_factor = 10

# ** Labels order : tissue=0, liver=1, lesion=2
# ** We call a slice a "lesion slice" if the MAX label it has is 2
# slice options: liver-lesion, stat-batch, dyn-batch
#
# liver-only:   Include only slices which are labeled with liver or lower (1 or 0)
# lesion-only:  Include only slices which are labeled with lesion or lower (2, 1 or 0)
# liver-lesion: Include only slices which are labeled with liver or lesion (slices with max=2 or with max=1)
select_slices = "all"
#select_slices = 'liver-lesion'

more_small_livers = False
# Percentage of the image, such that any liver smaller than that is considered small
small_liver_percent = 2

decrease_empty_slices = 0.9
# data=[
# (49, '/home/guest/training/volume-49.nii', '/home/guest/training/segmentation-49.nii') ,
# (42, '/home/guest/training/volume-42.nii', '/home/guest/training/segmentation-42.nii') ,
# (23, '/home/guest/training/volume-23.nii', '/home/guest/training/segmentation-23.nii') ,
# (26, '/home/guest/training/volume-26.nii', '/home/guest/training/segmentation-26.nii') ,
# (37, '/home/guest/training/volume-37.nii', '/home/guest/training/segmentation-37.nii') ,
# (46, '/home/guest/training/volume-46.nii', '/home/guest/training/segmentation-46.nii') ,
# (2, '/home/guest/training/volume-2.nii', '/home/guest/training/segmentation-2.nii') ,
# (24, '/home/guest/training/volume-24.nii', '/home/guest/training/segmentation-24.nii') ,
# (44, '/home/guest/training/volume-44.nii', '/home/guest/training/segmentation-44.nii') ,
# (6, '/home/guest/training/volume-6.nii', '/home/guest/training/segmentation-6.nii') ,
# (25, '/home/guest/training/volume-25.nii', '/home/guest/training/segmentation-25.nii') ,
# (18, '/home/guest/training/volume-18.nii', '/home/guest/training/segmentation-18.nii') ,
# (16, '/home/guest/training/volume-16.nii', '/home/guest/training/segmentation-16.nii') ,
# (60, '/home/guest/training/volume-60.nii', '/home/guest/training/segmentation-60.nii') ,
# (59, '/home/guest/training/volume-59.nii', '/home/guest/training/segmentation-59.nii') ,
# (33, '/home/guest/training/volume-33.nii', '/home/guest/training/segmentation-33.nii') ,
# (58, '/home/guest/training/volume-58.nii', '/home/guest/training/segmentation-58.nii') ,
# (31, '/home/guest/training/volume-31.nii', '/home/guest/training/segmentation-31.nii') ,
# (54, '/home/guest/training/volume-54.nii', '/home/guest/training/segmentation-54.nii') ,
# (52, '/home/guest/training/volume-52.nii', '/home/guest/training/segmentation-52.nii') ,
# (12, '/home/guest/training/volume-12.nii', '/home/guest/training/segmentation-12.nii') ,
# (41, '/home/guest/training/volume-41.nii', '/home/guest/training/segmentation-41.nii') ,
# (56, '/home/guest/training/volume-56.nii', '/home/guest/training/segmentation-56.nii') ,
# (14, '/home/guest/training/volume-14.nii', '/home/guest/training/segmentation-14.nii') ,
# (4, '/home/guest/training/volume-4.nii', '/home/guest/training/segmentation-4.nii') ,
# (51, '/home/guest/training/volume-51.nii', '/home/guest/training/segmentation-51.nii') ,
# (47, '/home/guest/training/volume-47.nii', '/home/guest/training/segmentation-47.nii') ,
# (38, '/home/guest/training/volume-38.nii', '/home/guest/training/segmentation-38.nii') ,
# (34, '/home/guest/training/volume-34.nii', '/home/guest/training/segmentation-34.nii') ,
# (19, '/home/guest/training/volume-19.nii', '/home/guest/training/segmentation-19.nii') ,
# (43, '/home/guest/training/volume-43.nii', '/home/guest/training/segmentation-43.nii') ,
# (9, '/home/guest/training/volume-9.nii', '/home/guest/training/segmentation-9.nii') ,
# (15, '/home/guest/training/volume-15.nii', '/home/guest/training/segmentation-15.nii') ,
# (39, '/home/guest/training/volume-39.nii', '/home/guest/training/segmentation-39.nii') ,
# (20, '/home/guest/training/volume-20.nii', '/home/guest/training/segmentation-20.nii') ,
# (17, '/home/guest/training/volume-17.nii', '/home/guest/training/segmentation-17.nii') ,
# (55, '/home/guest/training/volume-55.nii', '/home/guest/training/segmentation-55.nii') ,
# (30, '/home/guest/training/volume-30.nii', '/home/guest/training/segmentation-30.nii') ,
# (29, '/home/guest/training/volume-29.nii', '/home/guest/training/segmentation-29.nii') ,
# (7, '/home/guest/training/volume-7.nii', '/home/guest/training/segmentation-7.nii') ,
# (22, '/home/guest/training/volume-22.nii', '/home/guest/training/segmentation-22.nii') ,
# (8, '/home/guest/training/volume-8.nii', '/home/guest/training/segmentation-8.nii') ,
# (13, '/home/guest/training/volume-13.nii', '/home/guest/training/segmentation-13.nii') ,
# (40, '/home/guest/training/volume-40.nii', '/home/guest/training/segmentation-40.nii') ,
# (0, '/home/guest/training/volume-0.nii', '/home/guest/training/segmentation-0.nii') ,
# (53, '/home/guest/training/volume-53.nii', '/home/guest/training/segmentation-53.nii') ,
# (5, '/home/guest/training/volume-5.nii', '/home/guest/training/segmentation-5.nii') ,
# (1, '/home/guest/training/volume-1.nii', '/home/guest/training/segmentation-1.nii') ,
# (36, '/home/guest/training/volume-36.nii', '/home/guest/training/segmentation-36.nii') ,
# (10, '/home/guest/training/volume-10.nii', '/home/guest/training/segmentation-10.nii') ,
# (48, '/home/guest/training/volume-48.nii', '/home/guest/training/segmentation-48.nii') ,
# (28, '/home/guest/training/volume-28.nii', '/home/guest/training/segmentation-28.nii') ,
# (11, '/home/guest/training/volume-11.nii', '/home/guest/training/segmentation-11.nii') ,
# (50, '/home/guest/training/volume-50.nii', '/home/guest/training/segmentation-50.nii') ,
# (45, '/home/guest/training/volume-45.nii', '/home/guest/training/segmentation-45.nii') ,
# (3, '/home/guest/training/volume-3.nii', '/home/guest/training/segmentation-3.nii') ,
# (57, '/home/guest/training/volume-57.nii', '/home/guest/training/segmentation-57.nii') ,
# (35, '/home/guest/training/volume-35.nii', '/home/guest/training/segmentation-35.nii') ,
# (32, '/home/guest/training/volume-32.nii', '/home/guest/training/segmentation-32.nii') ,
# (21, '/home/guest/training/volume-21.nii', '/home/guest/training/segmentation-21.nii') ,
# (27, '/home/guest/training/volume-27.nii', '/home/guest/training/segmentation-27.nii')]
data=[
(35, '/home/guest/training/volume-35.npy', '/home/guest/training/segmentation-35.npy') ,
(127, '/home/guest/training/volume-127.npy', '/home/guest/training/segmentation-127.npy') ,
(122, '/home/guest/training/volume-122.npy', '/home/guest/training/segmentation-122.npy') ,
(83, '/home/guest/training/volume-83.npy', '/home/guest/training/segmentation-83.npy') ,
(123, '/home/guest/training/volume-123.npy', '/home/guest/training/segmentation-123.npy') ,
(93, '/home/guest/training/volume-93.npy', '/home/guest/training/segmentation-93.npy') ,
(108, '/home/guest/training/volume-108.npy', '/home/guest/training/segmentation-108.npy') ,
(98, '/home/guest/training/volume-98.npy', '/home/guest/training/segmentation-98.npy') ,
(46, '/home/guest/training/volume-46.npy', '/home/guest/training/segmentation-46.npy') ,
(51, '/home/guest/training/volume-51.npy', '/home/guest/training/segmentation-51.npy') ,
(19, '/home/guest/training/volume-19.npy', '/home/guest/training/segmentation-19.npy') ,
(62, '/home/guest/training/volume-62.npy', '/home/guest/training/segmentation-62.npy') ,
(120, '/home/guest/training/volume-120.npy', '/home/guest/training/segmentation-120.npy') ,
(87, '/home/guest/training/volume-87.npy', '/home/guest/training/segmentation-87.npy') ,
(7, '/home/guest/training/volume-7.npy', '/home/guest/training/segmentation-7.npy') ,
(54, '/home/guest/training/volume-54.npy', '/home/guest/training/segmentation-54.npy') ,
(102, '/home/guest/training/volume-102.npy', '/home/guest/training/segmentation-102.npy') ,
(105, '/home/guest/training/volume-105.npy', '/home/guest/training/segmentation-105.npy') ,
(81, '/home/guest/training/volume-81.npy', '/home/guest/training/segmentation-81.npy') ,
(97, '/home/guest/training/volume-97.npy', '/home/guest/training/segmentation-97.npy') ,
(88, '/home/guest/training/volume-88.npy', '/home/guest/training/segmentation-88.npy') ,
(39, '/home/guest/training/volume-39.npy', '/home/guest/training/segmentation-39.npy') ,
(1, '/home/guest/training/volume-1.npy', '/home/guest/training/segmentation-1.npy') ,
(124, '/home/guest/training/volume-124.npy', '/home/guest/training/segmentation-124.npy') ,
(34, '/home/guest/training/volume-34.npy', '/home/guest/training/segmentation-34.npy') ,
(31, '/home/guest/training/volume-31.npy', '/home/guest/training/segmentation-31.npy') ,
(42, '/home/guest/training/volume-42.npy', '/home/guest/training/segmentation-42.npy') ,
(13, '/home/guest/training/volume-13.npy', '/home/guest/training/segmentation-13.npy') ,
(107, '/home/guest/training/volume-107.npy', '/home/guest/training/segmentation-107.npy') ,
(112, '/home/guest/training/volume-112.npy', '/home/guest/training/segmentation-112.npy') ,
(92, '/home/guest/training/volume-92.npy', '/home/guest/training/segmentation-92.npy') ,
(110, '/home/guest/training/volume-110.npy', '/home/guest/training/segmentation-110.npy') ,
(8, '/home/guest/training/volume-8.npy', '/home/guest/training/segmentation-8.npy') ,
(72, '/home/guest/training/volume-72.npy', '/home/guest/training/segmentation-72.npy') ,
(56, '/home/guest/training/volume-56.npy', '/home/guest/training/segmentation-56.npy') ,
(115, '/home/guest/training/volume-115.npy', '/home/guest/training/segmentation-115.npy') ,
(57, '/home/guest/training/volume-57.npy', '/home/guest/training/segmentation-57.npy') ,
(109, '/home/guest/training/volume-109.npy', '/home/guest/training/segmentation-109.npy') ,
(118, '/home/guest/training/volume-118.npy', '/home/guest/training/segmentation-118.npy') ,
(90, '/home/guest/training/volume-90.npy', '/home/guest/training/segmentation-90.npy') ,
(76, '/home/guest/training/volume-76.npy', '/home/guest/training/segmentation-76.npy') ,
(68, '/home/guest/training/volume-68.npy', '/home/guest/training/segmentation-68.npy') ,
(119, '/home/guest/training/volume-119.npy', '/home/guest/training/segmentation-119.npy') ,
(58, '/home/guest/training/volume-58.npy', '/home/guest/training/segmentation-58.npy') ,
(73, '/home/guest/training/volume-73.npy', '/home/guest/training/segmentation-73.npy') ,
(116, '/home/guest/training/volume-116.npy', '/home/guest/training/segmentation-116.npy') ,
(47, '/home/guest/training/volume-47.npy', '/home/guest/training/segmentation-47.npy') ,
(66, '/home/guest/training/volume-66.npy', '/home/guest/training/segmentation-66.npy') ,
(94, '/home/guest/training/volume-94.npy', '/home/guest/training/segmentation-94.npy') ,
(38, '/home/guest/training/volume-38.npy', '/home/guest/training/segmentation-38.npy') ,
(130, '/home/guest/training/volume-130.npy', '/home/guest/training/segmentation-130.npy') ,
(71, '/home/guest/training/volume-71.npy', '/home/guest/training/segmentation-71.npy') ,
(20, '/home/guest/training/volume-20.npy', '/home/guest/training/segmentation-20.npy') ,