[Reproduction] Implementing HistoSegNet on Colab

Reproducing HistoSegNet: Semantic Segmentation of Histological Tissue Type in Whole Slide Images

An introduction to the paper will be added later; this is just a placeholder for now.
Setup: Colab

The reproduction work starts below:

  • After several earlier attempts, conda is installed first to work around the pydensecrf issue. (Reference link: link)
# Check the path of the current python executable
!which python # should return /usr/local/bin/python
# Check the current python version
!python --version # should return Python 3.7.11
# Check whether the PYTHONPATH variable is set
!echo $PYTHONPATH # should return /env/python
# Unset the PYTHONPATH variable
%env PYTHONPATH= # should return env: PYTHONPATH=
# Install Miniconda
%%bash
MINICONDA_INSTALLER_SCRIPT=Miniconda3-4.5.4-Linux-x86_64.sh
MINICONDA_PREFIX=/usr/local
wget https://repo.continuum.io/miniconda/$MINICONDA_INSTALLER_SCRIPT
chmod +x $MINICONDA_INSTALLER_SCRIPT
./$MINICONDA_INSTALLER_SCRIPT -b -f -p $MINICONDA_PREFIX

(Screenshot: installation output)

# Check conda
!which conda # should return /usr/local/bin/conda
# Check python and its current version
!which python # still returns /usr/local/bin/python
!python --version # now returns Python 3.6.5 :: Anaconda, Inc.
# Update conda and change the python version to 3.5 (required by the experiment)
%%bash
conda install --channel defaults conda python=3.5 --yes
conda update --channel defaults --all --yes
# Check the current conda and python versions
!conda --version # now returns 4.5.11
!python --version # now returns Python 3.5.6 :: Anaconda, Inc.
# Add to the system path
# View the current search path list
import sys
sys.path
# Append the conda package directory to the path
import sys
_ = (sys.path
        .append("/usr/local/lib/python3.5/site-packages"))
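A quick sanity check (not part of the original demo) that the append took effect:

# The conda site-packages directory should now be on the module search path
import sys
print("/usr/local/lib/python3.5/site-packages" in sys.path)  # expect True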
  • Install the specific package versions required by the experiment
# Install cython first
!pip install cython
# Install pydensecrf with conda
!conda install --channel conda-forge pydensecrf==1.0rc3 --yes
# Install keras
!pip install keras==2.2.4
# Install tensorflow from the Douban mirror (slightly faster)
!pip install tensorflow==1.13.1 -i https://pypi.douban.com/simple
# Install numpy, opencv, scipy, scikit-image and matplotlib
!pip install numpy==1.16.2
!pip install opencv-python==3.4.4.19
!pip install scipy==1.2.0
!pip install scikit-image==0.14.2
!pip install matplotlib==3.0.2
# Install pydensecrf from GitHub (not sure whether this is actually needed; optional)
!pip install git+https://github.com/lucasb-eyer/pydensecrf.git
# Also updated cython (optional)
!sudo apt-get remove cython
!sudo pip install -U cython
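To check that pydensecrf actually works after these installs, a minimal smoke test can be run. This is just a sketch on a fake 2-class softmax map and random data; it is not part of the HistoSegNet pipeline.

# Minimal pydensecrf smoke test on random data
import numpy as np
import pydensecrf.densecrf as dcrf
from pydensecrf.utils import unary_from_softmax

probs = np.random.rand(2, 224, 224).astype(np.float32)   # fake 2-class softmax output
probs /= probs.sum(axis=0, keepdims=True)
d = dcrf.DenseCRF2D(224, 224, 2)                          # width, height, n_labels
d.setUnaryEnergy(unary_from_softmax(probs))
d.addPairwiseGaussian(sxy=3, compat=3)
Q = d.inference(5)
print(np.array(Q).shape)                                  # expect (2, 224*224)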
# Fetch the experiment code
# The original author's repo is missing part of the dataset; my fork adds it and fixes a few small errors
!git clone https://github.com/lilmercy/hsn.git
  • demo_01_segment_patches (run HistoSegNet on the ADP tuning set and evaluate the results qualitatively and quantitatively)
# setup
%matplotlib inline
import hsn
import pandas as pd
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
from hsn.hsn1.adp import Atlas
from hsn.hsn1.utilities import *
from hsn.hsn1.histonet import HistoNet
from hsn.hsn1.gradcam import GradCAM
from hsn.hsn1.densecrf import DenseCRF
# Initialize HistoSegNet
'''
input_name -- the segmentation set to process  {'01_tuning_patch', '02_glas_full'}
input_size -- the size to which input images are resized  [int > 0, int > 0]
input_mode -- the type of input image  {'patch', 'wsi'}
down_fac -- scalar downsampling factor applied to the input images to ensure equivalent resolution, 0 <= float <= 1
batch_size -- batch size of the input images, int > 0
htt_mode -- the type of classes to segment from the images  {'both', 'morph', 'func', 'glas'}
			(both -- morph && func
			 morph -- morphological types
			 func -- functional types
			 glas -- glandular or non-glandular)
gt_mode -- whether to evaluate the segmentation against ground-truth annotations  {'on', 'off'}
run_level -- the last stage of HistoSegNet to run  {1, 2, 3}
			(1 -- the first stage: CNN confidence scores
			 2 -- the third stage: modified Grad-CAMs
			 3 -- the fourth stage: dense CRF segmentation masks)
save_types -- which outputs to save for debugging  [{0, 1}, {0, 1}, {0, 1}, {0, 1}]
			 (HTT confidence scores: save (1), do not save (0)
			  Continuous Grad-CAMs: save (1), do not save (0)
			  Discrete segmentation masks: save (1), do not save (0)
			  Summary images: save (1), do not save (0))
verbosity -- verbosity of debug messages  {'NORMAL', 'QUIET'}
'''
hsn = hsn.hsn1.hsn_v1.HistoSegNetV1(params={'input_name': '01_tuning_patch', 'input_size': [224, 224], 
                                   'input_mode': 'patch', 'down_fac': 1, 'batch_size': 16, 
                                   'htt_mode': 'both', 'gt_mode': 'on', 'run_level': 3, 
                                   'save_types': [0, 0, 1, 0], 'verbosity': 'QUIET'})
# Load the images from file
hsn.find_img()
hsn.input_files_all[0]
# Import preprocessing to fix the "keras has no attribute preprocessing" error
from keras import preprocessing
# Get the log inverse class frequencies
hsn.analyze_img()
# Show the log inverse class frequencies for the morphological types
df = pd.DataFrame(data=hsn.httclass_loginvfreq[0], index=hsn.httclass_valid_classes[0], columns=np.array([hsn.htt_classes[0]]))
df
# Show the log inverse class frequencies for the functional types
df = pd.DataFrame(data=hsn.httclass_loginvfreq[1], index=hsn.httclass_valid_classes[1], columns=np.array([hsn.htt_classes[1]]))
df
# Load the CNN
hsn.load_histonet(params={'model_name': 'histonet_X1.7_clrdecay_5'})
print(hsn.hn.model.summary())

(Screenshot: HistoNet model summary)

# Print the loaded class score thresholds
df = pd.DataFrame(data=hsn.hn.thresholds[0], index=hsn.atlas.level5)
df
# Work around the "gradients"-related error
import keras.backend as K
import numpy as np
import cv2
from scipy.ndimage import gaussian_filter
import scipy
import matplotlib.pyplot as plt
import tensorflow as tf
#tf.compat.v1.disable_eager_execution()
# Define the Grad-CAM class
class GradCAM:
    """Class for Grad-CAM and HTT modifications"""

    def __init__(self, params):
        self.htt_mode = params['htt_mode']
        self.size = params['size']
        self.num_imgs = params['num_imgs']
        self.batch_size = params['batch_size']
        self.cnn_model = params['cnn_model']
        self.final_layer = params['final_layer']
        self.tmp_dir = params['tmp_dir']

    def gen_gradcam(self, pred_image_inds, pred_class_inds, pred_scores, input_images_norm, atlas, valid_classes):
        """Generate Grad-CAM

        Parameters
        ----------
        pred_image_inds : numpy 1D array (size: num_pass_threshold)
            The indices of the images
        pred_class_inds : numpy 1D array (size: num_pass_threshold)
            The indices of the predicted classes
        pred_scores : numpy 1D array (size: num_pass_threshold)
            The scores of the predicted classes
        input_images_norm : numpy 4D array (size: B x H x W x 3)
            The normalized input images
        atlas : hsn_v1.adp.Atlas object
            The Atlas of Digital Pathology object
        valid_classes : list
            The segmentation classes valid for the current problem

        Returns
        -------
        gradcam : numpy 3D array (size: num_pass_threshold x H x W)
            The Grad-CAM continuous values for predicted images/classes of the current batch
        """

        # Find number of HTTs across all images that passed their thresholds
        num_pass_threshold = len(pred_image_inds)

        gradcam = np.zeros((num_pass_threshold, self.size[0], self.size[1]))
        num_batches = (num_pass_threshold + self.batch_size - 1) // self.batch_size
        pred_scores_3d = np.expand_dims(np.expand_dims(pred_scores, axis=1), axis=1)
        pred_class_inds_full = atlas.convert_class_inds(pred_class_inds, valid_classes, atlas.level5)

        # For each batch, obtain Grad-CAM, then multiply by confidence score
        for iter_batch in range(num_batches):
            start = iter_batch * self.batch_size
            end = min((iter_batch + 1) * self.batch_size, num_pass_threshold)
            cur_gradcam_batch = self.grad_cam_batch(self.cnn_model, input_images_norm[pred_image_inds[start:end]],
                                                    pred_class_inds_full[start:end], self.final_layer)
            gradcam[start:end] = cur_gradcam_batch * pred_scores_3d[start:end]
        return gradcam

    def grad_cam_batch(self, input_model, images, classes, layer_name):
        """Generate Grad-CAM for a single batch of images

        Parameters
        ----------
        input_model : keras.engine.sequential.Sequential object
            The input model to run Grad-CAM on
        images : numpy 4D array (size: B x H x W x 3)
            The normalized input images in the current batch
        classes : numpy 1D array
            The indices of the predicted classes in the current batch
        layer_name : str
            The name of the model layer to run Grad-CAM on

        Returns
        -------
        heatmaps : numpy 3D array (size: B x H x W)
            The generated Grad-CAM for the current batch
        """

        y_c = tf.gather_nd(input_model.layers[-2].output, np.dstack([range(images.shape[0]), classes])[0])
        conv_output = input_model.get_layer(layer_name).output

        def normalize(x):
            # utility function to normalize a tensor by its L2 norm
            return x / (K.sqrt(K.mean(K.square(x))) + 1e-5)
        ############
        # Attempted fix: replace K.gradients (original line kept below, commented out)
        # with tf.GradientTape to get the gradient of the class score w.r.t. the conv output
        with tf.GradientTape() as gtape:
          grads = normalize(gtape.gradient(y_c, conv_output))
        #grads = normalize(K.gradients(y_c, conv_output))[0]
        gradient_function = K.function([input_model.layers[0].input], [conv_output, grads])

        output, grads_val = gradient_function([images, 0])
        weights = np.mean(grads_val, axis=(1, 2))
        cams = np.einsum('ijkl,il->ijk', output, weights)

        new_cams = np.empty((images.shape[0], images.shape[1], images.shape[2]))
        heatmaps = np.empty((images.shape[0], images.shape[1], images.shape[2]))
        for i in range(cams.shape[0]):
            new_cams[i] = cv2.resize(cams[i], (self.size[0], self.size[1]))
            new_cams[i] = np.maximum(new_cams[i], 0)
            heatmaps[i] = new_cams[i] / np.maximum(np.max(new_cams[i]), 1e-7)

        return heatmaps

    def expand_image_wise(self, gradcam_serial, pred_image_inds, pred_class_inds, valid_classes):
        """Expand the serialized Grad-CAM into 4D array, i.e. insert arrays of zeroes for unpredicted classes

        Parameters
        ----------
        gradcam_serial : numpy 3D array (size: self.num_imgs x W x H)
            The generated Grad-CAMs for predicted classes in the current batch, in serial form
        pred_image_inds : numpy 1D array (size: self.num_imgs)
            The indices of the images in the current batch, in serial form
        pred_class_inds : numpy 1D array (size: self.num_imgs)
            The indices of the predicted classes in the current batch, in serial form
        valid_classes : list
            The segmentation classes valid for the current problem

        Returns
        -------
        gradcam_image_wise : numpy 4D array (size: self.num_imgs x C x H x W), where C = number of classes
            The serialized Grad-CAM for the current batch
        """

        gradcam_image_wise = np.zeros((self.num_imgs, len(valid_classes), self.size[0], self.size[1]))
        for iter_input_file in range(self.num_imgs):
            # Convert serial indices to valid out indices
            cur_serial_inds = [i for i, x in enumerate(pred_image_inds) if x == iter_input_file]
            cur_class_inds = pred_class_inds[cur_serial_inds]
            if len(cur_class_inds) > 0:
                gradcam_image_wise[iter_input_file, cur_class_inds] = gradcam_serial[cur_serial_inds]
        return gradcam_image_wise

    def modify_by_htt(self, gradcam, images, atlas, htt_class, gradcam_adipose=None):
        """Generates non-foreground class activations and appends to the foreground class activations

        Parameters
        ----------
        gradcam : numpy 4D array (size: self.batch_size x C x W x H), where C = number of classes
            The serialized Grad-CAM for the current batch
        images : numpy 3D array (size: self.batch_size x W x H x 3)
            The input images for the current batch
        atlas : hsn_v1.adp.Atlas object
            The Atlas of Digital Pathology object
        htt_class : str
            The type of segmentation set to solve
        gradcam_adipose : numpy 4D array (size: self.num_imgs x C x H x W), where C = number of classes,
                          or None, optional
            Adipose class Grad-CAM (if segmenting functional types) or None (if not segmenting functional types)

        Returns
        -------
        gradcam : numpy 4D array (size: self.batch_size x C x W x H), where C = number of classes
            The modified Grad-CAM for the current batch, with non-foreground class activations appended
        """
        if htt_class == 'morph':
            background_max = 0.75
            background_exception_classes = ['A.W', 'A.B', 'A.M']
            classes = atlas.morph_valid_classes
        elif htt_class == 'func':
            background_max = 0.75
            other_tissue_mult = 0.05
            background_exception_classes = ['G.O', 'G.N', 'T']
            classes = atlas.func_valid_classes
            if gradcam_adipose is None:
                raise Exception('You must feed in adipose heatmap for functional type')
            other_ind = classes.index('Other')
        elif htt_class == 'glas':
            other_tissue_mult = 0.05
            classes = atlas.glas_valid_classes
            other_ind = classes.index('Other')
            # Get other tissue class prediction
            other_moh = np.max(gradcam, axis=1)
            other_gradcam = np.expand_dims(other_tissue_mult * (1 - other_moh), axis=1)
            other_gradcam = np.max(other_gradcam, axis=1)
            other_gradcam = np.clip(other_gradcam, 0, 1)
            gradcam[:, other_ind] = other_gradcam

        if htt_class in ['morph', 'func']:
            background_ind = classes.index('Background')

            # Get background class prediction
            sigmoid_input = 4 * (np.mean(images, axis=-1) - 240)
            background_gradcam = background_max * scipy.special.expit(sigmoid_input)
            background_exception_cur_inds = [i for i, x in enumerate(classes) if x in background_exception_classes]
            for iter_input_image in range(background_gradcam.shape[0]):
                background_gradcam[iter_input_image] = gaussian_filter(background_gradcam[iter_input_image], sigma=2)
            background_gradcam -= np.max(gradcam[:, background_exception_cur_inds], axis=1)
            background_gradcam = np.clip(background_gradcam, 0, 1)
            gradcam[:, background_ind] = background_gradcam

            # Get other tissue class prediction
            if htt_class == 'func':
                other_moh = np.max(gradcam, axis=1)
                other_gradcam = np.expand_dims(other_tissue_mult * (1 - other_moh), axis=1)
                other_gradcam = np.max(np.concatenate((other_gradcam, gradcam_adipose), axis=1), axis=1)
                other_gradcam = np.clip(other_gradcam, 0, 1)
                gradcam[:, other_ind] = other_gradcam
        return gradcam

    def get_cs_gradcam(self, gradcam, atlas, htt_class):
        """Performs class subtraction operation to modified Grad-CAM

        Parameters
        ----------
        gradcam : numpy 4D array (size: self.batch_size x C x W x H), where C = number of classes
            The modified Grad-CAM for the current batch, with non-foreground class activations appended
        atlas : hsn_v1.adp.Atlas object
            The Atlas of Digital Pathology object
        htt_class : str
            The type of segmentation set to solve

        Returns
        -------
        cs_gradcam : numpy 4D array (size: self.batch_size x C x W x H), where C = number of classes
            The class-subtracted Grad-CAM for the current batch
        """

        if htt_class == 'func':
            classes = atlas.func_valid_classes
            other_ind = classes.index('Other')
        elif htt_class == 'glas':
            classes = atlas.glas_valid_classes
            other_ind = classes.index('Other')
        class_inds = range(gradcam.shape[1])
        cs_gradcam = gradcam
        for iter_class in range(gradcam.shape[1]):
            if not (htt_class in ['func', 'glas'] and iter_class == other_ind):
                cs_gradcam[:, iter_class] -= np.max(gradcam[:, np.delete(class_inds, iter_class)], axis=1)
        cs_gradcam = np.clip(cs_gradcam, 0, 1)
        return cs_gradcam
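For reference, here is a minimal standalone sketch of the Grad-CAM gradient step written for eager execution (TF 2.x style), using a toy tf.keras CNN instead of HistoNet and random data. It only illustrates what grad_cam_batch computes; it is not a drop-in fix for the TF 1.13 graph-mode setup used in this notebook.

# Toy Grad-CAM gradient step in eager mode (sketch: toy model, random data)
import numpy as np
import tensorflow as tf

inputs = tf.keras.Input(shape=(224, 224, 3))
x = tf.keras.layers.Conv2D(8, 3, activation='relu', name='last_conv')(inputs)
x = tf.keras.layers.GlobalAveragePooling2D()(x)
outputs = tf.keras.layers.Dense(5)(x)                       # 5 dummy classes (pre-sigmoid scores)
model = tf.keras.Model(inputs, outputs)

# Expose both the conv feature map and the class scores
grad_model = tf.keras.Model(model.inputs,
                            [model.get_layer('last_conv').output, model.output])

images = np.random.rand(2, 224, 224, 3).astype(np.float32)
class_inds = np.array([1, 3])                               # one target class per image

with tf.GradientTape() as tape:
    conv_out, scores = grad_model(images)
    # Score of the target class for each image (same gather_nd trick as in grad_cam_batch)
    y_c = tf.gather_nd(scores, np.dstack([np.arange(len(class_inds)), class_inds])[0])

grads = tape.gradient(y_c, conv_out)                        # d(score) / d(feature map)
weights = tf.reduce_mean(grads, axis=(1, 2))                # global-average-pool the gradients
cams = tf.maximum(tf.einsum('ijkl,il->ijk', conv_out, weights), 0)  # weighted sum + ReLU
print(cams.shape)                                           # (2, 222, 222); resize to the input size afterwards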
# Work around the "model.predict"-related error
import os
import keras
import numpy as np
from keras.models import model_from_json
from keras import optimizers
import scipy
from scipy import io
import tensorflow as tf
tf.compat.v1.disable_eager_execution()
# Define the HistoNet class
class HistoNet:
    """Class for implementing the classification CNN stage (HistoNet)"""

    def __init__(self, params):
        # Set constant parameters
        self.train_mean = 193.09203
        self.train_std = 56.450138

        # Set user-defined parameters
        self.model_dir = params['model_dir']
        self.model_name = params['model_name']
        self.batch_size = params['batch_size']
        self.relevant_inds = params['relevant_inds']
        self.input_name = params['input_name']
        self.class_names = params['class_names']

    def build_model(self):
        """Load model architecture, weights from file and compile the model"""

        # Load architecture from json
        model_json_path = os.path.join(self.model_dir, self.model_name + '.json')
        json_file = open(model_json_path, 'r')
        loaded_model_json = json_file.read()
        json_file.close()
        self.model = model_from_json(loaded_model_json)

        # Load weights from h5
        model_h5_path = os.path.join(self.model_dir, self.model_name + '.h5')
        self.model.load_weights(model_h5_path)

        # Evaluate model
        opt = optimizers.SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
        self.model.compile(loss='binary_crossentropy', optimizer=opt, metrics=['binary_accuracy'])

    def normalize_image(self, X, is_glas=False):
        """Normalize the input images

        Parameters
        ----------
        X : numpy 3D array (size: W x H x 3)
            The input image, before normalizing
        is_glas : bool, optional
            True if segmenting GlaS images, False otherwise

        Returns
        -------
        Y : numpy 3D array (size: W x H x 3)
            The input image, after normalizing
        """

        if is_glas:
            # Clip values between 0 and 255
            X = np.clip(X, 0, 255)
        # Zero-mean, unit-variance normalization
        Y = (X - self.train_mean) / (self.train_std + 1e-7)
        return Y

    def load_thresholds(self, thresh_dir, model_name):
        """Load confidence score thresholds from file

        Parameters
        ----------
        thresh_dir : str
            File path to the directory holding the threshold file
        model_name : str
            The name of the model
        """

        thresh_path = os.path.join(thresh_dir, model_name)
        tmp = scipy.io.loadmat(thresh_path)
        self.thresholds = tmp.get('optimalScoreThresh')

    def predict(self, input_images, is_glas=False):
        """Predict classification CNN confidence scores on input images

        Parameters
        ----------
        input_images : numpy array (size: self.batch_size x W x H x 3)
            Input images, single batch
        is_glas : bool, optional
            True if segmenting GlaS images, False otherwise
        Returns
        -------
        pass_threshold_image_inds : numpy 1D array (size: num_pass_threshold)
            The indices of the images
        pass_threshold_class_inds : numpy 1D array (size: num_pass_threshold)
            The indices of the predicted classes
        pass_threshold_scores : numpy 1D array (size: num_pass_threshold)
            The scores of the predicted classes
        """
        #model = Sequential()
        #model.call = tf.function(model.call)
        ################## try to solve the error "model.predict" #####################################
        #json_file = open('/content/hsn/data/histonet_X1.7_clrdecay_5.json','r')
        #loaded_model_json = json_file.read()
        #json_file.close()
        #loaded_model = model_from_json(loaded_model_json)
        #load weights into new model
        #loaded_model.load_weights("/content/hsn/data/histonet_X1.7_clrdecay_5.h5")
        #print("Loaded Model from disk")
        #compile and evaluate loaded model
        #loaded_model.compile(loss='sparse_categorical_crossentropy',optimizer='adam',metrics=['accuracy'])
        #######################################################################################
        #model.compile()
        #model.run_eagerly = True

        predicted_scores = self.model.predict(input_images, batch_size=self.batch_size)
        is_pass_threshold = np.greater_equal(predicted_scores, self.thresholds)
        if is_glas:
            exocrine_class_ind = self.class_names.index('G.O')
            is_pass_threshold[:, exocrine_class_ind] = True #
        (pass_threshold_image_inds, pass_threshold_class_inds) = np.where(is_pass_threshold)
        pass_threshold_scores = predicted_scores[is_pass_threshold]

        is_class_in_level3 = np.array([np.isin(x, self.relevant_inds) for i,x in enumerate(pass_threshold_class_inds)])
        pass_threshold_image_inds = pass_threshold_image_inds[is_class_in_level3]
        pass_threshold_class_inds = pass_threshold_class_inds[is_class_in_level3]
        pass_threshold_scores = pass_threshold_scores[is_class_in_level3]

        return pass_threshold_image_inds, pass_threshold_class_inds, pass_threshold_scores

    def split_by_htt_class(self, pred_image_inds, pred_class_inds, pred_scores, htt_mode, atlas):
        """Split predicted classes into morphological and functional classes

        Parameters
        ----------
        pred_image_inds : numpy 1D array (size: num_pass_threshold)
            The indices of the images
        pred_class_inds : numpy 1D array (size: num_pass_threshold)
            The indices of the predicted classes
        pred_scores : numpy 1D array (size: num_pass_threshold)
            The scores of the predicted classes
        htt_mode : str
            The type of segmentation set to solve ('both', 'morph', 'func', or 'glas')
        atlas : hsn_v1.adp.Atlas object
            The Atlas of Digital Pathology object

        Returns
        -------
        httclass_pred_image_inds :
        httclass_pred_class_inds :
        httclass_pred_scores :
        """

        httclass_pred_image_inds = []
        httclass_pred_class_inds = []
        httclass_pred_scores = []

        if htt_mode in ['glas']:
            glas_serial_inds = [i for i, x in enumerate(pred_class_inds) if atlas.level5[x] in atlas.glas_valid_classes]
            httclass_pred_image_inds.append(pred_image_inds[glas_serial_inds])
            pred_valid_class_inds = atlas.convert_class_inds(pred_class_inds[glas_serial_inds], atlas.level5,
                                                             atlas.glas_valid_classes)
            httclass_pred_class_inds.append(pred_valid_class_inds)
            httclass_pred_scores.append(pred_scores[glas_serial_inds])
        if htt_mode in ['both', 'morph']:
            morph_serial_inds = [i for i, x in enumerate(pred_class_inds) if atlas.level5[x] in
                                 atlas.morph_valid_classes]
            httclass_pred_image_inds.append(pred_image_inds[morph_serial_inds])
            pred_valid_class_inds = atlas.convert_class_inds(pred_class_inds[morph_serial_inds], atlas.level5,
                                                             atlas.morph_valid_classes)
            httclass_pred_class_inds.append(pred_valid_class_inds)
            httclass_pred_scores.append(pred_scores[morph_serial_inds])
        if htt_mode in ['both', 'func']:
            func_serial_inds = [i for i, x in enumerate(pred_class_inds) if atlas.level5[x] in atlas.func_valid_classes]
            httclass_pred_image_inds.append(pred_image_inds[func_serial_inds])
            pred_valid_class_inds = atlas.convert_class_inds(pred_class_inds[func_serial_inds], atlas.level5,
                                                             atlas.func_valid_classes)
            httclass_pred_class_inds.append(pred_valid_class_inds)
            httclass_pred_scores.append(pred_scores[func_serial_inds])

        if htt_mode == 'both' and sum([x.shape[0] for x in httclass_pred_image_inds]) != len(pred_image_inds):
            raise Exception('Error splitting Grad-CAM into HTT-class-specific Grad-CAMs: in and out sizes don\'t match')
        return httclass_pred_image_inds, httclass_pred_class_inds, httclass_pred_scores

    def find_final_layer(self):
        """Find the layer index of the last activation layer before the flatten layer"""
        is_after_flatten = False
        for iter_layer, layer in reversed(list(enumerate(self.model.layers))):
            if type(layer) == keras.layers.core.Flatten:
                is_after_flatten = True
            if is_after_flatten and type(layer) == keras.layers.core.Activation:
                return layer.name
        raise Exception('Could not find the final layer in provided HistoNet')
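Until the model.predict error is tracked down, one way to isolate it (a small sketch, assuming the model loaded by load_histonet above is still available as hsn.hn) is to call predict on a random, correctly shaped batch and see whether the failure reproduces outside the pipeline:

# Isolation test: call predict on a random batch shaped like the normalized inputs
import numpy as np
dummy = np.random.rand(4, 224, 224, 3).astype(np.float32) * 255
dummy = (dummy - hsn.hn.train_mean) / (hsn.hn.train_std + 1e-7)  # same normalization as normalize_image
scores = hsn.hn.model.predict(dummy, batch_size=4)
print(scores.shape)  # expect (4, number of HistoNet classes)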
# Run all batches (the model.predict error is still unresolved at this point)
confscores = np.zeros((len(hsn.input_files_all), len(hsn.hn.class_names)))
iou = {}
fiou = {}
miou = {}
num_batches = (len(hsn.input_files_all) + hsn.batch_size - 1) // hsn.batch_size
for iter_batch in range(num_batches):
    start = iter_batch * hsn.batch_size
    end = min((iter_batch + 1) * hsn.batch_size, len(hsn.input_files_all))
    hsn.input_files_batch = hsn.input_files_all[start:end]
    # Normalize image batch
    hsn.load_norm_imgs()
    # Load ground-truth annotations
    hsn.load_gt()
    # Segment image
    hsn.segment_img()
    # Evaluate segmentation
    iou['GradCAM'], fiou['GradCAM'], miou['GradCAM'] = hsn.eval_segmentation(hsn.intersect_counts['GradCAM'], hsn.union_counts['GradCAM'],
                                                                             hsn.confusion_matrix['GradCAM'], hsn.gt_counts['GradCAM'],
                                                                             httclass_pred_segmasks=hsn.ablative_segmasks['GradCAM'], tag_name='GradCAM')
    iou['Adjust'], fiou['Adjust'], miou['Adjust'] = hsn.eval_segmentation(hsn.intersect_counts['Adjust'], hsn.union_counts['Adjust'],
                                                                          hsn.confusion_matrix['Adjust'], hsn.gt_counts['Adjust'],
                                                                          httclass_pred_segmasks=hsn.ablative_segmasks['Adjust'], tag_name='Adjust')
    iou['CRF'], fiou['CRF'], miou['CRF'] = hsn.eval_segmentation(hsn.intersect_counts['CRF'], hsn.union_counts['CRF'],
                                                                 hsn.confusion_matrix['CRF'], hsn.gt_counts['CRF'],
                                                                 httclass_pred_segmasks=hsn.ablative_segmasks['CRF'], tag_name='CRF')

That's it for now; I'll update this post once the reproduction succeeds~
