Python implementations of loss functions

These loss functions were downloaded from GitHub. They are written for binary segmentation; for multi-class problems you will need to adapt them yourself.

Cross-entropy loss

import tensorflow as tf  # TensorFlow 1.x API (tf.log, tf.div, keep_dims)

def pixel_wise_softmax(output_map):
    # Channel-wise softmax over axis 3 of a [batch, H, W, n_class] logit map;
    # equivalent to tf.nn.softmax(output_map, axis=3).
    exponential_map = tf.exp(output_map)
    sum_exp = tf.reduce_sum(exponential_map, 3, keep_dims=True)
    return tf.div(exponential_map, sum_exp)

def xentropy_loss(mask, prediction, n_class):
    # Pixel-wise cross entropy between a one-hot mask and softmaxed logits.
    mask = tf.reshape(mask, [-1, n_class])
    mask = tf.cast(mask, tf.float32)
    pred = tf.reshape(pixel_wise_softmax(prediction), [-1, n_class])
    pred = tf.cast(pred, tf.float32)
    # Clip the probabilities to avoid log(0).
    loss_temp = mask * tf.log(tf.clip_by_value(pred, 1e-10, 1.0))
    return -tf.reduce_mean(loss_temp)
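
A minimal usage sketch, assuming TensorFlow 1.x graph mode, a binary problem (n_class = 2), one-hot masks and raw logits of shape [batch, H, W, 2]; the placeholder names and the 256x256 size are illustrative only.

logits = tf.placeholder(tf.float32, [None, 256, 256, 2], name="logits")
labels = tf.placeholder(tf.float32, [None, 256, 256, 2], name="labels")
ce_loss = xentropy_loss(labels, logits, n_class=2)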
    

L2 regularization

def l2_regular():
    # Mean L2 penalty over all trainable variables whose name contains "kernel"
    # (conv/dense weights created by tf.layers; biases are excluded).
    kernel_vars = [var for var in tf.trainable_variables() if "kernel" in var.name]
    l2_losses = [tf.nn.l2_loss(w) for w in kernel_vars]
    regular_loss = tf.reduce_mean(l2_losses) * 0.001  # weight-decay coefficient
    return regular_loss
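
A sketch of how the penalty is usually added to the data term, assuming the logits/labels/ce_loss tensors from the cross-entropy example above and a network built with tf.layers (so that variables named "kernel" exist); the learning rate is illustrative.

total_loss = ce_loss + l2_regular()
train_op = tf.train.AdamOptimizer(1e-4).minimize(total_loss)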

Dice loss

def dice_loss_3d(Y_gt, Y_pred):
    # Soft Dice loss for 5-D tensors [batch, Z, H, W, C]; Y_pred must already be
    # probabilities (after sigmoid/softmax), not raw logits.
    Z, H, W, C = Y_gt.get_shape().as_list()[1:]
    smooth = 1e-5
    pred_flat = tf.reshape(Y_pred, [-1, H * W * C * Z])
    true_flat = tf.reshape(Y_gt, [-1, H * W * C * Z])
    intersection = 2 * tf.reduce_sum(pred_flat * true_flat, axis=1) + smooth
    denominator = tf.reduce_sum(pred_flat, axis=1) + tf.reduce_sum(true_flat, axis=1) + smooth
    loss = 1 - tf.reduce_mean(intersection / denominator)
    return loss


def dice_loss_2d(Y_gt, Y_pred):
    # Same as above for 4-D tensors [batch, H, W, C].
    H, W, C = Y_gt.get_shape().as_list()[1:]
    smooth = 1e-5
    pred_flat = tf.reshape(Y_pred, [-1, H * W * C])
    true_flat = tf.reshape(Y_gt, [-1, H * W * C])
    intersection = 2 * tf.reduce_sum(pred_flat * true_flat, axis=1) + smooth
    denominator = tf.reduce_sum(pred_flat, axis=1) + tf.reduce_sum(true_flat, axis=1) + smooth
    loss = 1 - tf.reduce_mean(intersection / denominator)
    return loss
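
The Dice losses expect probabilities rather than raw logits. A short sketch, reusing the logits/labels placeholders from the cross-entropy example:

probs = tf.nn.softmax(logits)  # [batch, H, W, 2] channel-wise probabilities
d_loss = dice_loss_2d(labels, probs)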

Tversky loss

def tversky_loss_3d(Y_gt, Y_pred, alpha=0.7):
    # Tversky loss for 5-D tensors [batch, Z, H, W, C]; Y_pred must be probabilities.
    Z, H, W, C = Y_gt.get_shape().as_list()[1:]
    smooth = 1e-5
    y_pred_pos = tf.reshape(Y_pred, [-1, H * W * C * Z])
    y_true_pos = tf.reshape(Y_gt, [-1, H * W * C * Z])
    true_pos = tf.reduce_sum(y_true_pos * y_pred_pos, axis=1)
    false_neg = tf.reduce_sum(y_true_pos * (1 - y_pred_pos), axis=1)
    false_pos = tf.reduce_sum((1 - y_true_pos) * y_pred_pos, axis=1)
    tversky = (true_pos + smooth) / (true_pos + alpha * false_neg + (1 - alpha) * false_pos + smooth)
    loss = 1 - tf.reduce_mean(tversky)
    return loss

def tversky_loss_2d(Y_gt, Y_pred, alpha=0.7):
    # 2-D version for [batch, H, W, C] tensors.
    H, W, C = Y_gt.get_shape().as_list()[1:]
    smooth = 1e-5
    y_pred_pos = tf.reshape(Y_pred, [-1, H * W * C])
    y_true_pos = tf.reshape(Y_gt, [-1, H * W * C])
    true_pos = tf.reduce_sum(y_true_pos * y_pred_pos, axis=1)
    false_neg = tf.reduce_sum(y_true_pos * (1 - y_pred_pos), axis=1)
    false_pos = tf.reduce_sum((1 - y_true_pos) * y_pred_pos, axis=1)
    tversky = (true_pos + smooth) / (true_pos + alpha * false_neg + (1 - alpha) * false_pos + smooth)
    loss = 1 - tf.reduce_mean(tversky)
    return loss
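
The Tversky index generalises Dice: TI = TP / (TP + alpha * FN + (1 - alpha) * FP), so alpha > 0.5 penalises false negatives more heavily, which favours recall on small foreground structures, while alpha = 0.5 essentially recovers the Dice loss. A sketch with the tensors from the Dice example:

tv_loss = tversky_loss_2d(labels, probs, alpha=0.7)  # recall-oriented
tv_dice = tversky_loss_2d(labels, probs, alpha=0.5)  # matches the Dice loss up to the smoothing term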

Focal Tversky loss

def focal_tversky_3d(Y_gt, Y_pred, alpha=0.7, gamma=0.75):
    # Focal Tversky loss for 5-D tensors: the Tversky loss raised to the power gamma.
    Z, H, W, C = Y_gt.get_shape().as_list()[1:]
    smooth = 1e-5
    y_pred_pos = tf.reshape(Y_pred, [-1, H * W * C * Z])
    y_true_pos = tf.reshape(Y_gt, [-1, H * W * C * Z])
    true_pos = tf.reduce_sum(y_true_pos * y_pred_pos, axis=1)
    false_neg = tf.reduce_sum(y_true_pos * (1 - y_pred_pos), axis=1)
    false_pos = tf.reduce_sum((1 - y_true_pos) * y_pred_pos, axis=1)
    tversky = (true_pos + smooth) / (true_pos + alpha * false_neg + (1 - alpha) * false_pos + smooth)
    loss = 1 - tf.reduce_mean(tversky)
    loss = tf.pow(loss, gamma)
    return loss


def focal_tversky_2d(Y_gt, Y_pred, alpha=0.7, gamma=0.75):
    # 2-D version for [batch, H, W, C] tensors.
    H, W, C = Y_gt.get_shape().as_list()[1:]
    smooth = 1e-5
    y_pred_pos = tf.reshape(Y_pred, [-1, H * W * C])
    y_true_pos = tf.reshape(Y_gt, [-1, H * W * C])
    true_pos = tf.reduce_sum(y_true_pos * y_pred_pos, axis=1)
    false_neg = tf.reduce_sum(y_true_pos * (1 - y_pred_pos), axis=1)
    false_pos = tf.reduce_sum((1 - y_true_pos) * y_pred_pos, axis=1)
    tversky = (true_pos + smooth) / (true_pos + alpha * false_neg + (1 - alpha) * false_pos + smooth)
    loss = 1 - tf.reduce_mean(tversky)
    loss = tf.pow(loss, gamma)
    return loss
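
The focal variant raises the Tversky loss to the power gamma; with gamma = 0.75 the curve is steeper near a Tversky index of 1, so regions that are already mostly segmented keep contributing a comparatively large gradient. Note that in this implementation the power is applied after averaging over the batch. A sketch with the same tensors:

ft_loss = focal_tversky_2d(labels, probs, alpha=0.7, gamma=0.75)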

Generalised Dice loss

def generalised_dice_loss_3d(Y_gt, Y_pred):
    # Generalised Dice: each class is weighted by the inverse square of its
    # ground-truth volume; input tensors are [batch, Z, H, W, C].
    smooth = 1e-5
    w = tf.reduce_sum(Y_gt, axis=[1, 2, 3])
    w = 1 / (w ** 2 + smooth)

    numerator = Y_gt * Y_pred
    numerator = w * tf.reduce_sum(numerator, axis=[1, 2, 3])
    numerator = tf.reduce_sum(numerator, axis=1)

    denominator = Y_pred + Y_gt
    denominator = w * tf.reduce_sum(denominator, axis=[1, 2, 3])
    denominator = tf.reduce_sum(denominator, axis=1)

    gen_dice_coef = 2 * numerator / (denominator + smooth)
    loss = tf.reduce_mean(1 - gen_dice_coef)
    return loss


def generalised_dice_loss_2d_ein(Y_gt, Y_pred):
    # The same 2-D loss written with tf.einsum ("bwhc" = batch, width, height, channel).
    Y_gt = tf.cast(Y_gt, 'float32')
    Y_pred = tf.cast(Y_pred, 'float32')
    w = tf.einsum("bwhc->bc", Y_gt)
    w = 1 / ((w + 1e-10) ** 2)
    intersection = w * tf.einsum("bwhc,bwhc->bc", Y_pred, Y_gt)
    union = w * (tf.einsum("bwhc->bc", Y_pred) + tf.einsum("bwhc->bc", Y_gt))

    divided = 1 - 2 * (tf.einsum("bc->b", intersection) + 1e-10) / (tf.einsum("bc->b", union) + 1e-10)

    loss = tf.reduce_mean(divided)
    return loss


def generalised_dice_loss_2d(Y_gt, Y_pred):
    # Generalised Dice loss for 4-D tensors [batch, H, W, C].
    smooth = 1e-5
    w = tf.reduce_sum(Y_gt, axis=[1, 2])
    w = 1 / (w ** 2 + smooth)

    numerator = Y_gt * Y_pred
    numerator = w * tf.reduce_sum(numerator, axis=[1, 2])
    numerator = tf.reduce_sum(numerator, axis=1)

    denominator = Y_pred + Y_gt
    denominator = w * tf.reduce_sum(denominator, axis=[1, 2])
    denominator = tf.reduce_sum(denominator, axis=1)

    gen_dice_coef = 2 * numerator / (denominator + smooth)
    loss = tf.reduce_mean(1 - gen_dice_coef)
    return loss
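
The inverse-square class weighting rebalances each class's contribution by its ground-truth volume, so small structures are not drowned out by the background; generalised_dice_loss_2d_ein computes the same quantity with einsum. A sketch with the tensors from above:

gdl_loss = generalised_dice_loss_2d(labels, probs)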


Surface loss

def surface_loss_3d(Y_gt, Y_pred):
    # Surface (boundary) loss for 5-D tensors: Y_gt here is a precomputed signed
    # distance map of the ground-truth boundary, not a one-hot mask.
    multipled = tf.reduce_sum(Y_gt * Y_pred, axis=[0, 1, 2, 3, 4])
    loss = tf.reduce_mean(multipled)
    return loss


def surface_loss_2d(Y_gt, Y_pred):
    # 2-D version for [batch, H, W, C] tensors.
    multipled = tf.reduce_sum(Y_gt * Y_pred, axis=[0, 1, 2, 3])
    loss = tf.reduce_mean(multipled)
    return loss
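
For the surface (boundary) loss the ground-truth input is a signed distance map of the object boundary, computed offline (e.g. with scipy.ndimage.distance_transform_edt), and the loss is normally blended with a regional loss such as the generalised Dice loss. A sketch in which the distance-map placeholder and the fixed mixing weight are my own assumptions; in practice the weight is often scheduled during training:

dist_map = tf.placeholder(tf.float32, [None, 256, 256, 2], name="dist_map")
alpha_mix = 0.5
combined_loss = alpha_mix * generalised_dice_loss_2d(labels, probs) \
                + (1 - alpha_mix) * surface_loss_2d(dist_map, probs)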
