dice loss可以理解为检测任务中的IOU,用于分割任务中可以有效缓解正样本占比小的case。比如边缘的分割、小目标的分割。
前景分割:
#https://stackoverflow.com/questions/72195156/correct-implementation-of-dice-loss-in-tensorflow-keras
def dice_coef(y_true, y_pred, smooth=100):
    """Dice similarity coefficient between two (soft) masks.

    Flattens both tensors and computes
    ``(2*|A.B| + smooth) / (sum(A) + sum(B) + smooth)``.
    ``smooth`` keeps the ratio defined when both masks are empty and
    softens the score on tiny masks.
    """
    # Collapse all dimensions so the overlap reduces to one scalar sum.
    true_flat = K.flatten(y_true)
    pred_flat = K.flatten(y_pred)
    overlap = K.sum(true_flat * pred_flat)
    denom = K.sum(true_flat) + K.sum(pred_flat) + smooth
    return (2. * overlap + smooth) / denom
def dice_coef_loss(y_true, y_pred):
    """Dice loss: ``1 - dice_coef``, so a perfect overlap scores 0."""
    similarity = dice_coef(y_true, y_pred)
    return 1 - similarity
边缘+前景分割:
# 3x3 Laplacian (8-neighbour) edge-detection kernel: zero response on flat
# regions, non-zero at intensity boundaries.  Reshaped to the
# [filter_h, filter_w, in_channels, out_channels] layout tf.nn.conv2d expects.
edge_kernel = np.array(
    [[1.,  1., 1.],
     [1., -8., 1.],
     [1.,  1., 1.]],
    dtype=np.float32,
).reshape([3, 3, 1, 1])
edge_kernel_const = tf.constant(edge_kernel)
def edge_dice_coef_loss(y_true, y_pred):
    """Combined loss: dice on edge maps + pixel-wise binary cross-entropy.

    NOTE(review): assumes y_true / y_pred are 4-D [batch, h, w, 1] tensors
    and that the ground truth holds only 0/1 values -- confirm with callers.
    """
    # Ground-truth edge map: the Laplacian response is exactly 0 on flat
    # 0/1 regions and non-zero at label boundaries.
    gt_response = tf.nn.conv2d(y_true, edge_kernel_const, strides=1, padding='SAME')
    gt_edge = 1. - tf.cast(tf.equal(gt_response, 0), dtype=tf.float32)

    # Predicted edge map: square the response to widen the gap between
    # edge and non-edge areas, then squash into (0, 1):
    # 6/(6+0.01)~=0.998, 0.01/(0.01+0.01)=0.5, 0.001/(0.001+0.01)~=0.09.
    pred_response = tf.nn.conv2d(y_pred, edge_kernel_const, strides=1, padding='SAME')
    pred_edge = tf.pow(pred_response, 2)
    pred_edge = pred_edge / (pred_edge + 0.01)

    edge_dice = 1 - dice_coef(gt_edge, pred_edge)
    pixel_bce = tf.keras.losses.BinaryCrossentropy()(y_true, y_pred)
    return edge_dice + pixel_bce