Batch hard Triplet Loss (an improved triplet loss) implemented in Baidu's PaddlePaddle framework

import paddle.fluid as fluid

def batch_hard_triplet_loss(input, y_true, margin, batch_size):
    """
    :param input: shape [batch, feature], embeddings from the network's output layer
    :param y_true: shape [batch, 1], the class label of each sample
    :param margin: float, the triplet margin
    :param batch_size: size of the batch
    :return: triplet loss, a scalar
    """
    # y_true shape: [batch_size, 1]; y_true_trans shape: [1, batch_size]
    # pos_mask shape: [batch_size, batch_size], with ones on the diagonal.
    # pos_mask[i, j] == 1 means samples i and j share the same label.
    # Both sides are tiled to [batch_size, batch_size] so equal() compares
    # tensors of identical shape, without relying on broadcasting.
    y_true_trans = fluid.layers.transpose(y_true, [1, 0])
    y_true_trans_matrix = fluid.layers.concat([y_true_trans] * batch_size, axis=0)
    y_true_matrix = fluid.layers.concat([y_true] * batch_size, axis=1)
    pos_mask = fluid.layers.cast(
        fluid.layers.equal(y_true_trans_matrix, y_true_matrix), "float32")

    def _mask_max(input_tensor, mask, axis=None, keepdims=False):
        # mask == 1 (same label):  distance kept as-is
        # mask == 0 (different):   1e6 subtracted, so it can never be the max
        # -> picks the farthest positive pair for each anchor
        input_tensor = input_tensor - (1 - mask) * 1e6
        _max = fluid.layers.reduce_max(input_tensor, dim=axis, keep_dim=keepdims)
        return _max

    def _mask_min(input_tensor, mask, axis=None, keepdims=False):
        # mask == 1 (same label): 1e6 added, so it can never be the min
        # -> picks the closest negative pair for each anchor
        input_tensor = input_tensor + mask * 1e6
        _min = fluid.layers.reduce_min(input_tensor, dim=axis, keep_dim=keepdims)
        return _min

    # Pairwise squared distances via ||a - b||^2 = ||a||^2 + ||b||^2 - 2*a.b
    a = fluid.layers.reduce_sum(fluid.layers.pow(input, 2.0),
                                dim=1, keep_dim=True)   # [batch, 1]
    b = fluid.layers.reduce_sum(
        fluid.layers.pow(fluid.layers.transpose(input, [1, 0]), 2.0),
        dim=0, keep_dim=True)                           # [1, batch]
    b_matrix = fluid.layers.concat([b] * batch_size, axis=0)  # [batch, batch]

    dist_squared = (b_matrix + a) - 2 * fluid.layers.matmul(input, input, transpose_y=True)
    # Clamp to a small positive value so sqrt stays numerically stable.
    dist = fluid.layers.elementwise_max(
        dist_squared, fluid.layers.fill_constant([1], dtype='float32', value=1e-16))
    dist = fluid.layers.sqrt(dist)  # Euclidean distance matrix, [batch, batch]

    pos_max = _mask_max(dist, pos_mask, axis=-1, keepdims=True)  # hardest positive, [batch, 1]
    neg_min = _mask_min(dist, pos_mask, axis=-1, keepdims=True)  # hardest negative, [batch, 1]

    # Basic triplet term: (hardest positive - hardest negative) + margin
    basic_loss = (pos_max - neg_min) + margin
    # Variant 1: clipped (hinge) triplet loss ****************************
    # clip_loss = fluid.layers.relu(basic_loss)
    # loss = fluid.layers.reduce_mean(clip_loss, dim=0)

    # Variant 2: soft-margin triplet loss, log(1 + exp(x)) ***************
    loss = fluid.layers.reduce_mean(
        fluid.layers.log(1 + fluid.layers.exp(basic_loss)), dim=0)
    # loss = fluid.layers.reduce_mean(
    #     fluid.layers.log(fluid.layers.elementwise_add(
    #         fluid.layers.exp(basic_loss),
    #         fluid.layers.fill_constant([1], dtype='float32', value=1))), dim=0)

    return loss
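The dist_squared line above relies on the identity ||a - b||^2 = ||a||^2 + ||b||^2 - 2*a.b to get all pairwise distances from a single matrix multiply instead of an explicit double loop. A quick sanity check of that identity in plain NumPy (independent of fluid):

import numpy as np

x = np.random.rand(4, 8).astype('float32')            # [batch, feature]
sq = (x ** 2).sum(axis=1, keepdims=True)              # squared row norms, [batch, 1]
dist_sq = sq.T + sq - 2.0 * x @ x.T                   # mirrors (b_matrix + a) - 2*matmul
brute = ((x[:, None, :] - x[None, :, :]) ** 2).sum(-1)  # direct pairwise computation
print(np.allclose(dist_sq, brute, atol=1e-5))         # True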

The function argument input is the output of the network's final layer, with shape [batch_size, feature]; y_true holds the labels, i.e. the class of each of the batch_size samples, with shape [batch_size, 1].
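A minimal usage sketch, with batch_hard_triplet_loss defined as above, assuming the PaddlePaddle 1.x fluid static-graph API; the single fc layer stands in for a real embedding network, and the tensor names, shapes, and margin here are illustrative:

import numpy as np
import paddle.fluid as fluid

BATCH_SIZE = 8
FEATURE_DIM = 16

# Static-graph inputs; shape excludes the batch dimension.
feats_in = fluid.layers.data(name='feats_in', shape=[32], dtype='float32')
labels = fluid.layers.data(name='labels', shape=[1], dtype='int64')

embeddings = fluid.layers.fc(feats_in, size=FEATURE_DIM)  # stand-in embedding net
labels_f = fluid.layers.cast(labels, 'float32')           # equal() compares floats here

loss = batch_hard_triplet_loss(embeddings, labels_f, margin=0.3,
                               batch_size=BATCH_SIZE)

exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())

feed = {
    'feats_in': np.random.rand(BATCH_SIZE, 32).astype('float32'),
    'labels': np.random.randint(0, 4, size=(BATCH_SIZE, 1)).astype('int64'),
}
loss_val, = exe.run(fluid.default_main_program(), feed=feed, fetch_list=[loss])
print(loss_val)  # scalar soft-margin batch-hard loss for this batch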

Batch hard Triplet Loss can be written as:

\mathcal{L} = \frac{1}{B}\sum_{i=1}^{B} \ell\left(\max_{j:\,y_j = y_i} d(x_i, x_j) \;-\; \min_{k:\,y_k \neq y_i} d(x_i, x_k) \;+\; m\right)

where B is the batch size, d(., .) is the Euclidean distance between embeddings, m is the margin, and l(z) = max(z, 0) gives the clipped variant while l(z) = log(1 + e^z) gives the soft-margin variant used in the code.
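To make the mining step concrete, here is a small NumPy illustration with toy labels and a toy distance matrix (chosen just for this example) of what the formula selects for each anchor:

import numpy as np

labels = np.array([0, 0, 1, 1])                     # two classes, two samples each
dist = np.array([[0.0, 0.3, 1.2, 0.9],
                 [0.3, 0.0, 1.1, 1.0],
                 [1.2, 1.1, 0.0, 0.4],
                 [0.9, 1.0, 0.4, 0.0]], 'float32')  # pairwise Euclidean distances

pos_mask = (labels[None, :] == labels[:, None]).astype('float32')
hardest_pos = (dist - (1 - pos_mask) * 1e6).max(axis=1)  # [0.3, 0.3, 0.4, 0.4]
hardest_neg = (dist + pos_mask * 1e6).min(axis=1)        # [0.9, 1.0, 1.1, 0.9]

margin = 0.3
basic = hardest_pos - hardest_neg + margin
print(np.log1p(np.exp(basic)).mean())  # soft-margin batch-hard loss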

 
