Loss functions in Python

Introduction

A summary of the softmax family of losses.

One-hot encoding

import numpy as np

def one_hot(label, classes, background_id, dtype=None):
    """Convert integer labels to one-hot vectors.
    label         : integer ndarray of shape (-1,)
    classes       : total number of classes
    background_id : class index treated as background
    returns       : ndarray of shape (-1, classes)
    """
    if dtype is None:
        dtype = np.float32
    # Row i of the identity matrix is the one-hot vector for class i.
    label_onehot = np.eye(classes, dtype=dtype)[label]
    return label_onehot
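
A minimal usage check with made-up labels:

labels = np.array([0, 2, 1])
print(one_hot(labels, classes=3, background_id=0))
# [[1. 0. 0.]
#  [0. 0. 1.]
#  [0. 1. 0.]]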

Softmax

import numpy as np

def softmax(X, SUBMAX=True):
    """Numerically stable softmax over the last axis (float input).
    X shape: batch * -1 * classes    ndarray
    or     :         -1 * classes    ndarray
    or     :              classes    ndarray
    """
    orig_shape, orig_dtype = X.shape, X.dtype
    if SUBMAX:
        # Subtract the per-row max before exponentiating to prevent overflow;
        # softmax is invariant to adding a constant to every logit.
        X_exp = np.exp(X - np.max(X, axis=-1, keepdims=True))
    else:
        X_exp = np.exp(X)
    X_exp_sum = np.sum(X_exp, axis=-1, keepdims=True)
    X = X_exp / X_exp_sum
    assert X.shape == orig_shape
    return X.astype(orig_dtype)
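
A quick sanity check (the inputs are illustrative): every row of the output sums to 1, and subtracting the row maximum does not change the result; it only matters when the raw exponentials would overflow, as in the second row below.

logits = np.array([[1.0, 2.0, 3.0],
                   [1000.0, 1001.0, 1002.0]])  # naive exp overflows on this row
probs = softmax(logits)
print(probs.sum(axis=-1))                                        # [1. 1.]
print(np.allclose(probs[0], softmax(logits[0], SUBMAX=False)))   # True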

Softmax loss

def cross_entropy_error(y, t):
    delta = 1e-8                            # avoid log(0) = -inf
    return -np.sum(t * np.log(y + delta))   # np.log is the natural log

def softmax_loss(x, t, t_is_onehot=False, background_id=0):
    """Softmax followed by cross-entropy (float inputs).
    x : -1 * classes    ndarray (logits)
    t : -1              ndarray of integer labels    (t_is_onehot=False)
    or: -1 * classes    ndarray in one-hot format    (t_is_onehot=True)
    """
    if not t_is_onehot:
        classes = x.shape[1]
        t = one_hot(t, classes, background_id)

    y = softmax(x)
    loss = cross_entropy_error(y, t)
    return loss
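
A worked example with made-up logits: for a single sample the loss reduces to -ln of the probability assigned to the correct class.

x = np.array([[2.0, 0.5, 0.1]])         # logits for one sample
t = np.array([0])                       # ground-truth class
p = softmax(x)[0, 0]                    # probability of the correct class
print(softmax_loss(x, t), -np.log(p))   # both ~0.317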

class SoftmaxLoss:
    def __init__(self, classes, background=0, onehot=False):
        self.classes = classes
        self.background = background
        self.onehot = onehot
        self.delta = 1e-8  # avoid log(0) = -inf
        self.loss = None
        self.y = None      # softmax output
        self.t = None      # label (one-hot)

    def forward(self, x, t):
        """Float inputs.
        x : -1 * classes    ndarray (logits)
        t : -1              ndarray of integer labels    (self.onehot=False)
        or: -1 * classes    ndarray in one-hot format    (self.onehot=True)
        """
        if not self.onehot:
            t = one_hot(t, self.classes, self.background, dtype=x.dtype)
        self.t = t
        self.y = softmax(x)
        loss_vec = -t * np.log(self.y + self.delta)
        loss_vec = np.sum(loss_vec, axis=-1)  # per-sample loss; with one-hot t only one term is nonzero
        loss = np.sum(loss_vec)

        return loss_vec, loss

    def backward(self, mean=False):
        # Gradient of cross-entropy w.r.t. the logits: y - t
        dx = self.y - self.t
        if mean:
            batch_size = float(self.t.shape[0])
            dx /= batch_size  # average the gradient over the batch

        return dx
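
The analytic gradient from backward() can be verified against a finite-difference estimate; a minimal sketch with random inputs (the seed and shapes are arbitrary):

np.random.seed(0)
x = np.random.randn(4, 3)   # 4 samples, 3 classes
t = np.array([0, 2, 1, 2])
crit = SoftmaxLoss(classes=3)
_, loss = crit.forward(x, t)
dx = crit.backward()

eps = 1e-5
x2 = x.copy()
x2[0, 1] += eps             # nudge one logit
_, loss2 = SoftmaxLoss(classes=3).forward(x2, t)
print((loss2 - loss) / eps, dx[0, 1])  # the two values should nearly agree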

Smooth L1 loss

Formula reference
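
For reference, the smooth L1 loss (as used in Fast R-CNN) is defined piecewise on the difference $d = x - y$:

$$
\mathrm{smooth}_{L_1}(d) =
\begin{cases}
0.5\,d^2 & \text{if } |d| < 1 \\
|d| - 0.5 & \text{otherwise}
\end{cases}
$$

Its derivative is $d$ in the quadratic region and $\operatorname{sign}(d)$ in the linear region, which is exactly what the gradient returned below computes.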

def smooth_l1_loss(x, y):
    """Element-wise smooth L1 loss and its gradient (float ndarrays).
    x : prediction ndarray
    y : target ndarray of the same shape
    """
    diff = x - y
    diff_abs = np.abs(diff)

    # 0.5 * d^2 where |d| < 1, |d| - 0.5 elsewhere
    errorData = np.where(diff_abs < 1,
                         0.5 * diff_abs ** 2,
                         diff_abs - 0.5)
    loss = np.sum(errorData)

    # Gradient: d where |d| < 1, sign(d) elsewhere
    diff = np.where(diff_abs < 1, diff, np.sign(diff))

    return loss, diff
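
At the transition |d| = 1 the two branches agree (0.5 = 1 - 0.5), so the loss is continuous and its gradient saturates at +/-1; a quick check with made-up values:

x = np.array([0.0, 0.5, 1.0, 3.0])
y = np.zeros(4)
loss, grad = smooth_l1_loss(x, y)
print(loss)   # 0 + 0.125 + 0.5 + 2.5 = 3.125
print(grad)   # [0.  0.5 1.  1. ]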