Loss functions in Python

Introduction

A summary of the softmax family of loss functions.

One-hot encoding

import numpy as np

def one_hot(label, classes, background_id, dtype=None):
    """Convert integer labels to one-hot vectors.
    label         : integer class indices, shape -1             ndarray
    classes       : total number of classes
    background_id : column offset reserved for the background class
    return        : label_onehot, shape -1 * classes            ndarray
    """
    if dtype is None:
        dtype = np.float32
    # np.eye(..., k=background_id) shifts the identity diagonal, so label i
    # is mapped to column i + background_id
    label_onehot = np.eye(classes, k=background_id, dtype=dtype)[label]
    return label_onehot
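
As a quick sanity check (the labels and class count below are illustrative), with background_id=0 the function reduces to plain one-hot encoding:

labels = np.array([0, 2, 1])
print(one_hot(labels, classes=3, background_id=0))
# [[1. 0. 0.]
#  [0. 0. 1.]
#  [0. 1. 0.]]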

softmax

import numpy as np

def softmax(X, SUBMAX=True):
    """Softmax along the last axis.
    X shape: batch * -1 * classes    ndarray
    or     :         -1 * classes    ndarray
    or     :              classes    ndarray
    """
    orig_shape, orig_dtype = X.shape, X.dtype
    if SUBMAX:  # subtract the row-wise max to prevent overflow in exp
        X_exp = np.exp(X - np.max(X, axis=-1, keepdims=True))
    else:
        X_exp = np.exp(X)
    X_exp_sum = np.sum(X_exp, axis=-1, keepdims=True)
    X = X_exp / X_exp_sum
    assert X.shape == orig_shape
    return X.astype(orig_dtype)
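
A small demonstration of why the row-wise max is subtracted (the logit values are illustrative): without SUBMAX, np.exp overflows for large inputs and the result degenerates to nan.

logits = np.array([[1000.0, 1001.0, 1002.0],
                   [  -1.0,    0.0,    1.0]])
print(softmax(logits).sum(axis=-1))     # each row sums to 1
print(softmax(logits, SUBMAX=False))    # first row becomes nan: exp(1000) overflows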

softmax loss

def cross_entropy_error(y, t):
    delta = 1e-8                           # prevent log(0) = -inf
    return - np.sum(t * np.log(y + delta)) # np.log is ln

def softmax_loss(x, t, t_is_onehot=False, background_id=0):
    """dtype : float
    x   :     -1 * classes    ndarray of scores
    t   :     -1              ndarray of integer labels    t_is_onehot=False
    or  :     -1 * classes    ndarray, one-hot format      t_is_onehot=True
    """
    if not t_is_onehot:
        classes = x.shape[1]
        t = one_hot(t, classes, background_id)

    y = softmax(x)
    loss = cross_entropy_error(y, t)
    return loss

class SoftmaxLoss:
    def __init__(self, classes, background=0, onehot=False):
        self.classes = classes
        self.background = background
        self.onehot = onehot
        self.delta = 1e-8  # prevent log(0) = -inf
        self.loss = None
        self.y = None      # softmax output
        self.t = None      # label (one-hot)

    def forward(self, x, t):
        """dtype : float
        x   :     -1 * classes    ndarray
        t   :     -1              ndarray of integer labels    self.onehot=False
        or  :     -1 * classes    ndarray, one-hot format      self.onehot=True
        """
        if not self.onehot:
            t = one_hot(t, self.classes, self.background, dtype=x.dtype)
        self.t = t
        self.y = softmax(x)
        loss_vec = - t * np.log(self.y + self.delta)
        # t is one-hot, so the max over the class axis picks the single
        # non-zero term (equivalent to summing over that axis)
        loss_vec = np.max(loss_vec, axis=-1)
        loss = np.sum(loss_vec)

        return loss_vec, loss

    def backward(self, mean=False):
        dx = (self.y - self.t)
        if mean:
            batch_size = float(self.t.shape[0])
            dx /= batch_size      # average the gradient over the batch

        return dx
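
A minimal usage sketch for SoftmaxLoss (the random inputs and labels are illustrative):

np.random.seed(0)
x = np.random.randn(4, 3).astype(np.float32)   # 4 samples, 3 classes
t = np.array([0, 2, 1, 2])                     # integer labels, converted internally

layer = SoftmaxLoss(classes=3)                 # onehot=False by default
loss_vec, loss = layer.forward(x, t)           # per-sample losses and their sum
dx = layer.backward(mean=True)                 # gradient averaged over the batch
print(loss_vec.shape, loss, dx.shape)          # (4,) <scalar> (4, 3)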

smooth L1 loss

Formula reference
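
For reference, the smooth L1 loss (the form popularized by Fast R-CNN) and its derivative, written with d = x - y, are:

    smooth_L1(d) = 0.5 * d^2      if |d| < 1
                 = |d| - 0.5      otherwise

    d/dd smooth_L1(d) = d         if |d| < 1
                      = sign(d)   otherwise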

def smooth_l1_loss(x, y):
    """dtype : float
    x shape  : ndarray
    y shape  : ndarray, same shape as x
    return   : (loss, gradient of loss w.r.t. x)
    """
    diff = x - y
    diff_abs = np.abs(diff)

    # loss: 0.5 * d^2 where |d| < 1, |d| - 0.5 elsewhere
    errorData = diff_abs.copy()
    errorData[diff_abs < 1] = 0.5 * diff_abs[diff_abs < 1] ** 2
    errorData[diff_abs >= 1] = diff_abs[diff_abs >= 1] - 0.5
    loss = np.sum(errorData)

    # gradient: d where |d| < 1, sign(d) elsewhere
    diff[diff_abs >= 1] = np.sign(diff[diff_abs >= 1])

    return loss, diff
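
A quick check of both branches of smooth_l1_loss (values chosen for illustration):

x = np.array([0.5, 3.0])
y = np.array([0.0, 0.0])
loss, grad = smooth_l1_loss(x, y)
print(loss)   # 0.5*0.5**2 + (3.0 - 0.5) = 2.625
print(grad)   # [0.5 1. ]  -> d where |d| < 1, sign(d) otherwise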