PyTorch Tutorial (9): Loss Functions (Bonus Chapter)

While learning PyTorch, I found that some of its loss functions are not easy to understand from the documentation alone. To help myself, I re-implemented PyTorch's loss functions in pure Python and NumPy and confirmed that the results are the same. To me, Python code is easier to follow than the mathematical formulas, especially when you can run and modify it.
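The comparisons below can also be checked programmatically instead of by eye. Here is a minimal sketch of that idea; the assert_close helper and the use of np.testing.assert_allclose are my own additions, not part of the original post:

import numpy as np
import torch
import torch.nn as nn

def assert_close(torch_result, numpy_result, atol=1e-6):
    # Raise if the PyTorch loss and the NumPy re-implementation disagree.
    np.testing.assert_allclose(torch_result.detach().numpy(), np.asarray(numpy_result), atol=atol)

# Example with L1Loss: both the element-wise and the mean-reduced values should match.
x = torch.randn(2, 3)
y = torch.randn(2, 3)
assert_close(nn.L1Loss(reduction='none')(x, y), np.abs(x.numpy() - y.numpy()))
assert_close(nn.L1Loss()(x, y), np.abs(x.numpy() - y.numpy()).mean())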

import numpy as np
import torch
import torch.nn as nn

print(torch.__version__)
# L1Loss
x = torch.randn(2, 3)
y = torch.randn(2, 3)
print(x)
print(y)
print(nn.L1Loss()(x, y))
print(nn.L1Loss(reduction='none')(x, y))
print(abs(x.numpy() - y.numpy()))
print(abs(x.numpy() - y.numpy()).mean())


# MSELoss
x = torch.randn(2, 3)
y = torch.randn(2, 3)
print(x)
print(y)
print(nn.MSELoss(reduction='none')(x, y))
print(nn.MSELoss()(x, y))
print((x.numpy() - y.numpy())**2)
print(((x.numpy() - y.numpy())**2).mean())

# CrossEntropyLoss
x = torch.randn(2, 4)
y = torch.LongTensor(2).random_(4)
print(x)
print(y)
print(nn.CrossEntropyLoss(reduction='none')(x, y))
print(nn.CrossEntropyLoss()(x, y))
x = x.numpy()
y = y.numpy()

lst = []
for k in range(len(x)):
    lst.append(-np.log(np.exp(x[k][y[k]]) / np.exp(x[k]).sum()))
print(lst, np.mean(lst))
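
A quick aside (my own check, not from the original post): CrossEntropyLoss is exactly LogSoftmax followed by NLLLoss, which the next section re-implements. The equivalence can be verified directly:

x = torch.randn(2, 4)
y = torch.LongTensor(2).random_(4)
print(nn.CrossEntropyLoss()(x, y))
print(nn.NLLLoss()(nn.LogSoftmax(dim=1)(x), y))  # same value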

# NLLLoss
# Logsoftmax
x = torch.randn(2, 4)
print(x)
y = nn.LogSoftmax(dim=1)(x)
print(y)
x = x.numpy()
lst = []
for k in range(len(x)):
    lst.append(np.log( np.exp(x[k]) / np.exp(x[k]).sum()))
print(lst)
# NLLLoss
x0 = torch.randn(3, 4)
x = nn.LogSoftmax(dim=1)(x0)
print(x0, x)
y = torch.LongTensor(3).random_(4)
print(y)
print(nn.NLLLoss()(x, y))
print(nn.NLLLoss(reduction='none')(x, y))
lst = []
for k in range(len(x)):
    lst.append(-x[k][y[k]].item())

print(lst, np.mean(lst))

# PoissonNLLLoss
x = torch.randn(2, 4)
print(x)
y = torch.randn(2, 4)
print(y)
# full=True includes the Stirling approximation term, matching the NumPy version below
print(nn.PoissonNLLLoss(full=True)(x, y))
print(nn.PoissonNLLLoss(full=True, reduction='none')(x, y))
x = x.numpy()
y = y.numpy()
# Stirling approximation: target*log(target) - target + 0.5*log(2*pi*target), applied when target > 1
def sterling_approx(y):
    return y*np.log(y) - y + 0.5*np.log(2*np.pi*y)
lst = []
for k in range(len(x)):
    lsti = []
    for i in range(len(x[k])):
        lss = np.exp(x[k,i])-y[k,i]*x[k,i] + (sterling_approx(y[k,i]) if y[k,i]>1 else 0)
        lsti.append(lss)
    lst.append(lsti)
print(np.array(lst))
print(np.mean(lst))

# KLDivLoss
x = torch.rand(2, 3)
y = torch.rand(2, 3)
print(x)
xlog = torch.log(x)
print(xlog)
print(y)
print(nn.KLDivLoss()(xlog, y))
print(nn.KLDivLoss(reduction='none')(xlog, y))
x = x.numpy()
xlog = np.log(x)
y = y.numpy()
lst = []
for i in range(len(x)):
    lsti = []
    for j in range(len(x[i])):
        # xlog is already log(x), so only y * (log(y) - log(x)) is needed
        lsti.append(y[i][j] * (np.log(y[i][j]) - xlog[i][j]))
    lst.append(lsti)
print(np.array(lst))
print(np.mean(lst))

# BCELoss
# Sigmoid
x = torch.randn(2, 4)
y = nn.Sigmoid()(x)
print(x)
print(y)
x = x.numpy()
print(1 / (1 + np.exp(-x)))
# single label
x0 = torch.randn(3)
x = nn.Sigmoid()(x0)
print(x)
y = torch.FloatTensor(3).random_(2)
print(y)
print(nn.BCELoss()(x, y))
print(nn.BCELoss(reduction='none')(x, y))
loss = nn.BCELoss(reduction='sum')
lss = loss(x, y)
print(lss)
x = x.numpy()
y = y.numpy()
lst = []
for i in range(len(x)):
    lst.append(-np.log(x[i]) if y[i]==1 else -np.log(1-x[i]))
print(lst, np.mean(lst))
# Equivalently
lst = []
for i in range(len(x)):
    lst.append(-np.log(x[i])*y[i] + -np.log(1-x[i])*(1-y[i]))
print(lst, np.mean(lst))

# multilabel
x0 = torch.randn(3, 2)
x = nn.Sigmoid()(x0)
print(x)
y = torch.FloatTensor(3, 2).random_(2)
print(y)
print(nn.BCELoss()(x, y))
print(nn.BCELoss(reduction='none')(x, y))
x = x.numpy()
y = y.numpy()
lst = []
for i in range(len(x)):
    lsti = []
    for j in range(len(x[i])):
        lsti.append(-np.log(x[i][j]) if y[i][j]==1 else -np.log(1-x[i][j]))
    lst.append(lsti)
print(np.array(lst), np.mean(lst))
# Equivalently
lst = []
for i in range(len(x)):
    lst.append(-np.log(x[i])*y[i] + -np.log(1-x[i])*(1-y[i]))
print(np.array(lst), np.mean(lst))

# BCEWithLogitsLoss
# This is simply BCELoss with a sigmoid applied to the input first.
# single label
x = torch.randn(3)
print(x)
xs = nn.Sigmoid()(x)
print(xs)
y = torch.FloatTensor(3).random_(2)
print(y)
print(nn.BCELoss()(xs, y))
print(nn.BCEWithLogitsLoss()(x, y))
#multilabel
x = torch.randn(3, 2)
print(x)
xs = nn.Sigmoid()(x)
print(xs)
y = torch.FloatTensor(3, 2).random_(2)
print(y)
print(nn.BCELoss()(xs, y))
print(nn.BCEWithLogitsLoss()(x, y))

# MarginRankingLoss
x1 = torch.randn(3)
x2 = torch.randn(3)
y = torch.FloatTensor(np.random.choice([1, -1], 3))

print(x1, x2, y)
print(nn.MarginRankingLoss(margin=0.1)(x1, x2, y))
x1 = x1.numpy()
x2 = x2.numpy()
y = y.numpy()
margin=0.1
lst = []
for i in range(len(x1)):
    lst.append(max(0, -y[i]*(x1[i]-x2[i]) + margin))

print(lst, np.mean(lst))

# HingeEmbeddingLoss
x = torch.randn(2, 3)
y = torch.FloatTensor(np.random.choice([-1, 1], (2, 3)))
print(x)
print(y)
print(nn.HingeEmbeddingLoss(margin=1)(x, y))
x = x.numpy()
y = y.numpy()
margin=1
lst=[]

for i in range(len(x)):
    lsti = []
    for j in range(len(x[i])):
        if y[i][j]==1:
            lsti.append(x[i][j])
        else:
            lsti.append(max(0, margin-x[i][j]))
    lst.append(lsti)
print(np.array(lst))
print(np.mean(lst))

# MultiLabelMarginLoss
# one-sample example
x = torch.randn(1, 4)
y = torch.LongTensor(1, 4).random_(-1, 4)
print(x)
print(y)
print(nn.MultiLabelMarginLoss()(x, y))
x = x.numpy()
y = y.numpy()
lst = []
for k in range(len(x)):
    sm = 0
    js = []
    for j in range(len(y[k])):
        if y[k][j]<0: break 
        js.append(y[k][j])
    for i in range(len(x[k])):
        for j in js:
            if (i not in js) and (i!=j):
                print(i, j)  # the (non-target i, target j) pairs that contribute to the loss
                sm += max(0, 1-(x[k][j] - x[k][i]))
    lst.append(sm/len(x[k]))
print(lst, np.mean(lst))
# multi-sample example
x = torch.randn(3, 4)
y = torch.LongTensor(3, 4).random_(-1, 4)
print(x)
print(y)
print(nn.MultiLabelMarginLoss()(x, y))
x = x.numpy()
y = y.numpy()
lst = []
for k in range(len(x)):
    sm = 0
    js = []
    for j in range(len(y[k])):
        if y[k][j]<0: break 
        js.append(y[k][j])
    for i in range(len(x[k])):
        for j in js:
            if (i not in js) and (i!=j):
                sm += max(0, 1-(x[k][j] - x[k][i]))
    lst.append(sm/len(x[k]))
print(lst, np.mean(lst))

# SmoothL1Loss
x = torch.randn(2, 3)
y = torch.randn(2, 3)
print(nn.SmoothL1Loss()(x, y))
print(nn.SmoothL1Loss(reduction='none')(x, y))
x = x.numpy() 
y = y.numpy()
def smoothl1loss(x, y):
    if abs(x-y)<1: return 1/2*(x-y)**2
    else: return abs(x-y)-1/2
lst = []
for i in range(len(x)):
    lsti=[]
    for j in range(len(x[i])):
        lsti.append(smoothl1loss(x[i][j], y[i][j]))
    lst.append(lsti)
print(np.array(lst), np.mean(lst))

# SoftMarginLoss
x = torch.randn(2, 4)
y = torch.FloatTensor(np.random.choice([-1, 1], (2, 4)))
print(x)
print(y)
print(nn.SoftMarginLoss()(x, y))
x = x.numpy()
y = y.numpy()
lst = []
for k in range(len(x)):
    sm = 0
    for i in range(len(x[k])):
        sm += np.log(1 + np.exp(-y[k][i]*x[k][i]))
    lst.append(sm/len(x[k]))

print(lst, np.mean(lst))

# MultiLabelSoftMarginLoss
x = torch.randn(2, 4)
y = torch.FloatTensor(2, 4).random_(2)
print(x)
print(y)
print(nn.MultiLabelSoftMarginLoss()(x, y))
x = x.numpy()
y = y.numpy()
lst = []
for k in range(len(x)):
    sm = 0
    for i in range(len(x[k])):
        sm -= y[k, i]*np.log(np.exp(x[k, i])/(1+np.exp(x[k, i]))) +\
            (1-y[k, i])*np.log(1/(1+np.exp(x[k, i])))
    lst.append(sm/len(x[k]))

print(lst, np.mean(lst))

# CosineEmbeddingLoss
x1 = torch.randn(2, 3)
x2 = torch.randn(2, 3)
y = torch.FloatTensor(np.random.choice([1, -1], 2))
print(x1)
print(x2)
print(y)
print(nn.CosineEmbeddingLoss(margin=0.1)(x1, x2, y))
x1 = x1.numpy()
x2 = x2.numpy()
y = y.numpy()
margin=0.1
from scipy.spatial.distance import cosine
def cos(x, y): return 1-cosine(x, y)
lst = []
for k in range(len(x1)):
    if y[k] == 1: lst.append(1-cos(x1[k], x2[k]))
    elif y[k] == -1: lst.append(max(0, cos(x1[k], x2[k])-margin))
print(lst, np.mean(lst))

# MultiMarginLoss
x = torch.randn(2, 4)
y = torch.LongTensor(2).random_(4)
print(x)
print(y)
print(nn.MultiMarginLoss(margin=0.9, p=2)(x, y))
x = x.numpy()
y = y.numpy()
p=2
margin=0.9
lst = []
for k in range(len(x)):
    sm = 0
    for i in range(len(x[k])):
        if i!= y[k]:
            sm += max(0, margin - x[k, y[k]] + x[k, i])**p
    lst.append(sm/len(x[k]))

print(lst, np.mean(lst))

# TripletMarginLoss
x1 = torch.randn(2, 3)
x2 = torch.randn(2, 3)
x3 = torch.randn(2, 3)
margin = 0.9
p = 2

print(x1)
print(nn.TripletMarginLoss(margin=margin, p=p)(x1, x2, x3))
x1 = x1.numpy()
x2 = x2.numpy()
x3 = x3.numpy()
def d(x1, x2, p):
    return (np.abs(x1 - x2)**p).sum()**(1/p)
lst = []
for k in range(len(x1)):
    # the hinge term depends only on the sample, so compute it once per triplet
    lst.append(max(d(x1[k], x2[k], p) - d(x1[k], x3[k], p) + margin, 0))

print(lst, np.mean(lst))


#Focal Loss
class FocalLoss(nn.Module):
    # Wraps focal loss around existing loss_fcn(), i.e. criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5)
    def __init__(self, loss_fcn, gamma=1.5, alpha=0.25):
        super(FocalLoss, self).__init__()
        self.loss_fcn = loss_fcn  # must be nn.BCEWithLogitsLoss()
        self.gamma = gamma
        self.alpha = alpha
        self.reduction = loss_fcn.reduction
        self.loss_fcn.reduction = 'none'  # required to apply FL to each element

    def forward(self, pred, true):
        loss = self.loss_fcn(pred, true)
        # p_t = torch.exp(-loss)
        # loss *= self.alpha * (1.000001 - p_t) ** self.gamma  # non-zero power for gradient stability

        # TF implementation https://github.com/tensorflow/addons/blob/v0.7.1/tensorflow_addons/losses/focal_loss.py
        pred_prob = torch.sigmoid(pred)  # prob from logits
        p_t = true * pred_prob + (1 - true) * (1 - pred_prob)
        alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha)
        modulating_factor = (1.0 - p_t) ** self.gamma
        loss *= alpha_factor * modulating_factor

        if self.reduction == 'mean':
            return loss.mean()
        elif self.reduction == 'sum':
            return loss.sum()
        else:  # 'none'
            return loss
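
Unlike the losses above, this class is shown without a verification step. Here is one in the same spirit; the NumPy lines below are my own re-computation of the formula in forward() (with gamma=1.5 and alpha=0.25 picked arbitrarily), not code from the original post:

x = torch.randn(3, 2)
y = torch.FloatTensor(3, 2).random_(2)
print(FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5, alpha=0.25)(x, y))
x = x.numpy()
y = y.numpy()
gamma, alpha = 1.5, 0.25
p = 1 / (1 + np.exp(-x))                          # probabilities from logits
p_t = y * p + (1 - y) * (1 - p)
alpha_factor = y * alpha + (1 - y) * (1 - alpha)
bce = -(y * np.log(p) + (1 - y) * np.log(1 - p))  # element-wise BCE
print(np.mean(alpha_factor * (1.0 - p_t) ** gamma * bce))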

References

https://zhang-yang.medium.com/pytorch-loss-funtions-in-plain-python-b79c05f8b53f
