Task overview:
In this section we study the remaining 14 loss functions in PyTorch.
Details:
Continuing from the previous section, this section covers the other 14 loss functions in PyTorch:
- nn.L1Loss
- nn.MSELoss
- nn.SmoothL1Loss
- nn.PoissonNLLLoss
- nn.KLDivLoss
- nn.MarginRankingLoss
- nn.MultiLabelMarginLoss
- nn.SoftMarginLoss
- nn.MultiLabelSoftMarginLoss
- nn.MultiMarginLoss
- nn.TripletMarginLoss
- nn.HingeEmbeddingLoss
- nn.CosineEmbeddingLoss
- nn.CTCLoss
III. Loss Functions in PyTorch
5. nn.L1Loss
6. nn.MSELoss
Test code for L1Loss and MSELoss:
import torch
import torch.nn as nn
import torch.nn.functional as F
import matplotlib.pyplot as plt
import numpy as np
from tools.common_tools import set_seed

set_seed(1)  # set the random seed

# ------------------------------------------- 5/6 L1 loss & MSE loss ----------------------------------------------
# flag = 0
flag = 1
if flag:
    inputs = torch.ones((2, 2))
    target = torch.ones((2, 2)) * 3

    loss_f = nn.L1Loss(reduction='none')
    loss = loss_f(inputs, target)
    print("input:{}\ntarget:{}\nL1 loss:{}".format(inputs, target, loss))

    loss_f_mse = nn.MSELoss(reduction='none')
    loss_mse = loss_f_mse(inputs, target)
    print("MSE loss:{}".format(loss_mse))
Output:
input:tensor([[1., 1.],
[1., 1.]])
target:tensor([[3., 3.],
[3., 3.]])
L1 loss:tensor([[2., 2.],
[2., 2.]])
MSE loss:tensor([[4., 4.],
[4., 4.]])
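As a quick sanity check, the element-wise definitions are |x - y| for L1 and (x - y)^2 for MSE, i.e. |1 - 3| = 2 and (1 - 3)^2 = 4 here. A minimal sketch of that check, appended to the script above and reusing inputs and target (the diff name is just for this illustration):
diff = inputs - target
print(torch.abs(diff))   # element-wise |x - y|, matches the L1 loss above
print(diff.pow(2))       # element-wise (x - y)^2, matches the MSE loss above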
7. nn.SmoothL1Loss
Test code:
# ------------------------------------------------- 7 Smooth L1 loss ----------------------------------------------
# flag = 0
flag = 1
if flag:
    inputs = torch.linspace(-3, 3, steps=500)
    target = torch.zeros_like(inputs)

    loss_f = nn.SmoothL1Loss(reduction='none')
    loss_smooth = loss_f(inputs, target)
    loss_l1 = np.abs(inputs.numpy())

    plt.plot(inputs.numpy(), loss_smooth.numpy(), label='Smooth L1 Loss')
    plt.plot(inputs.numpy(), loss_l1, label='L1 loss')
    plt.xlabel('x_i - y_i')
    plt.ylabel('loss value')
    plt.legend()
    plt.grid()
    plt.show()
Output: a plot of Smooth L1 Loss and L1 loss as functions of x_i - y_i (Smooth L1 is quadratic near zero and linear beyond |x_i - y_i| = 1).
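The curve follows the piecewise definition used by nn.SmoothL1Loss with its default threshold of 1: 0.5 * (x_i - y_i)^2 when |x_i - y_i| < 1, and |x_i - y_i| - 0.5 otherwise. A minimal check against that definition, reusing the tensors from the snippet above (diff and manual are just names for this check):
diff = inputs - target
manual = torch.where(diff.abs() < 1, 0.5 * diff.pow(2), diff.abs() - 0.5)
print(torch.allclose(manual, loss_smooth))   # expected: True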
8. nn.PoissonNLLLoss
Test code:
# ------------------------------------------------- 8 Poisson NLL Loss ----------------------------------------------
# flag = 0
flag = 1
if flag:
    inputs = torch.randn((2, 2))
    target = torch.randn((2, 2))

    loss_f = nn.PoissonNLLLoss(log_input=True, full=False, reduction='none')
    loss = loss_f(inputs, target)
    print("input:{}\ntarget:{}\nPoisson NLL loss:{}".format(inputs, target, loss))
Output:
input:tensor([[0.6614, 0.2669],
[0.0617, 0.6213]])
target:tensor([[-0.4519, -0.1661],
[-1.5228, 0.3817]])
Poisson NLL loss:tensor([[2.2363, 1.3503],
[1.1575, 1.6242]])
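With log_input=True (and full=False), the per-element loss is exp(input) - target * input. A minimal check, reusing inputs, target and loss from the snippet above:
manual = torch.exp(inputs) - target * inputs
print(torch.allclose(manual, loss))   # expected: True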
9. nn.KLDivLoss
Test code:
# ----------------------------------------- 9 KL Divergence Loss ----------------------------------------------
# flag = 0
flag = 1
if flag:
    inputs = torch.tensor([[0.5, 0.3, 0.2], [0.2, 0.3, 0.5]])
    # nn.KLDivLoss does not implement the textbook formula y_i * log(y_i / x_i) directly: it assumes the
    # input is already log-probabilities and computes y_i * (log(y_i) - x_i), i.e. one log(input) operation
    # fewer than the definition. Proper usage is therefore to pass log-probabilities, e.g.:
    # inputs = F.log_softmax(inputs, 1)
    # (left commented out here so that the output below, computed on the raw probabilities, is reproduced)
    target = torch.tensor([[0.9, 0.05, 0.05], [0.1, 0.7, 0.2]], dtype=torch.float)

    loss_f_none = nn.KLDivLoss(reduction='none')
    loss_f_mean = nn.KLDivLoss(reduction='mean')
    loss_f_bs_mean = nn.KLDivLoss(reduction='batchmean')

    loss_none = loss_f_none(inputs, target)
    loss_mean = loss_f_mean(inputs, target)
    loss_bs_mean = loss_f_bs_mean(inputs, target)

    print("loss_none:\n{}\nloss_mean:\n{}\nloss_bs_mean:\n{}".format(loss_none, loss_mean, loss_bs_mean))
Output:
loss_none:
tensor([[-0.5448, -0.1648, -0.1598],
[-0.2503, -0.4597, -0.4219]])
loss_mean:
-0.3335360288619995
loss_bs_mean:
-1.0006080865859985
'mean' divides the sum of all elements by the number of elements (6), while 'batchmean' divides the sum by the batch size (2).
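A minimal check of the element-wise term and of the two reductions, reusing the tensors from the snippet above (manual is just a name for this check):
manual = target * (torch.log(target) - inputs)   # what nn.KLDivLoss computes on this input
print(torch.allclose(manual, loss_none))         # expected: True
print(manual.sum() / manual.numel())             # 'mean':      sum / 6 ≈ -0.3335
print(manual.sum() / inputs.shape[0])            # 'batchmean': sum / 2 ≈ -1.0006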
10. nn.MarginRankingLoss
Test code:
# ---------------------------------------------- 10 Margin Ranking Loss --------------------------------------------
# flag = 0
flag = 1
if flag:
    x1 = torch.tensor([[1], [2], [3]], dtype=torch.float)
    x2 = torch.tensor([[2], [2], [2]], dtype=torch.float)
    target = torch.tensor([1, 1, -1], dtype=torch.float)

    loss_f_none = nn.MarginRankingLoss(margin=0, reduction='none')
    loss = loss_f_none(x1, x2, target)
    print(loss)
Output:
tensor([[1., 1., 0.],
[0., 0., 0.],
[0., 0., 1.]])
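The per-element definition is max(0, -y * (x1 - x2) + margin); the 3x3 shape above comes from broadcasting the (3, 1) inputs against the (3,) target. A minimal check, reusing the tensors from the snippet above:
manual = torch.clamp(-target * (x1 - x2), min=0)   # margin = 0 here
print(torch.allclose(manual, loss))                # expected: True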
11. nn.MultiLabelMarginLoss
Test code:
# ---------------------------------------------- 11 Multi Label Margin Loss -----------------------------------------
# flag = 0
flag = 1
if flag:
    x = torch.tensor([[0.1, 0.2, 0.4, 0.8]])
    y = torch.tensor([[0, 3, -1, -1]], dtype=torch.long)

    loss_f = nn.MultiLabelMarginLoss(reduction='none')
    loss = loss_f(x, y)
    print(loss)
Output:
tensor([0.8500])
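Here the target labels are classes {0, 3} (the trailing -1 entries are padding) and the non-label classes are {1, 2}. The loss sums max(0, 1 - (x[label] - x[non-label])) over all label / non-label pairs and divides by the number of classes. A manual check of the 0.85 above (all hinge terms happen to be positive here):
item = (1 - (0.1 - 0.2)) + (1 - (0.1 - 0.4)) + (1 - (0.8 - 0.2)) + (1 - (0.8 - 0.4))
print(item / 4)   # ~0.85, matching the output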
12. nn.SoftMarginLoss
Test code:
# ---------------------------------------------- 12 SoftMargin Loss -----------------------------------------
# flag = 0
flag = 1
if flag:
    inputs = torch.tensor([[0.3, 0.7], [0.5, 0.5]])
    target = torch.tensor([[-1, 1], [1, -1]], dtype=torch.float)

    loss_f = nn.SoftMarginLoss(reduction='none')
    loss = loss_f(inputs, target)
    print("SoftMargin: ", loss)
Output:
SoftMargin: tensor([[0.8544, 0.4032],
[0.4741, 0.9741]])
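The per-element definition is log(1 + exp(-y * x)). A minimal check, reusing inputs, target and loss from the snippet above:
manual = torch.log(1 + torch.exp(-target * inputs))
print(torch.allclose(manual, loss))   # expected: True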
13. nn.MultiLabelSoftMarginLoss
Test code:
# ---------------------------------------------- 13 MultiLabel SoftMargin Loss -----------------------------------------
# flag = 0
flag = 1
if flag:
    inputs = torch.tensor([[0.3, 0.7, 0.8]])
    target = torch.tensor([[0, 1, 1]], dtype=torch.float)

    loss_f = nn.MultiLabelSoftMarginLoss(reduction='none')
    loss = loss_f(inputs, target)
    print("MultiLabel SoftMargin: ", loss)
Output:
MultiLabel SoftMargin: tensor([0.5429])
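Per sample, this averages the binary cross-entropy on sigmoid outputs over the C classes: -(1/C) * sum over classes of [ y * log(sigmoid(x)) + (1 - y) * log(1 - sigmoid(x)) ]. A minimal check, reusing inputs and target from the snippet above (sig and manual are just names for this check):
sig = torch.sigmoid(inputs)
manual = -(target * torch.log(sig) + (1 - target) * torch.log(1 - sig)).mean(dim=1)
print(manual)   # tensor([0.5429])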
14. nn.MultiMarginLoss
Test code:
# ---------------------------------------------- 14 Multi Margin Loss -----------------------------------------
# flag = 0
flag = 1
if flag:
    x = torch.tensor([[0.1, 0.2, 0.7], [0.2, 0.5, 0.3]])
    y = torch.tensor([1, 2], dtype=torch.long)

    loss_f = nn.MultiMarginLoss(reduction='none')
    loss = loss_f(x, y)
    print("Multi Margin Loss: ", loss)
Output:
Multi Margin Loss: tensor([0.8000, 0.7000])
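With the defaults (margin = 1, p = 1), each sample's loss is (1/C) * sum over i != y of max(0, margin - x[y] + x[i]). A manual check of the first sample (y = 1, so x[y] = 0.2):
item_0 = (max(0, 1 - 0.2 + 0.1) + max(0, 1 - 0.2 + 0.7)) / 3
print(item_0)   # ~0.8, matching the first element of the output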
15. nn.TripletMarginLoss
Test code:
# ---------------------------------------------- 15 Triplet Margin Loss -----------------------------------------
# flag = 0
flag = 1
if flag:
    anchor = torch.tensor([[1.]])
    pos = torch.tensor([[2.]])
    neg = torch.tensor([[0.5]])

    loss_f = nn.TripletMarginLoss(margin=1.0, p=1)
    loss = loss_f(anchor, pos, neg)
    print("Triplet Margin Loss", loss)
Output:
Triplet Margin Loss tensor(1.5000)
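The loss is max(0, d(anchor, pos) - d(anchor, neg) + margin); with p = 1 the distances here are |1 - 2| = 1 and |1 - 0.5| = 0.5, so the loss is 1 - 0.5 + 1 = 1.5. A minimal check of that arithmetic:
d_ap, d_an = abs(1. - 2.), abs(1. - 0.5)
print(max(0, d_ap - d_an + 1.0))   # 1.5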
16. nn.HingeEmbeddingLoss
Test code:
# ---------------------------------------------- 16 Hinge Embedding Loss -----------------------------------------
# flag = 0
flag = 1
if flag:
    inputs = torch.tensor([[1., 0.8, 0.5]])
    target = torch.tensor([[1, 1, -1]])

    loss_f = nn.HingeEmbeddingLoss(margin=1, reduction='none')
    loss = loss_f(inputs, target)
    print("Hinge Embedding Loss", loss)
Output:
Hinge Embedding Loss tensor([[1.0000, 0.8000, 0.5000]])
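Per element, the loss is x when y = 1 and max(0, margin - x) when y = -1, so the third value is max(0, 1 - 0.5) = 0.5. A minimal check, reusing inputs and target from the snippet above:
manual = torch.where(target == 1, inputs, torch.clamp(1. - inputs, min=0))
print(manual)   # tensor([[1.0000, 0.8000, 0.5000]])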
17. nn.CosineEmbeddingLoss
Test code:
# ---------------------------------------------- 17 Cosine Embedding Loss -----------------------------------------
# flag = 0
flag = 1
if flag:
    x1 = torch.tensor([[0.3, 0.5, 0.7], [0.3, 0.5, 0.7]])
    x2 = torch.tensor([[0.1, 0.3, 0.5], [0.1, 0.3, 0.5]])
    target = torch.tensor([[1, -1]], dtype=torch.float)

    loss_f = nn.CosineEmbeddingLoss(margin=0., reduction='none')
    loss = loss_f(x1, x2, target)
    print("Cosine Embedding Loss", loss)
Output:
Cosine Embedding Loss tensor([[0.0167, 0.9833]])
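The loss is 1 - cos(x1, x2) when y = 1 and max(0, cos(x1, x2) - margin) when y = -1; the cosine similarity of these two vectors is about 0.9833, which gives the two values above. A minimal check, reusing x1 and x2 from the snippet above:
cos = F.cosine_similarity(x1, x2, dim=1)
print(1 - cos[0], torch.clamp(cos[1] - 0., min=0))   # 0.0167 and 0.9833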
18. nn.CTCLoss
Test code:
# ---------------------------------------------- 18 CTC Loss -----------------------------------------
# flag = 0
flag = 1
if flag:
    T = 50      # Input sequence length
    C = 20      # Number of classes (including blank)
    N = 16      # Batch size
    S = 30      # Target sequence length of longest target in batch
    S_min = 10  # Minimum target length, for demonstration purposes

    # Initialize random batch of input vectors, for *size = (T, N, C)
    inputs = torch.randn(T, N, C).log_softmax(2).detach().requires_grad_()

    # Initialize random batch of targets (0 = blank, 1:C = classes)
    target = torch.randint(low=1, high=C, size=(N, S), dtype=torch.long)

    input_lengths = torch.full(size=(N,), fill_value=T, dtype=torch.long)
    target_lengths = torch.randint(low=S_min, high=S, size=(N,), dtype=torch.long)

    ctc_loss = nn.CTCLoss()
    loss = ctc_loss(inputs, target, input_lengths, target_lengths)
    print("CTC loss: ", loss)
Output:
CTC loss: tensor(7.5385, grad_fn=<MeanBackward0>)
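nn.CTCLoss defaults to reduction='mean' (per-sequence losses divided by their target lengths, then averaged over the batch), which is why the result above is a single scalar with a MeanBackward0 grad_fn. To see one loss value per sequence instead, a minimal sketch reusing the tensors above:
ctc_loss_none = nn.CTCLoss(reduction='none')
loss_per_seq = ctc_loss_none(inputs, target, input_lengths, target_lengths)
print(loss_per_seq.shape)   # torch.Size([16]), one loss value per sequence in the batch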