"""PyTorch loss functions: nn.CrossEntropyLoss() and nn.NLLLoss().

Demonstrates that CrossEntropyLoss(logits, target) is exactly
LogSoftmax + NLLLoss, shows a manual NLLLoss via a one-hot target,
and finally applies label smoothing to that one-hot target.
"""
import torch
import torch.nn as nn
import math

# One sample of raw logits over 3 classes; the true class is index 0.
# torch.tensor(...) is the modern factory (the legacy torch.Tensor
# constructor infers dtype from the class, not the data).
logits = torch.tensor([[-0.7715, -0.6205, -0.2562]])
target = torch.tensor([0])

# 1) CrossEntropyLoss applied directly to raw logits.
cross_entropy = nn.CrossEntropyLoss()
output = cross_entropy(logits, target)
print(output)

# 2) Equivalent decomposition: LogSoftmax over classes, then NLLLoss.
log_softmax = nn.LogSoftmax(dim=1)
nll_loss = nn.NLLLoss()
log_probs = log_softmax(logits)
output = nll_loss(log_probs, target)
print('output:', output)

# 3) Manual NLLLoss: build a one-hot target, then average the negative
#    log-probability assigned to the true class — this line IS what
#    nn.NLLLoss() computes.
targets = torch.zeros(log_probs.size()).scatter_(1, target.unsqueeze(1).data.cpu(), 1)  # one-hot form
out = (-targets * log_probs).sum(dim=-1).mean()  # picks out class 0: (1, 0, 0)
print(out)

print("*****************")
print(log_probs)
print(targets)

# 4) Label smoothing: soften the one-hot target. The class count is
#    derived from the tensor instead of being hard-coded (was 3).
smoothing = 0.1
num_classes = log_probs.size(-1)
targets = (1 - smoothing) * targets + smoothing / num_classes
loss = (-targets * log_probs).sum(-1).mean()
print("标签平滑损失")  # "label smoothing loss"
print(loss)
print(targets)  # tensor([[1., 0., 0.]]) -> tensor([[0.9333, 0.0333, 0.0333]])

# Expected output:
#   tensor(1.3447)
#   output: tensor(1.3447)
#   tensor(1.3447)
#   *****************
#   tensor([[-1.3447, -1.1937, -0.8294]])
#   tensor([[1., 0., 0.]])
#   标签平滑损失
#   tensor(1.3225)
#   tensor([[0.9333, 0.0333, 0.0333]])