Understanding CrossEntropyLoss and BCE in PyTorch

1. CrossEntropyLoss

import torch
import torch.nn as nn
import torch.nn.functional as F

def cross():
    x_input = torch.randn(4, 3)  # 4 samples, 3 classes (class ids 0, 1, 2)
    print('x_input:\n', x_input)
    y_target = torch.tensor([1, 2, 0, 2])  # target class ids for the 4 samples
    print('y_target:\n', y_target)

    softmax = nn.Softmax(dim=1)  # softmax along dim=1, so each row sums to 1
    soft = softmax(x_input)  # soft_i = exp(x_i) / sum_j exp(x_j)
    print('softmax:\n', soft)  # shape (4, 3)

    log_softmax = nn.LogSoftmax(dim=1)  # log-softmax along dim=1
    log_soft = log_softmax(x_input)  # log_soft_i = log(soft_i)
    print('log_softmax:\n', log_soft)  # shape (4, 3)

    nll_loss = nn.NLLLoss()  # negative log likelihood loss, reduction='mean' by default
    loss = nll_loss(log_soft, y_target)  # mean over samples of -log_soft[sample, label]
    print('nll_loss:\n', loss)  # scalar

    nll_loss2 = nn.NLLLoss(reduction='none')  # no reduction: one value per sample
    loss2 = nll_loss2(log_soft, y_target)  # loss2[i] = -log_soft[i, y_target[i]]
    print('nll_loss2:\n', loss2)  # shape (4,)

    cross_entropy_loss = nn.CrossEntropyLoss()  # CrossEntropyLoss = LogSoftmax + NLLLoss
    ce_loss = cross_entropy_loss(x_input, y_target)  # mean cross-entropy over all samples
    print('ce_loss:\n', ce_loss)  # scalar

    cross_entropy_loss2 = nn.CrossEntropyLoss(reduction='none')  # per-sample cross-entropy
    ce_loss2 = cross_entropy_loss2(x_input, y_target)  # one loss value per sample
    print('ce_loss2:\n', ce_loss2)  # shape (4,)

cross()

x_input:
 tensor([[ 0.93692, -0.88950,  0.16872],
        [ 0.10782,  1.63110,  0.14352],
        [-0.07629, -0.26102,  1.89408],
        [ 0.62106,  0.37981, -0.04945]])
y_target:
 tensor([1, 2, 0, 2])
softmax:
 tensor([[0.61545, 0.09908, 0.28547],
        [0.15098, 0.69256, 0.15646],
        [0.11105, 0.09232, 0.79662],
        [0.43533, 0.34202, 0.22265]])
log_softmax:
 tensor([[-0.48541, -2.31182, -1.25360],
        [-1.89063, -0.36736, -1.85494],
        [-2.19774, -2.38247, -0.22737],
        [-0.83164, -1.07290, -1.50215]])
nll_loss:
 tensor(1.96666)
nll_loss2:
 tensor([2.31182, 1.85494, 2.19774, 1.50215])
ce_loss:
 tensor(1.96666)
ce_loss2:
 tensor([2.31182, 1.85494, 2.19774, 1.50215])
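
The per-sample numbers above can be reproduced by hand: take each row's log-softmax entry at its target index, negate it, and average. A minimal sketch of that equivalence (it draws fresh random inputs, so the printed values will differ from the run above):

import torch
import torch.nn.functional as F

x_input = torch.randn(4, 3)
y_target = torch.tensor([1, 2, 0, 2])

log_soft = F.log_softmax(x_input, dim=1)  # same result as nn.LogSoftmax(dim=1)

# Per-sample NLL: pick each row's log-probability at its target index, then negate.
per_sample = -log_soft.gather(1, y_target.unsqueeze(1)).squeeze(1)

# All three routes agree up to float rounding.
assert torch.allclose(per_sample, F.nll_loss(log_soft, y_target, reduction='none'))
assert torch.allclose(per_sample.mean(), F.cross_entropy(x_input, y_target))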

2. BCEWithLogitsLoss

import torch
import torch.nn as nn
import torch.nn.functional as F

def binary_cross():
    x_input = torch.rand(4, 3)  # 4 samples, 3 independent binary outputs, values in [0, 1)
    print('x_input:\n', x_input)
    y_target = torch.empty(4, 3).random_(2)  # same shape as x_input, entries in {0., 1.}
    print('y_target:\n', y_target)

    bce_loss = nn.BCELoss()  # expects probabilities in [0, 1]
    loss = bce_loss(x_input, y_target)  # mean binary cross-entropy over all elements
    print('bce_loss:\n', loss)  # scalar

    bcel_loss1 = nn.BCEWithLogitsLoss()  # applies sigmoid internally, expects raw logits
    loss1 = bcel_loss1(x_input, y_target)  # differs from bce_loss: x_input is treated as logits here
    print('bcel_loss:\n', loss1)  # scalar

    bcel_loss2 = nn.BCEWithLogitsLoss(reduction='none')  # no reduction: one value per element
    loss2 = bcel_loss2(x_input, y_target)
    print('bcel_loss2:\n', loss2)  # shape (4, 3)

    loss3 = F.binary_cross_entropy(x_input, y_target, reduction='none')  # functional form of nn.BCELoss
    print('loss3:\n', loss3)  # shape (4, 3)

    loss4 = F.binary_cross_entropy_with_logits(x_input, y_target, reduction='none')  # functional form of nn.BCEWithLogitsLoss
    print('loss4:\n', loss4)  # shape (4, 3)

binary_cross()

x_input:
 tensor([[0.60999, 0.37137, 0.19881],
        [0.05420, 0.96486, 0.95869],
        [0.63311, 0.76424, 0.54986],
        [0.55075, 0.63117, 0.80683]])
y_target:
 tensor([[1., 0., 0.],
        [0., 0., 1.],
        [1., 1., 1.],
        [0., 0., 1.]])
bce_loss:
 tensor(0.66356)
bcel_loss:
 tensor(0.67972)
bcel_loss2:
 tensor([[0.43396, 0.89597, 0.79748],
        [0.72062, 1.28769, 0.32454],
        [0.42588, 0.38232, 0.45554],
        [1.00597, 1.05772, 0.36899]])
loss3:
 tensor([[0.49431, 0.46421, 0.22165],
        [0.05573, 3.34830, 0.04219],
        [0.45712, 0.26887, 0.59809],
        [0.80018, 0.99742, 0.21464]])
loss4:
 tensor([[0.43396, 0.89597, 0.79748],
        [0.72062, 1.28769, 0.32454],
        [0.42588, 0.38232, 0.45554],
        [1.00597, 1.05772, 0.36899]])

Process finished with exit code 0
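
The gap between bce_loss (0.66356) and bcel_loss (0.67972) on the same x_input comes entirely from the extra sigmoid: BCEWithLogitsLoss treats its input as raw logits and applies sigmoid internally, while BCELoss expects probabilities that are already in [0, 1]. A minimal sketch checking both the elementwise formula and that relationship (fresh random inputs, so values differ from the run above):

import torch
import torch.nn.functional as F

x = torch.rand(4, 3)              # probabilities in [0, 1), as in binary_cross() above
y = torch.empty(4, 3).random_(2)  # binary targets

# Elementwise definition: loss = -(y*log(p) + (1-y)*log(1-p)).
manual = -(y * torch.log(x) + (1 - y) * torch.log(1 - x))
assert torch.allclose(manual, F.binary_cross_entropy(x, y, reduction='none'))

# BCEWithLogitsLoss == sigmoid + BCELoss: feeding sigmoid(x) to the plain
# version reproduces the logits version exactly.
assert torch.allclose(
    F.binary_cross_entropy_with_logits(x, y, reduction='none'),
    F.binary_cross_entropy(torch.sigmoid(x), y, reduction='none'))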
 

torch.nn.BCELoss is PyTorch's binary cross-entropy loss for binary and multi-label classification. It expects its input to already be probabilities in [0, 1], so the network output has to go through a sigmoid first; nn.BCEWithLogitsLoss, by contrast, takes raw logits and applies the sigmoid internally, which is exactly why bce_loss and bcel_loss above differ on the same x_input. BCELoss accepts a weight tensor for per-element weighting and a reduction argument ('mean' by default, or 'sum'/'none'; the older size_average flag is deprecated). Typical usage: create a BCELoss instance, pass the network output through a sigmoid, then call the loss with the resulting probabilities and the targets. The smaller the loss, the closer the predictions are to the targets.
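
As a sketch of those steps in training code (the linear model, sizes, and data below are placeholder assumptions, not part of the example above):

import torch
import torch.nn as nn

model = nn.Linear(10, 3)          # placeholder network: 10 features -> 3 binary labels
criterion = nn.BCELoss()

x = torch.randn(8, 10)            # dummy batch
y = torch.empty(8, 3).random_(2)  # binary targets, entries in {0., 1.}

probs = torch.sigmoid(model(x))   # BCELoss needs probabilities, so sigmoid comes first
loss = criterion(probs, y)
loss.backward()                   # gradients flow back through sigmoid and the model

In practice, applying nn.BCEWithLogitsLoss directly to the raw model output is usually preferred, since fusing the sigmoid into the loss is more numerically stable.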