CrossEntropy: NumPy implementation and PyTorch usage

1. NumPy implementation
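For N samples with logit rows x_i and integer labels y_i, the mean-reduced cross-entropy loss that both implementations below compute is

loss = -\frac{1}{N} \sum_{i=1}^{N} \log\left(\mathrm{softmax}(x_i)_{y_i}\right), \quad \mathrm{softmax}(x_i)_c = \frac{e^{x_{i,c}}}{\sum_{j=1}^{C} e^{x_{i,j}}}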

import numpy as np


# Define the softmax function (for a 1-D vector of logits)
def softmax(x):
    return np.exp(x) / np.sum(np.exp(x))

# Cross entropy computed with numpy
def cross_entropy_np(x, y):
    # per-sample softmax probabilities
    x_softmax = [softmax(x[i]) for i in range(len(x))]
    # log-probability of each sample's true class
    x_log = [np.log(x_softmax[i][y[i]]) for i in range(len(y))]
    loss = - np.sum(x_log) / len(y)
    return loss


# Alternative implementation: convert y to one-hot form, which is more intuitive
def cross_entropy_np2(x, y):
    num_data, num_class = x.shape
    log_p = np.array([np.log(softmax(x[i])) for i in range(num_data)])
    # np.eye(num_class)[y] turns integer labels into one-hot rows
    y_onehot = np.eye(num_class)[y]
    loss = - np.sum(y_onehot * log_p) / num_data
    return round(loss, 4)

if __name__ == '__main__':
    # Sample data: x holds logits, y holds class indices
    x = np.array([[0.093, 0.1939, -1.0649, 0.4476, -2.0769],
                  [-1.8024, 0.3696, 0.7796, -1.0346, 0.473],
                  [0.5593, -2.5067, -2.1275, 0.5548, -1.6639]])
    
    y = np.array([1, 2, 3])
    print('numpy result: ', cross_entropy_np(x, y)) 
# numpy result:  1.0155949508195155
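Note that the naive softmax above can overflow np.exp for large logits. A common numerically stable variant subtracts the row max first; a minimal sketch that can replace the softmax above:

# Numerically stable softmax: subtracting the max leaves the result
# mathematically unchanged but keeps np.exp from overflowing
def softmax_stable(x):
    z = x - np.max(x)
    e = np.exp(z)
    return e / np.sum(e)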

2. PyTorch implementation

import torch
import numpy as np
from torch.nn import functional as F


# Compute with PyTorch's F.cross_entropy (the functional form of nn.CrossEntropyLoss)
def cross_entropy_pth(x, y):
    x_pth = torch.from_numpy(x)
    # class indices must be a LongTensor
    y_pth = torch.from_numpy(y).long()
    loss = F.cross_entropy(x_pth, y_pth)
    return loss

if __name__ == '__main__':
    # Sample data x, y (same as above)
    x = np.array([[0.093, 0.1939, -1.0649, 0.4476, -2.0769],
                  [-1.8024, 0.3696, 0.7796, -1.0346, 0.473],
                  [0.5593, -2.5067, -2.1275, 0.5548, -1.6639]])
    
    y = np.array([1, 2, 3])
    print('pytorch result: ', cross_entropy_pth(x, y))
# pytorch result:  tensor(1.0156, dtype=torch.float64)
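Equivalently, the module form nn.CrossEntropyLoss can be used instead of the functional call; a short sketch:

import torch
import torch.nn as nn

criterion = nn.CrossEntropyLoss()  # defaults to reduction='mean'
x_pth = torch.tensor([[0.093, 0.1939, -1.0649, 0.4476, -2.0769],
                      [-1.8024, 0.3696, 0.7796, -1.0346, 0.473],
                      [0.5593, -2.5067, -2.1275, 0.5548, -1.6639]])
y_pth = torch.tensor([1, 2, 3])
print(criterion(x_pth, y_pth))  # ≈ tensor(1.0156)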

3. The weight parameter of nn.CrossEntropyLoss

The documentation for nn.CrossEntropyLoss is shown below:

[screenshot of the nn.CrossEntropyLoss documentation]

The weight parameter assigns a different weight to each class's loss term, which can help with class-imbalance problems.

As the documentation shows, weight must have the same length as the number of classes. In the code above, each row of x has 5 entries (C in the docs), i.e. there are 5 classes, so weight must have length 5.
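With a class-weight vector w and the default reduction='mean', PyTorch divides by the sum of the selected weights rather than by N:

loss = - \frac{\sum_{i=1}^{N} w_{y_i} \log\left(\mathrm{softmax}(x_i)_{y_i}\right)}{\sum_{i=1}^{N} w_{y_i}}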

The following example illustrates this:

import torch
import numpy as np
from torch.nn import functional as F


# Compute with PyTorch's F.cross_entropy, optionally weighted per class
def cross_entropy_pth(x, y, weight=None):
    x_pth = torch.from_numpy(x)
    y_pth = torch.from_numpy(y).long()
    if weight is not None:
        weight = torch.from_numpy(weight).double()
    loss = F.cross_entropy(x_pth, y_pth, weight=weight)
    return loss


# softmax, as before (numpy)
def softmax(x):
    return np.exp(x) / np.sum(np.exp(x))


# Weighted cross entropy computed with numpy
def cross_entropy_np(x, y, weight=None):
    x_softmax = [softmax(x[i]) for i in range(len(x))]
    x_log = [np.log(x_softmax[i][y[i]]) for i in range(len(y))]
    if weight is not None:
        # scale each sample's log-prob by the weight of its true class
        x_log = [x_log[i] * weight[y[i]] for i in range(len(y))]
        # match PyTorch's 'mean' reduction: divide by the sum of selected weights
        total_num = sum([weight[i] for i in y])
    else:
        total_num = len(y)
    loss = - np.sum(x_log) / total_num
    return loss


if __name__ == '__main__':
    # Sample data x, y
    x = np.array([[0.093, 0.1939, -1.0649, 0.4476, -2.0769],
                  [-1.8024, 0.3696, 0.7796, -1.0346, 0.473],
                  [0.5593, -2.5067, -2.1275, 0.5548, -1.6639]])
    
    y = np.array([1, 2, 3])
    
    # Suppose classes 2 and 3 should get more weight: set their weights to 2
    weight = np.array([1, 1, 2, 2, 1])
    
    print('numpy result: ', cross_entropy_np(x, y, weight))
    print('pytorch result: ', cross_entropy_pth(x, y, weight=weight))
# numpy result:  0.9636395529496566
# pytorch result:  tensor(0.9636, dtype=torch.float64)
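As a cross-check, the same value can be recovered from the unreduced per-sample losses; a minimal sketch, continuing with the x, y, weight arrays defined above:

w = torch.from_numpy(weight).double()
x_pth = torch.from_numpy(x)
y_pth = torch.from_numpy(y).long()
# per-sample weighted losses, then divide by the sum of the selected weights
per_sample = F.cross_entropy(x_pth, y_pth, weight=w, reduction='none')
print(per_sample.sum() / w[y_pth].sum())  # tensor(0.9636, dtype=torch.float64)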
