代码
import torch
import torch.nn.functional as F
from torch.autograd import Variable
class GHMC_Loss:
    """Gradient Harmonizing Mechanism classification loss (GHM-C).

    Re-weights the per-element binary cross-entropy by the inverse
    density of its gradient norm, so that both very easy examples and
    extreme outliers contribute less to the total loss.
    """

    def __init__(self, bins=10, momentum=0.0):
        """
        Args:
            bins (int): number of unit-region bins that the gradient
                norm range [0, 1] is divided into.
            momentum (float): EMA momentum for the per-bin sample
                counts; 0 disables the moving average.
        """
        self.bins = bins
        self.momentum = momentum
        # Bin edges over [0, 1]; the last edge is nudged past 1.0 so a
        # gradient norm of exactly 1.0 still falls in the final bin.
        self.edges = [float(x) / bins for x in range(bins + 1)]
        self.edges[-1] += 1e-6
        # BUGFIX: always allocate acc_sum. The original only created it
        # when momentum > 0, so calc() crashed on `return self.acc_sum`
        # (AttributeError) whenever momentum == 0.
        self.acc_sum = [0.0 for _ in range(bins)]

    def calc(self, input, target, mask):
        """Compute the GHM-C loss.

        Args:
            input (Tensor [batch_num, class_num]): raw logits from the
                classification fc layer.
            target (Tensor [batch_num, class_num]): binary target
                (0 or 1) for each sample and class.
            mask (Tensor [batch_num, class_num]): entries > 0 are
                valid; all others are ignored.

        Returns:
            tuple: ``(loss, acc_sum, g)`` — scalar loss tensor, the
            per-bin (possibly EMA-smoothed) counts, and the per-element
            gradient norms.
        """
        edges = self.edges
        mmt = self.momentum
        weights = torch.zeros_like(input)

        # Gradient norm of sigmoid-BCE w.r.t. the logits: |p - target|.
        self.g = torch.abs(input.sigmoid().detach() - target)

        valid = mask > 0
        # Guard against an all-invalid batch (avoids division by zero).
        tot = max(valid.float().sum().item(), 1.0)
        n = 0  # number of non-empty bins
        for i in range(self.bins):
            inds = (self.g >= edges[i]) & (self.g < edges[i + 1]) & valid
            num_in_bin = inds.sum().item()
            if num_in_bin > 0:
                if mmt > 0:
                    # Exponential moving average of the bin population.
                    self.acc_sum[i] = mmt * self.acc_sum[i] \
                        + (1 - mmt) * num_in_bin
                    weights[inds] = tot / self.acc_sum[i]
                else:
                    weights[inds] = tot / num_in_bin
                n += 1
        if n > 0:
            weights = weights / n

        loss = F.binary_cross_entropy_with_logits(
            input, target, weights, reduction='sum') / tot
        return loss, self.acc_sum, self.g
第一部分
class GHMC_Loss:
    """GHM-C classification loss (annotated walkthrough copy)."""

    def __init__(self, bins=10, momentum=0.0):
        # Constructor: runs automatically when the class is instantiated.
        """Initialize instance attributes.

        Args:
            bins (int): number of unit-region bins over [0, 1].
            momentum (float): EMA momentum for per-bin counts.
        """
        # BUGFIX: the docstring above originally used fullwidth quotes
        # (a SyntaxError) — replaced with standard triple quotes.
        self.bins = bins
        self.momentum = momentum
        # Bin edges; last edge nudged past 1.0 so g == 1.0 is binned.
        self.edges = [float(x) / bins for x in range(bins + 1)]
        self.edges[-1] += 1e-6
        # BUGFIX: allocate unconditionally — calc() returns self.acc_sum
        # even when momentum == 0, so a conditional allocation crashes.
        self.acc_sum = [0.0 for _ in range(bins)]
class 是定义类的关键字(表示开始创建一个类),GHMC_Loss 是新建的类名。
第二部分
def calc(self, input, target, mask):
    """Compute the GHM-C loss (annotated walkthrough copy).

    Args:
        input (Tensor [batch_num, class_num]): raw logits from the
            classification fc layer.
        target (Tensor [batch_num, class_num]): binary target (0 or 1)
            for each sample and class.
        mask (Tensor [batch_num, class_num]): entries > 0 are valid.

    Returns:
        tuple: ``(loss, acc_sum, g)``.
    """
    edges = self.edges
    mmt = self.momentum
    # torch.zeros_like(x) returns an all-zero tensor with the same
    # shape (and dtype/device) as x — e.g. for a 5x3 input it yields a
    # 5x3 tensor of zeros.
    weights = torch.zeros_like(input)

    # Gradient length: torch.abs takes the element-wise absolute value,
    # here |sigmoid(input) - target|, detached from the graph.
    self.g = torch.abs(input.sigmoid().detach() - target)

    # `&` is element-wise logical AND on these boolean tensors;
    # max(..., 1.0) keeps tot >= 1 so the division below is safe.
    valid = mask > 0
    tot = max(valid.float().sum().item(), 1.0)
    n = 0  # number of non-empty bins
    for i in range(self.bins):
        inds = (self.g >= edges[i]) & (self.g < edges[i + 1]) & valid
        num_in_bin = inds.sum().item()
        if num_in_bin > 0:
            if mmt > 0:
                # EMA-smoothed bin population.
                self.acc_sum[i] = mmt * self.acc_sum[i] \
                    + (1 - mmt) * num_in_bin
                weights[inds] = tot / self.acc_sum[i]
            else:
                weights[inds] = tot / num_in_bin
            n += 1
    if n > 0:
        weights = weights / n

    # NOTE(review): the return below requires self.acc_sum to exist;
    # __init__ must allocate it unconditionally (not only when
    # momentum > 0), otherwise this raises AttributeError.
    loss = F.binary_cross_entropy_with_logits(
        input, target, weights, reduction='sum') / tot
    return loss, self.acc_sum, self.g
& 是按位与运算符;对布尔张量(如此处的 inds 和 valid)它执行逐元素的逻辑与。
max() 函数返回给定参数中的最大值,参数也可以是序列;此处 max(..., 1.0) 保证 tot 至少为 1,避免除以零。