mxnet Metric: custom evaluation/validation functions

 

Custom loss in insightface:

    # small epsilon to avoid division by zero
    params = [1.e-10]
    # sel is 1 for samples whose fc7 prediction matches the ground-truth label, else 0
    sel = mx.symbol.argmax(data=fc7, axis=1)
    sel = (sel == gt_label)
    # squared L2 norm of each embedding, plus the epsilon
    norm = embedding * embedding
    norm = mx.symbol.sum(norm, axis=1)
    norm = norm + params[0]
    # feature incay: penalize small feature norms of correctly classified samples
    feature_incay = sel / norm
    feature_incay = mx.symbol.mean(feature_incay) * args.incay
    extra_loss = mx.symbol.MakeLoss(feature_incay)
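
In insightface this extra term is attached alongside the main classification head; a minimal sketch (assuming, as above, that fc7 feeds a standard SoftmaxOutput and gt_label is the label symbol) might look like:

    # minimal sketch: group the main softmax head with the extra MakeLoss term
    softmax = mx.symbol.SoftmaxOutput(data=fc7, label=gt_label, name='softmax')
    out = mx.symbol.Group([softmax, extra_loss])
    # `out` is then handed to the Module in place of the plain softmax symbol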

 

Custom loss function

# -*- coding: utf-8 -*-

import mxnet as mx
import numpy as np
import logging

logging.basicConfig(level=logging.INFO)

x = mx.sym.Variable('data')
y = mx.sym.FullyConnected(data=x, num_hidden=1)
label = mx.sym.Variable('label')
# sigmoid output followed by binary cross-entropy as the custom loss
out = mx.sym.Activation(data=y, act_type='sigmoid')
cross_entropy = -(label * mx.sym.log(out) + (1 - label) * mx.sym.log(1 - out))
loss = mx.sym.MakeLoss(cross_entropy)
# group the gradient-blocked prediction with the loss so both are returned
pred_loss = mx.sym.Group([mx.sym.BlockGrad(out), loss])
ex = pred_loss.simple_bind(mx.cpu(), data=(32, 2), label=(32, 1))

# test
test_data = mx.nd.array(np.random.random(size=(32, 2)))
test_label = mx.nd.array(np.random.random(size=(32, 1)))

ex.forward(is_train=True, data=test_data, label=test_label)
ex.backward()

print(ex.arg_dict)
fc_w = ex.arg_dict['fullyconnected0_weight'].asnumpy()
fc_w_grad = ex.grad_dict['fullyconnected0_weight'].asnumpy()
fc_bias = ex.arg_dict['fullyconnected0_bias'].asnumpy()
fc_bias_grad = ex.grad_dict['fullyconnected0_bias'].asnumpy()

logging.info('fc_weight:{}, fc_weights_grad:{}'.format(fc_w, fc_w_grad))
logging.info('fc_bias:{}, fc_bias_grad:{}'.format(fc_bias, fc_bias_grad))
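
Because the grouped symbol returns both the blocked prediction and the loss, they can also be read straight from the executor's outputs after the forward pass (continuing the example above):

prob, ce = ex.outputs    # outputs[0]: sigmoid prediction, outputs[1]: per-sample loss
logging.info('prediction shape: {}, mean cross-entropy: {}'.format(
    prob.shape, ce.asnumpy().mean()))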

 

MakeLoss alone only gives you the loss, not the predictions; to get both the loss and the predictions you need mx.sym.Group() together with mx.sym.BlockGrad().
https://stackoverflow.com/questions/42304820/how-to-weight-observations-in-mxnet/42323339#42323339
 

# `final` is the last layer of the network; `weights` holds per-sample weights
label = mx.sym.Variable('label')
out = mx.sym.Activation(data=final, act_type='sigmoid')
ce = -(label * mx.sym.log(out) + (1 - label) * mx.sym.log(1 - out))
weights = mx.sym.Variable('weights')
loss = mx.sym.MakeLoss(weights * ce, normalization='batch')
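
A minimal end-to-end sketch of the weighted variant (the tiny one-unit network below stands in for `final`; all names and shapes are illustrative):

# illustrative sketch: weighted binary cross-entropy with an extra `weights` input
import mxnet as mx

data = mx.sym.Variable('data')
final = mx.sym.FullyConnected(data=data, num_hidden=1)
out = mx.sym.Activation(data=final, act_type='sigmoid')
label = mx.sym.Variable('label')
weights = mx.sym.Variable('weights')
ce = -(label * mx.sym.log(out) + (1 - label) * mx.sym.log(1 - out))
loss = mx.sym.MakeLoss(weights * ce, normalization='batch')

# group the blocked prediction with the loss, bind, and run one step
pred_loss = mx.sym.Group([mx.sym.BlockGrad(out), loss])
ex = pred_loss.simple_bind(mx.cpu(), data=(8, 2), label=(8, 1), weights=(8, 1))
ex.forward(is_train=True,
           data=mx.nd.random.uniform(shape=(8, 2)),
           label=mx.nd.round(mx.nd.random.uniform(shape=(8, 1))),
           weights=mx.nd.ones((8, 1)))   # all-ones weights == unweighted loss
prob, weighted_ce = ex.outputs           # predictions and the weighted loss
ex.backward()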

 


import mxnet as mx


class Siamise_metric(mx.metric.EvalMetric):
    """Accuracy for a siamese network: a pair is counted as 'same' when the
    predicted distance is below 0.5."""

    def __init__(self, name='siamise_acc'):
        super(Siamise_metric, self).__init__(name=name)

    def update(self, label, pred):
        preds = pred[0]
        labels = label[0]
        preds_label = preds.asnumpy().ravel()
        labels = labels.asnumpy().ravel()
        #self.sum_metric += labels[preds_label < 0.5].sum() + len(
        #    labels[preds_label >= 0.5]) - labels[preds_label >= 0.5].sum()
        #self.num_inst += len(labels)

        # distances below the 0.5 threshold are treated as positive (same) pairs
        pred_same = (preds_label < 0.5)
        acc = (pred_same == labels).sum()
        self.sum_metric += acc
        self.num_inst += len(labels)


class Contrastive_loss(mx.metric.EvalMetric):
    """Reports the mean contrastive loss taken from the network's second output."""

    def __init__(self, name='contrastive_loss'):
        super(Contrastive_loss, self).__init__(name=name)

    def update(self, label, pred):
        loss = pred[1].asnumpy()
        # accumulate per-sample losses; get() divides by num_inst to give the mean
        self.sum_metric += float(loss.sum())
        self.num_inst += len(loss)
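
These metrics can be combined and handed to a Module during training; an illustrative sketch (the `module` and `train_iter` objects are assumed to be defined elsewhere):

# combine both custom metrics so training logs accuracy and loss together
eval_metrics = mx.metric.CompositeEvalMetric()
eval_metrics.add(Siamise_metric())
eval_metrics.add(Contrastive_loss())

# module.fit(train_iter,
#            eval_metric=eval_metrics,
#            optimizer='sgd',
#            num_epoch=10)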

 
