XGB-25: Callback Functions

This document gives a basic overview of the callback API used in the XGBoost Python package. XGBoost 1.3 introduced a new callback interface for the Python package, which provides the flexibility to design various training-time extensions. In addition, XGBoost ships with a number of predefined callbacks that support features such as early stopping and checkpointing.

Using the Built-in Callbacks

By default, the training methods in XGBoost accept parameters such as early_stopping_rounds and verbose/verbose_eval; when these parameters are specified, the training procedure defines the corresponding callbacks internally. For example, when early_stopping_rounds is specified, the EarlyStopping callback is invoked inside the iteration loop. You can also pass this callback to XGBoost directly:

from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split

import xgboost as xgb
import numpy as np

X, y = load_breast_cancer(return_X_y=True)
X_train, X_valid, y_train, y_valid = train_test_split(X, y, stratify=y, random_state=94)

D_train = xgb.DMatrix(X_train, y_train)
D_valid = xgb.DMatrix(X_valid, y_valid)

# Define a custom evaluation metric used for early stopping.
def eval_error_metric(predt: np.ndarray, dtrain: xgb.DMatrix):
    label = dtrain.get_label()
    r = np.zeros(predt.shape)
    gt = predt > 0.5
    r[gt] = 1 - label[gt]
    le = predt <= 0.5
    r[le] = label[le]
    return 'CustomErr', np.sum(r)

# Specify which dataset and which metric should be used for early stopping.
early_stopping_rounds = 2
early_stop = xgb.callback.EarlyStopping(rounds=early_stopping_rounds,
                                        metric_name='CustomErr',
                                        data_name='Train')

booster = xgb.train(
    {'objective': 'binary:logistic',
     'eval_metric': ['error', 'rmse'],
     'tree_method': 'hist'}, D_train,
    evals=[(D_train, 'Train'), (D_valid, 'Valid')],
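    # `feval` was the custom-metric hook as of XGBoost 1.3; newer releases
    # prefer the `custom_metric` parameter instead.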
    feval=eval_error_metric,
    num_boost_round=1000,
    callbacks=[early_stop],
    verbose_eval=False)

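# One tree is added per boosting round, so the length of the recorded metric
# history matches the number of trees in the early-stopped booster.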
dump = booster.get_dump(dump_format='json')
assert len(early_stop.stopping_history['Train']['CustomErr']) == len(dump)
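
Built-in callbacks can also be combined in a single callbacks list. The following is a hedged sketch (reusing D_train and D_valid from above; the decay schedule is arbitrary) that pairs a LearningRateScheduler with early stopping on the validation set:

# Sketch: combine two built-in callbacks; the eta decay schedule is arbitrary.
scheduler = xgb.callback.LearningRateScheduler(lambda epoch: 0.3 * 0.99 ** epoch)
booster = xgb.train(
    {'objective': 'binary:logistic', 'tree_method': 'hist'},
    D_train,
    evals=[(D_train, 'Train'), (D_valid, 'Valid')],
    num_boost_round=100,
    callbacks=[scheduler, xgb.callback.EarlyStopping(rounds=5, data_name='Valid')],
    verbose_eval=False)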

Defining Your Own Callback

XGBoost provides a callback interface class, TrainingCallback; user-defined callbacks should subclass it and override the corresponding methods. A complete working example of defining and using callbacks is given below.
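
The hooks that can be overridden are before_training, after_training, before_iteration, and after_iteration. The first two receive and return the model, while the per-iteration hooks return a boolean, with True requesting that training stop. As a minimal sketch (the class name PrintIteration is purely illustrative):

import xgboost as xgb

class PrintIteration(xgb.callback.TrainingCallback):
    """Minimal custom callback: report progress once per boosting round."""

    def after_iteration(self, model, epoch, evals_log):
        print(f"finished round {epoch}")
        # Returning True would stop training early; False lets it continue.
        return False

Such a class is passed through the callbacks argument of xgb.train, exactly like the built-in callbacks. The full demo below does the same with a plotting callback and a checkpointing callback.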

import argparse
import os
import tempfile
from typing import Dict

import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split

import xgboost as xgb


class Plotting(xgb.callback.TrainingCallback):
    """Plot evaluation result during training.  Only for demonstration purpose as it's
    quite slow to draw using matplotlib.

    """

    def __init__(self, rounds: int) -> None:
        self.fig = plt.figure()
        self.ax = self.fig.add_subplot(111)
        self.rounds = rounds
        self.lines: Dict[str, plt.Line2D] = {}
        self.fig.show()
        self.x = np.linspace(0, self.rounds, self.rounds)
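        # Interactive mode so the figure can refresh while training continues.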
        plt.ion()

    def _get_key(self, data: str, metric: str) -> str:
        return f"{data}-{metric}"

    def after_iteration(
        self, model: xgb.Booster, epoch: int, evals_log: Dict[str, dict]
    ) -> bool:
        """Update the plot."""
        if not self.lines:
            for data, metrics in evals_log.items():
                for metric_name, log in metrics.items():
                    key = self._get_key(data, metric_name)
                    expanded = log + [0] * (self.rounds - len(log))
                    (self.lines[key],) = self.ax.plot(self.x, expanded, label=key)
                    self.ax.legend()
        else:
            # https://pythonspot.com/matplotlib-update-plot/
            for data, metrics in evals_log.items():
                for metric_name, log in metrics.items():
                    key = self._get_key(data, metric_name)
                    expanded = log + [0] * (self.rounds - len(log))
                    self.lines[key].set_ydata(expanded)
            self.fig.canvas.draw()
        # False to indicate training should not stop.
        return False


def custom_callback() -> None:
    """Demo for defining a custom callback function that plots evaluation result during
    training."""
    X, y = load_breast_cancer(return_X_y=True)
    X_train, X_valid, y_train, y_valid = train_test_split(X, y, random_state=0)

    D_train = xgb.DMatrix(X_train, y_train)
    D_valid = xgb.DMatrix(X_valid, y_valid)

    num_boost_round = 100
    plotting = Plotting(num_boost_round)

    # Pass it to the `callbacks` parameter as a list.
    xgb.train(
        {
            "objective": "binary:logistic",
            "eval_metric": ["error", "rmse"],
            "tree_method": "hist",
            "device": "cuda",
        },
        D_train,
        evals=[(D_train, "Train"), (D_valid, "Valid")],
        num_boost_round=num_boost_round,
        callbacks=[plotting],
    )


def check_point_callback() -> None:
    """Demo for using the checkpoint callback. Custom logic for handling output is
    usually required and users are encouraged to define their own callback for
    checkpointing operations. The builtin one can be used as a starting point.

    """
    # For demo only; in practice use a larger interval (like 100), as
    # checkpointing is quite slow.
    rounds = 2

    def check(as_pickle: bool) -> None:
        for i in range(0, 10, rounds):
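            # Checkpoints are written every `rounds` iterations; nothing is
            # saved for round 0.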
            if i == 0:
                continue
            if as_pickle:
                path = os.path.join(tmpdir, "model_" + str(i) + ".pkl")
            else:
                path = os.path.join(
                    tmpdir,
                    f"model_{i}.{xgb.callback.TrainingCheckPoint.default_format}",
                )
            assert os.path.exists(path)

    X, y = load_breast_cancer(return_X_y=True)
    m = xgb.DMatrix(X, y)
    # Checkpoint into a temporary directory for the demo.
    with tempfile.TemporaryDirectory() as tmpdir:
        # Use callback class from xgboost.callback
        # Feel free to subclass/customize it to suit your needs.
        check_point = xgb.callback.TrainingCheckPoint(
            directory=tmpdir, interval=rounds, name="model"
        )
        xgb.train(
            {"objective": "binary:logistic"},
            m,
            num_boost_round=10,
            verbose_eval=False,
            callbacks=[check_point],
        )
        check(False)

        # This version of checkpoint saves everything including parameters and
        # model.  See: doc/tutorials/saving_model.rst
        check_point = xgb.callback.TrainingCheckPoint(
            directory=tmpdir, interval=rounds, as_pickle=True, name="model"
        )
        xgb.train(
            {"objective": "binary:logistic"},
            m,
            num_boost_round=10,
            verbose_eval=False,
            callbacks=[check_point],
        )
        check(True)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--plot", default=1, type=int)
    args = parser.parse_args()

    check_point_callback()

    if args.plot:
        custom_callback()
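
As the docstring of check_point_callback suggests, real deployments usually need custom checkpoint logic. Below is a hedged sketch of such a callback; the class name SimpleCheckpoint and its parameters are hypothetical, and it simply saves the booster every `interval` rounds:

import os

import xgboost as xgb


class SimpleCheckpoint(xgb.callback.TrainingCallback):
    """Hypothetical example: save the booster every `interval` rounds."""

    def __init__(self, directory: str, interval: int = 100) -> None:
        self.directory = directory
        self.interval = interval

    def after_iteration(self, model: xgb.Booster, epoch: int, evals_log) -> bool:
        # epoch is 0-based, so this saves after rounds interval, 2 * interval, ...
        if (epoch + 1) % self.interval == 0:
            model.save_model(
                os.path.join(self.directory, f"model_{epoch + 1}.json")
            )
        # Never request early stopping from this callback.
        return False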
