The binary wheel will support GPU algorithms (gpu_exact, gpu_hist) on machines with NVIDIA GPUs. However, it will not support multi-GPU training; only single GPU will be used [Installation Guide]
For the moment, a single GPU is enough for me.
pip install xgboost
To enable GPU training, some parameters must be passed when the model is constructed, i.e. params = {'tree_method': 'gpu_hist', 'predictor': 'gpu_predictor'}. Below is an example modified from [
Tune Learning Rate for Gradient Boosting with XGBoost in Python].
import pickle
import xgboost as xgb
import pandas as pd
import numpy as np
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.model_selection import StratifiedKFold, GridSearchCV
# Load the Otto training data and split it into features and target.
# NOTE(review): column 0 of train.csv may be an 'id' column; if so it is
# being fed to the model as a feature -- confirm against the CSV header.
data = pd.read_csv('otto/train.csv')
dataset = data.values
# Features are the first 94 columns; the class label is column 94.
X, y = dataset[:, :94], dataset[:, 94]
# Encode the string class labels as consecutive integers for XGBoost.
encoder = LabelEncoder()
label_encoded_y = encoder.fit_transform(y)
# --- Enable GPU acceleration ------------------------------------------
# 'gpu_hist' builds trees on the GPU and 'gpu_predictor' runs prediction
# there as well (single-GPU only, per the binary wheel's limitations).
# NOTE(review): in XGBoost >= 2.0 these values are deprecated in favour of
# tree_method='hist' + device='cuda' -- confirm the installed version.
params = dict(tree_method='gpu_hist', predictor='gpu_predictor')
model = xgb.XGBClassifier(**params)
# ----------------------------------------------------------------------
# Grid-search the learning rate with 10-fold stratified CV, maximising
# negative log-loss. n_jobs=-1 parallelises the CV fits across all cores.
learning_rate = [0.0001, 0.001, 0.01, 0.1, 0.2, 0.3]
param_grid = dict(learning_rate=learning_rate)
kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=7)
grid_search = GridSearchCV(
    model, param_grid=param_grid, cv=kfold, scoring='neg_log_loss', n_jobs=-1)
grid_search.fit(X, label_encoded_y)
print('best param: ', grid_search.best_params_)
print('best score: ', grid_search.best_score_)
# Report mean +/- std score for every candidate learning rate.
means = grid_search.cv_results_['mean_test_score']
stds = grid_search.cv_results_['std_test_score']
# Renamed from `params`: the original rebinding silently clobbered the
# GPU-config `params` dict defined earlier in the script.
cv_params = grid_search.cv_results_['params']
for mean, stdev, param in zip(means, stds, cv_params):
    print("%f (%f) with: %r" % (mean, stdev, param))
# Persist the refit best estimator for later inference.
best_model = grid_search.best_estimator_
with open('bst_xgb.pkl', 'wb') as f:
    pickle.dump(best_model, f)