XGBoost模型可解释性要用到xgb.predict()里面的参数pred_leaf,pred_contribs,
试了用XGBRegressor+fit()来训练模型,predict的时候没有pred_leaf,pred_contribs两个参数项,所以我用了xgb.train方式
模型训练过程:
#----------------------------------------------------------------------XGBoost 模型-------------------------------------------------------------#
#xgboost调参策略
import xgboost as xgb
import pandas as pd
import numpy as np
import pickle
import sys
import matplotlib.pyplot as plt
from sklearn.metrics import mean_absolute_error,make_scorer
from sklearn.preprocessing import StandardScaler
# from sklearn.grid_search import GridSearchCV 老版本
from scipy.sparse import csr_matrix,hstack
from sklearn.model_selection import KFold,train_test_split,GridSearchCV
from xgboost import XGBRFRegressor,XGBRegressor
import warnings
warnings.filterwarnings('ignore')
# ------------------------------ Data preprocessing ------------------------------
train = pd.read_csv('D:/机器学习算法/allstate-claims-severity/train.csv')

# Log-transform the (right-skewed) claim loss so the model regresses log(loss).
train['log_loss'] = np.log(train['loss'])

# Split the columns into categorical (object dtype) and numerical features,
# excluding the id column and both target columns.
features = [x for x in train.columns if x not in ['id', 'loss', 'log_loss']]
cat_features = [x for x in train.select_dtypes(include=['object']).columns
                if x not in ['id', 'loss', 'log_loss']]  # 72 binary + 88 multi-valued categories
num_features = [x for x in train.select_dtypes(exclude=['object']).columns
                if x not in ['id', 'loss', 'log_loss']]
print("Categorical features:", len(cat_features))
print("Numerical features:", len(num_features))
ntrain = train.shape[0]
# .copy() gives an independent frame: assigning encoded columns into a plain
# slice of `train` would trigger pandas' SettingWithCopyWarning and could
# silently fail to write.
train_x = train[features].copy()
train_y = train['log_loss']

# Encode each categorical column as integer category codes.
for col in cat_features:
    train_x[col] = train_x[col].astype('category').cat.codes

# Per-column mapping: original category label -> integer code
# (so predictions on new data can be encoded consistently).
category_dict = {
    col: {
        cat: n for n, cat in enumerate(train[col].astype('category').cat.categories)}
    for col in cat_features}

# Simple XGBoost model.
# DMatrix is xgboost's internal data container; it pre-processes the data once
# at construction so every training iteration afterwards is faster.
dtrain = xgb.DMatrix(train_x, train_y)
'''
XGBoost参数
'booster':'gbtree',梯度提升回归树
'objective':'multi:softmax',多分类的问题。损失函数,分类和回归
'num_class':10,类别数,与multisoftmax并用
'gamma':损失下降多少才进行分裂
'max_depth':12,构建树的深度,越