回归问题:
# Minimal regression example: fit an XGBoost regressor with default hyperparameters.
from xgboost import XGBRegressor
model =XGBRegressor()
train=model.fit(X, y)# X is the feature matrix, y is the target label
分类问题:
# Minimal classification example: fit an XGBoost classifier with default hyperparameters.
from xgboost import XGBClassifier
model =XGBClassifier()
train=model.fit(X, y)# X is the feature matrix, y is the class label
代码:
下面的 modelfit 函数可以用交叉验证寻找最佳迭代次数(树的数量),并输出训练集上的精确度与 AUC,以及各特征的重要性得分。
def modelfit(alg, dtrain, predictors, useTrainCV=True, cv_folds=5, early_stopping_rounds=50):
    """Tune the boosting-round count of an XGBoost classifier via CV, then fit and report.

    Parameters
    ----------
    alg : xgboost.XGBClassifier
        Estimator whose ``n_estimators`` will be tuned in place when ``useTrainCV`` is True.
    dtrain : array-like
        Feature matrix.  (NOTE(review): despite the name, this holds the features,
        not an ``xgb.DMatrix`` — confirm with callers.)
    predictors : array-like
        Binary labels.  (NOTE(review): despite the name, this holds the target,
        not a list of predictor columns — confirm with callers.)
    useTrainCV : bool, default True
        If True, run ``xgb.cv`` with early stopping to find the best iteration count.
    cv_folds : int, default 5
        Number of cross-validation folds.
    early_stopping_rounds : int, default 50
        Stop CV when the AUC has not improved for this many rounds.

    Returns
    -------
    int
        The chosen number of boosting rounds (trees).
    """
    if useTrainCV:
        xgb_param = alg.get_xgb_params()
        xgtrain = xgb.DMatrix(dtrain, label=predictors)
        # xgb.cv evaluates every boosting iteration with cross-validation and
        # stops early, so the number of returned rows is the ideal tree count.
        cvresult = xgb.cv(xgb_param, xgtrain,
                          num_boost_round=alg.get_params()['n_estimators'],
                          nfold=cv_folds, metrics='auc',
                          early_stopping_rounds=early_stopping_rounds)
        alg.set_params(n_estimators=cvresult.shape[0])
        best_n_estimators = cvresult.shape[0]
    else:
        # Fix: the original unconditionally returned cvresult.shape[0], which
        # raised NameError when useTrainCV was False.
        best_n_estimators = alg.get_params()['n_estimators']

    # Fit on the full training data with the (possibly tuned) tree count.
    alg.fit(dtrain, predictors, eval_metric='auc')

    # Fix: dtrain_predictions was referenced but never computed in the original.
    dtrain_predictions = alg.predict(dtrain)
    dtrain_predprob = alg.predict_proba(dtrain)[:, 1]  # probability of the positive class

    # Model report.  Fix: the original called explained_variance_score (a
    # regression metric) for both lines; use the classification metrics the
    # labels actually claim.
    print("\nModel Report")
    print("Accuracy : %.4g" % metrics.accuracy_score(predictors, dtrain_predictions))
    print("AUC Score (Train): %f" % metrics.roc_auc_score(predictors, dtrain_predprob))

    # Feature importance (split counts), ascending.
    feat_imp = pd.Series(alg.get_booster().get_fscore()).sort_values(ascending=True)
    print("feat_imp", "*" * 30)
    print("feature_name feature_importance_score")
    print(list(feat_imp.index))

    return best_n_estimators