# *********get xgb model's feature importance************
# Load the trained booster, map its raw fscore keys ('f0', 'f1', ...) back to
# human-readable names from feat_name, normalize the scores to sum to 1, and
# write the result to CSV.
b2_model = xgb.Booster(model_file=model_path + '/b2_model_12.model')
importance = b2_model.get_fscore()  # dict: {'f<idx>': split count, ...}
# Sort ascending by score (kept from original; barh then plots largest on top).
importance = sorted(importance.items(), key=operator.itemgetter(1), reverse=False)
df = pd.DataFrame(importance, columns=['feature', 'fscore'])
# Replace each 'f<idx>' placeholder with the real feature name.
# get_fscore keys are always 'f' + integer index, so stripping the first
# character and converting to int is safe.
df['feature'] = [feat_name[int(key[1:])] for key in df['feature']]
# Normalize so the importances sum to 1.
df['fscore'] = df['fscore'] / df['fscore'].sum()
# Bug fix: original concatenated model_path + "xgb_feat_importance_12.csv"
# without a separator, unlike the '/' used when loading the model above.
df.to_csv(model_path + "/xgb_feat_importance_12.csv", index=False)
# Plot the feature importances
plt.figure()
df.plot(kind='barh', x='feature', y='fscore', legend=False, figsize=(6,
xgboost 模型展示 特征重要性和决策树形状
最新推荐文章于 2023-12-31 11:32:23 发布
本文介绍了在使用xgboost时遇到的问题及其解决方案,包括未安装graphviz的解决方法——通过Anaconda Prompt安装,以及如何调整决策树图像的大小,以清晰查看特征重要性和树结构。
摘要由CSDN通过智能技术生成