null_importance特征筛选运行效率优化

本文介绍了如何使用LightGBM的随机森林模式进行二分类问题的模型训练,并通过shuffle功能生成随机数据来评估特征重要性。通过计算实际和随机扰动情况下的特征得分,提供了特征选择的方法。
摘要由CSDN通过智能技术生成

由于每次迭代都需要使用同一个数据集,只是label不一样,考虑将label取出来后进行随机打散,然后使用Dataset的set_label函数重新设置标签。从而优化数据集的创建环节。

import lightgbm as lgb
import numpy as np
import pandas as pd
from sklearn.metrics import *
from tqdm import tqdm

def get_feature_importances_by_dtrain(dtrain, y_, train_features, shuffle=False, categorical_feats=None, seed=2024):
    """Fit a LightGBM random-forest model on ``dtrain`` and return per-feature importances.

    The same ``lgb.Dataset`` is reused across calls: for a null-importance run
    (``shuffle=True``) only the labels are permuted and re-attached with
    ``Dataset.set_label``, so the expensive Dataset construction happens once.

    Parameters
    ----------
    dtrain : lgb.Dataset
        Must be built with ``free_raw_data=False`` — ``dtrain.data`` is read
        below to compute the training AUC.
    y_ : pd.Series
        Original labels. Never modified; a shuffled copy is used when needed.
    train_features : sequence of str
        Feature names, in the same column order as the training data.
    shuffle : bool
        If True, train against randomly permuted labels (null-importance run).
        NOTE(review): this mutates ``dtrain``'s label in place as a side effect;
        callers that need the true labels afterwards must reset them.
    categorical_feats : list of str, optional
        Categorical feature names forwarded to ``lgb.train`` (default: none).
    seed : int
        LightGBM seed. The label shuffle itself is intentionally unseeded so
        that repeated null runs draw different permutations.

    Returns
    -------
    pd.DataFrame
        Columns: ``feature``, ``importance_gain``, ``importance_split``,
        ``trn_score`` (training-set ROC AUC, identical on every row).
    """
    # Avoid the shared-mutable-default-argument pitfall.
    if categorical_feats is None:
        categorical_feats = []

    if shuffle:
        # Permute the labels. reset_index(drop=True) makes the shuffle purely
        # positional, so no downstream index alignment can undo it.
        y = y_.copy().sample(frac=1.0).reset_index(drop=True)
        # Reuse dtrain (only the label changes) to save Dataset build time.
        dtrain.set_label(y)
    else:
        y = y_

    # LightGBM in RF mode — faster than sklearn's RandomForest.
    lgb_params = {
        'objective': 'binary',
        'boosting_type': 'rf',
        'subsample': 0.623,
        'colsample_bytree': 0.7,
        'num_leaves': 127,
        'max_depth': 7,
        'seed': seed,
        'bagging_freq': 5,
        'verbose': -1,
        'n_jobs': 80,
        # Uncomment to train on GPU:
        # 'device': 'gpu',
        # 'gpu_platform_id': 0,
        # 'gpu_device_id': 0,
    }

    # Fit the model.
    clf = lgb.train(params=lgb_params, train_set=dtrain, num_boost_round=200, categorical_feature=categorical_feats)

    # Collect both importance flavours plus the in-sample AUC.
    imp_df = pd.DataFrame()
    imp_df["feature"] = list(train_features)
    imp_df["importance_gain"] = clf.feature_importance(importance_type='gain')
    imp_df["importance_split"] = clf.feature_importance(importance_type='split')
    # dtrain.data is available because the Dataset was built with free_raw_data=False.
    imp_df['trn_score'] = roc_auc_score(y, clf.predict(dtrain.data))
    return imp_df

%%time
print('start')
label = 'label7d'
#                 &(train_data2['platformtype']=='ANDROID')\
#                 &(train_data2['terms']!=2)\
#                 &(train_data2['term_time_period']<=15)\
#                 &(train_data2['label0d']==0) # 限制当日未完件
#                 &(train_data2['is_0d_curp_reject']==0) # 限制当日未完件
data = data_all_feature_df[(data_all_feature_df.platform_type_name.isin([ 'ANDROID']))
                                   &(data_all_feature_df.sdk_type=='M')
                                   &(data_all_feature_df.register_month<='2024-01')
                                   &(data_all_feature_df['label0d']==0) #  
                                   &(data_all_feature_df['is_0d_curp_reject']==0) #  
                                    ][[label]+select_columns].sample(200000)

y = data[label]

print(len(y))

dtrain = lgb.Dataset(data[select_columns]
                        , y
                        , free_raw_data=False)

print('dtrain end')
print(dtrain.data.shape)



%%time
actual_imp_df = get_feature_importances_by_dtrain(dtrain, y, train_features=select_columns, shuffle=False, categorical_feats=[], seed=2024)
print('actual_imp_df end')


%%time
null_imp_df = pd.DataFrame()
nb_runs = 80
for i in tqdm(range(nb_runs)):
    # Get current run importances
    imp_df = get_feature_importances_by_dtrain(dtrain, y, train_features=select_columns, shuffle=True, categorical_feats=[], seed=2024)
    # Concat the latest importances with the old ones
    null_imp_df = pd.concat([null_imp_df, imp_df], axis=0)

print('null_imp_df end')

# Score each feature by comparing its actual importance to its null
# distribution: score = log(1e-10 + actual / (1 + 75th percentile of nulls)).
# The +1 in the denominator and the 1e-10 inside the log avoid divide-by-zero
# and log-of-zero; a higher score means the feature carries signal beyond
# what shuffled labels alone produce.
#
# Group both frames once instead of re-scanning them with a boolean mask for
# every feature (the original was O(n_features * n_rows) per column).
null_gain_by_feat = {f: g.values for f, g in null_imp_df.groupby('feature')['importance_gain']}
null_split_by_feat = {f: g.values for f, g in null_imp_df.groupby('feature')['importance_split']}
act_gain_mean = actual_imp_df.groupby('feature')['importance_gain'].mean()
act_split_mean = actual_imp_df.groupby('feature')['importance_split'].mean()

feature_scores = []
for _f in actual_imp_df['feature'].unique():
    gain_score = np.log(1e-10 + act_gain_mean[_f] / (1 + np.percentile(null_gain_by_feat[_f], 75)))
    split_score = np.log(1e-10 + act_split_mean[_f] / (1 + np.percentile(null_split_by_feat[_f], 75)))
    feature_scores.append((_f, split_score, gain_score))

scores_df = pd.DataFrame(feature_scores, columns=['feature', 'split_score', 'gain_score'])
scores_df = scores_df.sort_values('split_score', ascending=False)


# Persist all three result frames for later analysis / reuse.
for _frame, _tag in (
    (actual_imp_df, 'actual_imp_df'),
    (null_imp_df, 'null_imp_df'),
    (scores_df, 'feature_scores_df'),
):
    _frame.to_pickle(f'cuwanjian_{_tag}_{data_version}_{label_dt}.pkl')

  • 5
    点赞
  • 9
    收藏
    觉得还不错? 一键收藏
  • 打赏
    打赏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包

打赏作者

mtj66

看心情

¥1 ¥2 ¥4 ¥6 ¥10 ¥20
扫码支付:¥1
获取中
扫码支付

您的余额不足,请更换扫码支付或充值

打赏作者

实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值