LightGBM调参
1. 经验法:
往两个方向调:
1.提高准确率:max_depth, num_leaves, learning_rate
2.降低过拟合:max_bin, min_data_in_leaf;L1, L2正则化;数据抽样, 列采样
1.使用较小的num_leaves,max_depth和max_bin,降低复杂度。
2.使用min_data_in_leaf和min_sum_hessian_in_leaf,该值越大,模型的学习越保守。
3.设置bagging_freq和bagging_fraction使用bagging。
4.设置feature_fraction进行特征采样。
5.使用lambda_l1,lambda_l2和min_gain_to_split正则化。
2. 贪心调参:
先调整对模型影响最大的参数,再调整对模型影响次大的参数,缺点是容易调成局部最优,需要多次调试。日常调参顺序如下:
① num_leaves, max_depth
② min_data_in_leaf, min_child_weight
③ bagging_freq, bagging_fraction, feature_fraction
④ reg_lambda, reg_alpha
⑤ min_split_gain
from sklearn.model_selection import cross_val_score

# Step 1: tune `objective`, scored by 5-fold CV mean f1.
# NOTE(review): 'f1' is a classification metric but LGBMRegressor is a
# regressor — confirm whether LGBMClassifier was intended; kept as-is to
# preserve the snippet's interface.
best_obj = dict()
for obj in objective:
    model = LGBMRegressor(objective=obj)
    score = cross_val_score(model, X_train, y_train, cv=5, scoring='f1').mean()
    best_obj[obj] = score

# Step 2: tune num_leaves with the best objective fixed.
# BUG FIX: f1 is "higher is better", so the best parameter is the one with
# the MAXIMUM CV score — the original used min(), selecting the worst value.
best_leaves = dict()
for leaves in num_leaves:
    model = LGBMRegressor(objective=max(best_obj.items(), key=lambda x: x[1])[0],
                          num_leaves=leaves)
    score = cross_val_score(model, X_train, y_train, cv=5, scoring='f1').mean()
    best_leaves[leaves] = score

# Step 3: tune max_depth with the best objective and num_leaves fixed.
best_depth = dict()
for depth in max_depth:
    model = LGBMRegressor(objective=max(best_obj.items(), key=lambda x: x[1])[0],
                          num_leaves=max(best_leaves.items(), key=lambda x: x[1])[0],
                          max_depth=depth)
    score = cross_val_score(model, X_train, y_train, cv=5, scoring='f1').mean()
    best_depth[depth] = score
以此类推,按调参顺序依次调整优化,并且可以对每一个最优参数下模型的得分进行可视化。
3. 网格搜索
即穷举搜索,在参数数组里循环遍历,一般大数据集不会用到,因为速度太慢。
from sklearn.model_selection import GridSearchCV
def get_best_cv_params(learning_rate=0.1, n_estimators=581, num_leaves=31, max_depth=-1,
                       bagging_fraction=1.0, feature_fraction=1.0, bagging_freq=0,
                       min_data_in_leaf=20, min_child_weight=0.001, min_split_gain=0,
                       reg_lambda=0, reg_alpha=0, param_grid=None):
    """Grid-search `param_grid` around a baseline LGBMClassifier.

    All keyword arguments except `param_grid` are the fixed baseline
    hyper-parameters; `param_grid` is the dict of candidates handed to
    GridSearchCV. Prints the best parameters and the best micro-f1 score.
    """
    # BUG FIX: the tutorial's calls set a module-level `lgb_params` but never
    # pass it in, so GridSearchCV would receive param_grid=None and raise.
    # Fall back to the global grid when no grid is passed (backward compatible).
    if param_grid is None:
        param_grid = lgb_params
    cv_fold = KFold(n_splits=5, shuffle=True, random_state=2021)
    model_lgb = lgb.LGBMClassifier(learning_rate=learning_rate,
                                   n_estimators=n_estimators,
                                   num_leaves=num_leaves,
                                   max_depth=max_depth,
                                   bagging_fraction=bagging_fraction,
                                   feature_fraction=feature_fraction,
                                   bagging_freq=bagging_freq,
                                   min_data_in_leaf=min_data_in_leaf,
                                   min_child_weight=min_child_weight,
                                   min_split_gain=min_split_gain,
                                   reg_lambda=reg_lambda,
                                   reg_alpha=reg_alpha,
                                   n_jobs=8)
    # Multi-class task -> micro-averaged f1 as the selection metric.
    f1 = make_scorer(f1_score, average='micro')
    grid_search = GridSearchCV(estimator=model_lgb,
                               cv=cv_fold,
                               param_grid=param_grid,
                               scoring=f1)
    grid_search.fit(X_train, y_train)
    print('模型当前最优参数为:{}'.format(grid_search.best_params_))
    print('模型当前最优得分为:{}'.format(grid_search.best_score_))
总体思路是先粗调再细调。在一开始调整时,可设置较大的学习率如0.1,先确定树的个数,再依次调整参数,最后设置较小的学习率如0.05,确定最终参数。
# Stage 1: coarse search over tree shape.
# BUG FIX: pass the grid explicitly — the original set `lgb_params` but never
# handed it to get_best_cv_params, whose param_grid defaults to None.
lgb_params = {'num_leaves': range(10, 80, 5), 'max_depth': range(3, 10, 2)}
get_best_cv_params(param_grid=lgb_params)
#----------------------------------------
# Stage 2: fine search around the coarse optimum, with the tree count fixed.
lgb_params = {'num_leaves': range(25, 35, 1), 'max_depth': range(5, 9, 1)}
get_best_cv_params(n_estimators=85, param_grid=lgb_params)
#----------------------------------------
# Stage 3: sampling parameters.
lgb_params = {'bagging_fraction': [i/10 for i in range(5, 10, 1)],
              'feature_fraction': [i/10 for i in range(5, 10, 1)],
              'bagging_freq': range(0, 81, 10)}
get_best_cv_params(n_estimators=85, num_leaves=29, max_depth=7, min_data_in_leaf=45,
                   param_grid=lgb_params)
#----------------------------------------
# Stage 4: L1/L2 regularization.
lgb_params = {'reg_lambda': [0, 0.001, 0.01, 0.03, 0.08, 0.3, 0.5],
              'reg_alpha': [0, 0.001, 0.01, 0.03, 0.08, 0.3, 0.5]}
get_best_cv_params(n_estimators=85, num_leaves=29, max_depth=7, min_data_in_leaf=45,
                   bagging_fraction=0.9, feature_fraction=0.9, bagging_freq=40,
                   param_grid=lgb_params)
#----------------------------------------
# Stage 5: minimum split gain.
# BUG FIX: dropped the original's min_split_gain=None — min_split_gain is the
# parameter being searched here, and None is not a valid baseline value.
lgb_params = {'min_split_gain': [i/10 for i in range(0, 11, 1)]}
get_best_cv_params(n_estimators=85, num_leaves=29, max_depth=7, min_data_in_leaf=45,
                   bagging_fraction=0.9, feature_fraction=0.9, bagging_freq=40,
                   param_grid=lgb_params)
#----------------------------------------
# Final hyper-parameter set assembled from the staged grid searches above,
# with the learning rate lowered to 0.01 for the last round of boosting.
final_params = dict(
    boosting_type='gbdt',
    learning_rate=0.01,
    num_leaves=29,
    max_depth=7,
    objective='multiclass',
    num_class=4,
    min_data_in_leaf=45,
    min_child_weight=0.001,
    bagging_fraction=0.9,
    feature_fraction=0.9,
    bagging_freq=40,
    min_split_gain=0,
    reg_lambda=0,
    reg_alpha=0,
    nthread=6,
)
# 5-fold stratified CV with early stopping: determines how many boosting
# rounds the final parameter set needs.
cv_result = lgb.cv(
    params=final_params,
    train_set=lgb_train,
    num_boost_round=5000,
    early_stopping_rounds=20,
    nfold=5,
    stratified=True,
    shuffle=True,
    feval=f1_score_vali,  # custom f1 evaluation callback defined elsewhere
    seed=0,
)
4. 贝叶斯调参
是一种用代理模型寻找目标函数最优值(此处为最大化f1得分)的方法,比网格和随机搜索省时。步骤如下:
① 定义优化函数(rf_cv_lgb)
② 建立模型
③ 定义待优化的参数
④ 得到优化结果,并返回要优化的分数指标
from sklearn.model_selection import cross_val_score
#定义优化函数
def rf_cv_lgb(num_leaves, max_depth, bagging_fraction, feature_fraction, bagging_freq,
              min_data_in_leaf, min_child_weight, min_split_gain, reg_lambda, reg_alpha):
    """Objective for BayesianOptimization: mean 5-fold micro-f1 of an
    LGBMClassifier built from the sampled hyper-parameters.

    Integer-valued parameters arrive as floats from the optimizer and are
    cast back with int(); fractions are rounded to 2 decimals.
    """
    # BUG FIX: min_split_gain, reg_lambda and reg_alpha were accepted but never
    # forwarded to the model, so the optimizer was tuning no-ops for them.
    model_lgb = lgb.LGBMClassifier(boosting_type='gbdt', objective='multiclass',
                                   num_class=4, learning_rate=0.1, n_estimators=5000,
                                   num_leaves=int(num_leaves), max_depth=int(max_depth),
                                   bagging_fraction=round(bagging_fraction, 2),
                                   feature_fraction=round(feature_fraction, 2),
                                   bagging_freq=int(bagging_freq),
                                   min_data_in_leaf=int(min_data_in_leaf),
                                   min_child_weight=min_child_weight,
                                   min_split_gain=min_split_gain,
                                   reg_lambda=reg_lambda,
                                   reg_alpha=reg_alpha)
    f1 = make_scorer(f1_score, average='micro')
    val = cross_val_score(model_lgb, X_train_split, y_train_split, cv=5, scoring=f1).mean()
    return val
from bayes_opt import BayesianOptimization

# Continuous search bounds for every hyper-parameter tuned by rf_cv_lgb.
param_bounds = {
    'num_leaves': (10, 200),
    'max_depth': (3, 20),
    'bagging_fraction': (0.5, 1.0),
    'feature_fraction': (0.5, 1.0),
    'bagging_freq': (0, 100),
    'min_data_in_leaf': (10, 100),
    'min_child_weight': (0, 10),
    'min_split_gain': (0.0, 1.0),
    'reg_alpha': (0.0, 10),
    'reg_lambda': (0.0, 10),
}

bayes_lgb = BayesianOptimization(rf_cv_lgb, param_bounds)

# Run the optimization: 20 iterations after the default initial random points.
bayes_lgb.maximize(n_iter=20)

# Best score and the parameter set that produced it.
bayes_lgb.max
参数优化完成后,可根据优化后的参数建立新的模型,降低学习率并寻找最优模型迭代次数。
#设置较小的学习率,并通过cv函数确定当前最优的迭代次数
# Hyper-parameters chosen by the Bayesian search, with the learning rate
# lowered to 0.01 for the final iteration-count search via lgb.cv.
base_params_lgb = dict(
    boosting_type='gbdt',
    objective='multiclass',
    num_class=4,
    learning_rate=0.01,
    num_leaves=138,
    max_depth=11,
    min_data_in_leaf=43,
    min_child_weight=6.5,
    bagging_fraction=0.64,
    feature_fraction=0.93,
    bagging_freq=49,
    reg_lambda=7,
    reg_alpha=0.21,
    min_split_gain=0.288,
    nthread=10,
    verbose=-1,  # silence LightGBM logging
)
# Long CV run (up to 20000 rounds, generous early stopping) to pin down the
# optimal number of boosting iterations for the tuned parameters.
cv_result_lgb = lgb.cv(
    params=base_params_lgb,
    train_set=train_matrix,
    num_boost_round=20000,
    early_stopping_rounds=1000,
    nfold=5,
    stratified=True,
    shuffle=True,
    feval=f1_score_vali,  # custom f1 evaluation callback defined elsewhere
    seed=0,
)
# The length of the metric history is the best iteration count.
print('迭代次数{}'.format(len(cv_result_lgb['f1_score-mean'])))
print('最终模型的f1为{}'.format(max(cv_result_lgb['f1_score-mean'])))
模型参数确定之后,建立最终模型并对验证集进行验证。