lgbm

  1. lightgbm建模

     
  2. import lightgbm as lgbm

  3. from sklearn import metrics

  4. from sklearn import model_selection

  5.  
  6.  
  7. np.random.seed(42)

  8.  
  9. model = lgbm.LGBMRegressor(

  10. objective='regression',

  11. max_depth=5,

  12. num_leaves=25,

  13. learning_rate=0.007,

  14. n_estimators=1000,

  15. min_child_samples=80,

  16. subsample=0.8,

  17. colsample_bytree=1,

  18. reg_alpha=0,

  19. reg_lambda=0,

  20. random_state=np.random.randint(10e6)

  21. )

  22.  
  23. n_splits = 6

  24. cv = model_selection.KFold(n_splits=n_splits, shuffle=True, random_state=42)

  25.  
  26. val_scores = [0] * n_splits

  27.  
  28. sub = submission['id'].to_frame()

  29. sub['visitors'] = 0

  30.  
  31. feature_importances = pd.DataFrame(index=X_train.columns)

  32.  
  33. for i, (fit_idx, val_idx) in enumerate(cv.split(X_train, y_train)):

  34.  
  35. X_fit = X_train.iloc[fit_idx]

  36. y_fit = y_train.iloc[fit_idx]

  37. X_val = X_train.iloc[val_idx]

  38. y_val = y_train.iloc[val_idx]

  39.  
  40. model.fit(

  41. X_fit,

  42. y_fit,

  43. eval_set=[(X_fit, y_fit), (X_val, y_val)],

  44. eval_names=('fit', 'val'),

  45. eval_metric='l2',

  46. early_stopping_rounds=200,

  47. feature_name=X_fit.columns.tolist(),

  48. verbose=False

  49. )

  50.  
  51. val_scores[i] = np.sqrt(model.best_score_['val']['l2'])

  52. sub['visitors'] += model.predict(X_test, num_iteration=model.best_iteration_)

  53. feature_importances[i] = model.feature_importances_

  54.  
  55. print('Fold {} RMSLE: {:.5f}'.format(i+1, val_scores[i]))

  56.  
  57. sub['visitors'] /= n_splits

  58. sub['visitors'] = np.expm1(sub['visitors'])

  59.  
  60. val_mean = np.mean(val_scores)

  61. val_std = np.std(val_scores)

  62.  

下面我使用LightGBM的cv函数进行演示:

# Demo of LightGBM's built-in cv() helper for choosing n_estimators.
# NOTE(review): the snippet referenced `lgb` without importing it anywhere
# in view — added the conventional alias import so the example runs.
import lightgbm as lgb

params = {
    'boosting_type': 'gbdt',
    'objective': 'regression',
    'learning_rate': 0.1,
    'num_leaves': 50,
    'max_depth': 6,
    'subsample': 0.8,
    'colsample_bytree': 0.8,
}

data_train = lgb.Dataset(df_train, y_train, silent=True)

# stratified=False because this is regression. Early stopping truncates the
# result lists at the best round, so len(cv_results['rmse-mean']) is the
# tuned number of estimators and its last entry is the best CV score.
cv_results = lgb.cv(
    params, data_train, num_boost_round=1000, nfold=5, stratified=False, shuffle=True, metrics='rmse',
    early_stopping_rounds=50, verbose_eval=50, show_stdv=True, seed=0)

print('best n_estimators:', len(cv_results['rmse-mean']))
print('best cv score:', cv_results['rmse-mean'][-1])

 

  • 0
    点赞
  • 1
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值