Python 模型 LGBM 调优工具:lightgbm.LGBMClassifier 方法代码示例

本文详细介绍了Python中的lightgbm.LGBMClassifier方法,通过多个代码示例展示了如何使用该方法进行模型训练、评估和调优。包括训练、预测、交叉验证以及与其他模型比较等应用场景,适用于初学者和进阶者。
摘要由CSDN通过智能技术生成

本文整理汇总了Python中lightgbm.LGBMClassifier方法的典型用法代码示例。如果您正苦于以下问题:Python lightgbm.LGBMClassifier方法的具体用法?Python lightgbm.LGBMClassifier怎么用?Python lightgbm.LGBMClassifier使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在模块lightgbm的用法示例。

在下文中一共展示了lightgbm.LGBMClassifier方法的30个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。

示例1: Train

​点赞 9

# 需要导入模块: import lightgbm [as 别名]

# 或者: from lightgbm import LGBMClassifier [as 别名]

def Train(data, modelcount, censhu, yanzhgdata):
    """Fit a LightGBM binary classifier and report F1 scores.

    Parameters
    ----------
    data : 2-D array whose last column is the label (training split).
    modelcount : number of boosting rounds (n_estimators).
    censhu : maximum tree depth (max_depth).
    yanzhgdata : 2-D array, same layout as *data* (validation split).

    Returns
    -------
    (train_f1, validation_f1) as produced by the external ``fmse`` helper,
    whose first element is the F1 measure.
    """
    features, labels = data[:, :-1], data[:, -1]
    clf = lgbm.LGBMClassifier(boosting_type='gbdt', objective='binary', num_leaves=50,
                              learning_rate=0.1, n_estimators=modelcount, max_depth=censhu,
                              bagging_fraction=0.9, feature_fraction=0.9, reg_lambda=0.2)
    clf.fit(features, labels)
    # F1 on the training split
    fit_score = fmse(labels, clf.predict(features))[0]
    # F1 on the held-out validation split
    val_score = fmse(yanzhgdata[:, -1], clf.predict(yanzhgdata[:, :-1]))[0]
    print(fit_score, val_score)
    return fit_score, val_score

# 最终确定组合的函数

开发者ID:Anfany,项目名称:Machine-Learning-for-Beginner-by-Python3,代码行数:21,

示例2: recspre

​点赞 6

# 需要导入模块: import lightgbm [as 别名]

# 或者: from lightgbm import LGBMClassifier [as 别名]

def recspre(estrs, predata, datadict, zhe):
    """Retrain with the chosen '<n_estimators>-<max_depth>' combination and score it.

    Parameters
    ----------
    estrs : string of the form "trees-depth", e.g. "300-6".
    predata : 2-D array whose last column is the label (prediction split).
    datadict : mapping fold-key -> {'train': 2-D array, ...}.
    zhe : fold key selecting the training array inside *datadict*.

    Prints the confusion matrix (via the external ``ConfuseMatrix`` helper)
    and returns whatever the external ``fmse`` helper yields.
    """
    trees, depth = (int(part) for part in estrs.split('-'))
    train_block = datadict[zhe]['train']
    clf = lgbm.LGBMClassifier(boosting_type='gbdt', objective='binary', num_leaves=50,
                              learning_rate=0.1, n_estimators=trees, max_depth=depth,
                              bagging_fraction=0.9, feature_fraction=0.9, reg_lambda=0.2)
    clf.fit(train_block[:, :-1], train_block[:, -1])
    # Predict on the evaluation split
    predicted = clf.predict(predata[:, :-1])
    # Show the confusion matrix for the evaluation split
    print(ConfuseMatrix(predata[:, -1], predicted))
    return fmse(predata[:, -1], predicted)

# 主函数

开发者ID:Anfany,项目名称:Machine-Learning-for-Beginner-by-Python3,代码行数:20,

示例3: test_cv_lgbm

​点赞 6

# 需要导入模块: import lightgbm [as 别名]

# 或者: from lightgbm import LGBMClassifier [as 别名]

def test_cv_lgbm():
    """cross_validate with LGBM models on a numpy dataset: check fold scores,
    OOF/test predictions, and per-fold feature importances."""
    n_folds = 5
    X, y = make_classification(n_samples=1024, n_features=20, class_sep=0.98, random_state=0)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5, random_state=0)
    models = [LGBMClassifier(n_estimators=300) for _ in range(n_folds)]

    result = cross_validate(models, X_train, y_train, X_test, cv=n_folds,
                            eval_func=roc_auc_score,
                            fit_params={'early_stopping_rounds': 200})
    pred_oof, pred_test, scores, importance = result
    print(scores)

    # one score per fold, plus the overall score appended last
    assert len(scores) == n_folds + 1
    assert scores[-1] >= 0.85  # overall roc_auc
    assert roc_auc_score(y_train, pred_oof) == scores[-1]
    assert roc_auc_score(y_test, pred_test) >= 0.85  # test roc_auc
    assert roc_auc_score(y, models[0].predict_proba(X)[:, 1]) >= 0.85  # make sure models are trained
    assert len(importance) == n_folds
    assert list(importance[0].columns) == ['feature', 'importance']
    assert len(importance[0]) == 20

开发者ID:nyanp,项目名称:nyaggle,代码行数:21,

示例4: test_cv_lgbm_df

​点赞 6

# 需要导入模块: import lightgbm [as 别名]

# 或者: from lightgbm import LGBMClassifier [as 别名]

def test_cv_lgbm_df():
    """cross_validate with LGBM models on a DataFrame (numeric + categorical
    features): check scores, predictions, importances, and early stopping."""
    n_folds = 5
    X, y = make_classification_df(n_samples=1024, n_num_features=20, n_cat_features=1, class_sep=0.98, random_state=0)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5, random_state=0)
    models = [LGBMClassifier(n_estimators=300) for _ in range(n_folds)]

    result = cross_validate(models, X_train, y_train, X_test, cv=n_folds,
                            eval_func=roc_auc_score)
    pred_oof, pred_test, scores, importance = result
    print(scores)

    # one score per fold, plus the overall score appended last
    assert len(scores) == n_folds + 1
    assert scores[-1] >= 0.85  # overall roc_auc
    assert roc_auc_score(y_train, pred_oof) == scores[-1]
    assert roc_auc_score(y_test, pred_test) >= 0.85  # test roc_auc
    assert roc_auc_score(y_test, models[0].predict_proba(X_test)[:, 1]) >= 0.85  # make sure models are trained
    assert len(importance) == n_folds
    assert list(importance[0].columns) == ['feature', 'importance']
    # 20 numeric features + 1 categorical feature
    assert len(importance[0]) == 20 + 1
    assert models[0].booster_.num_trees() < 300  # making sure early stopping worked

开发者ID:nyanp,项目名称:nyaggle,代码行数:21,

示例5: test_fit_params_callback

​点赞 6

# 需要导入模块: import lightgbm [as 别名]

# 或者: from lightgbm import LGBMClassifier [as 别名]

def test_fit_params_callback():
    """fit_params given as a per-fold callback (supplying sample weights) must
    change the resulting overall score versus a plain dict of fit_params."""
    n_folds = 5
    X, y = make_classification(n_samples=1024, n_features=20, class_sep=0.98, random_state=0)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5, random_state=0)
    models = [LGBMClassifier(n_estimators=300) for _ in range(n_folds)]

    # random per-row weights, normalized to sum to 1
    weights = np.random.randint(1, 10, size=len(X_train))
    weights = weights / weights.sum()

    def fit_params(n: int, train_index: List[int], valid_index: List[int]):
        # callback invoked once per fold with that fold's row indices
        return {
            'early_stopping_rounds': 100,
            'sample_weight': list(weights[train_index]),
            'eval_sample_weight': [list(weights[valid_index])]
        }

    result_w_weight = cross_validate(models, X_train, y_train, X_test, cv=n_folds,
                                     eval_func=roc_auc_score, fit_params=fit_params)
    result_wo_weight = cross_validate(models, X_train, y_train, X_test, cv=n_folds,
                                      eval_func=roc_auc_score, fit_params={'early_stopping_rounds': 50})

    assert result_w_weight.scores[-1] != result_wo_weight.scores[-1]

开发者ID:nyanp,项目名称:nyaggle,代码行数:25,

示例6: __init__

​点赞 6

# 需要导入模块: import lightgbm [as 别名]

# 或者: from lightgbm import LGBMClassifier [as 别名]

def __init__(self):

self._models = dict()

try:

import sklearn.ensemble

self._models['RandomForestClassifier'] = sklearn.ensemble.RandomForestClassifier

except ImportError:

pass

try:

import xgboost

self._models['XGBClassifier'] = xgboost.XGBClassifier

except ImportError:

pass

try:

import lightgbm

self._models['LGBMClassifie

  • 1
    点赞
  • 2
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值