python中LightGBM模型以及其他模型的使用

在做一个练习的过程中可能需要的包如下:

import pandas as pd
import numpy
import warnings
from sklearn.preprocessing import scale
from sklearn.model_selection import cross_val_score
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import GradientBoostingClassifier
from xgboost.sklearn import XGBClassifier
import lightgbm as lgb

1. 划分X和Y

X为数据特征,即feature,Y为target, 即是否逾期的结果。逾期为1,没有逾期为0。

2. 划分特征值和标签值

# Split the DataFrame into label vector (y) and feature matrix (X).
# NOTE: "表的名称" is a placeholder ("name of the table") — substitute your DataFrame.
# BUG FIX: the label was originally bound to `wxl`, but the scoring function
# below reads `wxl_y`, which would raise NameError; bind the correct name.
wxl_y = 表的名称['target']                 # label: 1 = overdue, 0 = not overdue
wxl_X = 表的名称.drop(columns=['target'])  # everything except the target column
wxl_X = scale(wxl_X, axis=0)  # standardize each feature column (zero mean, unit variance)

3. 将一个大的数据集划分成训练集和测试集

首先需要导入我们需要的pandas包和re包和numpy包
# BUG FIX: the package is "pandas", not "panda"; use the same alias as the
# top of the file so the snippet is consistent with the earlier imports.
import pandas as pd
import numpy
import re
# Import the splitter.
from sklearn.model_selection import train_test_split
# Hold out 20% of the rows as the test set (80/20 split).
X_train, X_test, y_train, y_test = train_test_split(feature, target, test_size=0.2)

4. 使用LightGBM模型进行预测以及结果评估

import lightgbm as lgb

# Wrap the arrays in LightGBM Dataset objects; the eval set references the
# training set so feature binning stays consistent between the two.
lgb_train = lgb.Dataset(X_train, y_train)
lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train)

# LightGBM parameters — tune these to your own data.
params = {
    'task': 'train',
    'boosting_type': 'gbdt',
    'objective': 'binary',           # binary classification (overdue / not overdue)
    # BUG FIX: '12' was a typo for the 'l2' metric name.
    'metric': {'l2', 'auc', 'binary_logloss'},
    'num_leaves': 40,
    'learning_rate': 0.05,
    'feature_fraction': 0.9,         # column subsampling per tree
    'bagging_fraction': 0.8,         # row subsampling ...
    'bagging_freq': 5,               # ... performed every 5 iterations
    'verbose': 0,
    'is_unbalance': True,            # reweight classes for the imbalanced target
}

# Train with early stopping on the eval set.
# NOTE(review): lightgbm >= 4.0 removed the early_stopping_rounds keyword —
# there, pass callbacks=[lgb.early_stopping(100)] instead.
gbm = lgb.train(params, lgb_train, num_boost_round=1000,
                valid_sets=lgb_eval, early_stopping_rounds=100)



# Predict on the held-out set; the input must have the same feature layout
# (columns, order) as the data used for training.
y_pred = gbm.predict(X_test)

# Score the probability predictions with ROC-AUC.
from sklearn.metrics import roc_auc_score
auc_score = roc_auc_score(y_test, y_pred)

# Persist the trained booster as a text file...
gbm.save_model('whx19961212.txt')
# ...and later reload it without retraining.
import lightgbm as lgb
gbm = lgb.Booster(model_file='whx19961212.txt')

5. 另外其他各种模型的构建

lr = LogisticRegression(random_state=2018,tol=1e-6)  # logistic regression

tree = DecisionTreeClassifier(random_state=2018) # decision tree

svm = SVC(probability=True,random_state=2018,tol=1e-6)  # SVM; probability=True enables predict_proba, needed for roc_auc scoring below

forest=RandomForestClassifier(n_estimators=100,random_state=2018) # random forest

Gbdt=GradientBoostingClassifier(random_state=2018) # GBDT (gradient boosted trees)

Xgbc=XGBClassifier(random_state=2018)  # XGBoost

gbm=lgb.LGBMClassifier(random_state=2018)  # LightGBM (NOTE: rebinds the name `gbm` previously holding the trained Booster)

6. 各种评分函数的构建

def muti_score(model):
    """Print mean 5-fold cross-validated metrics for *model*.

    Evaluates accuracy, precision, recall, F1 and ROC-AUC on the
    module-level feature matrix ``wxl_X`` and label vector ``wxl_y``
    (presumably a binary target — confirm against the data prep step).
    """
    warnings.filterwarnings('ignore')
    # One cross_val_score call per metric; the loop replaces five
    # copy-pasted calls (and drops the local `f1_score`, which shadowed
    # a commonly imported sklearn name).
    means = {
        scoring: cross_val_score(model, wxl_X, wxl_y, scoring=scoring, cv=5).mean()
        for scoring in ('accuracy', 'precision', 'recall', 'f1', 'roc_auc')
    }
    print("准确率:", means['accuracy'])
    print("精确率:", means['precision'])
    print("召回率:", means['recall'])
    print("F1_score:", means['f1'])
    print("AUC:", means['roc_auc'])
    
# Score every model. An explicit name -> model mapping replaces eval(),
# which executed arbitrary strings and hid the dependency on the
# variables defined above. Dict insertion order preserves the original
# evaluation order, so the printed output is unchanged.
models = {
    "lr": lr,
    "tree": tree,
    "svm": svm,
    "forest": forest,
    "Gbdt": Gbdt,
    "Xgbc": Xgbc,
    "gbm": gbm,
}
for name, model in models.items():
    print(name)
    muti_score(model)
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值