Data Mining from Scratch, Financial Risk Control (5): Tree-Based Models in Practice

1. Preface

While revisiting this project, I found that my understanding of the relevant concepts was not as thorough as I had thought, so I want to connect theory with practice and deepen my grasp of the fundamentals. The project comes from the Alibaba Tianchi learning competition 零基础入门金融风控-贷款违约预测 (Financial Risk Control for Beginners: Loan Default Prediction); interested readers can look up the original competition page for details.

2. Feature Engineering

① Process the time feature ['issueDate']:

  • Convert ['issueDate'] to ['issueDateDT'], the number of days since 2007-06-01.
  • Convert ['issueDate'] to ['issueDateM'], the month the loan was issued, then one-hot encode it (a sketch of the one-hot step follows the month-extraction code below).

② Convert ['employmentLength'] to a plain number of years of employment

③ Take the last 4 characters of ['earliesCreditLine'] as the year

④ Apply a custom ordinal encoding to ['grade'] and ['subGrade']

⑤ Drop some features:

  • ['issueDate'] has already been converted
  • ['policyCode'] takes only a single value, so it carries no information
  • ['id'] carries no information
     
import numpy as np
import pandas as pd
import datetime
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_auc_score
 
# load the data
data_train = pd.read_csv('D:/myP/financial_risk/train.csv')
data_testA = pd.read_csv('D:/myP/financial_risk/testA.csv')

# convert ['issueDate'] to the number of days since '2007-06-01'
startdate = datetime.datetime.strptime('2007-06-01', '%Y-%m-%d')
for data in [data_train, data_testA]:
    data['issueDate'] = pd.to_datetime(data['issueDate'], format='%Y-%m-%d')
    data['issueDateDT'] = (data['issueDate'] - startdate).dt.days
 
# convert ['issueDate'] to the month the loan was issued
for data in [data_train, data_testA]:
    data['issueDateM'] = data['issueDate'].dt.month  # issueDate is already datetime
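# Bullet ② also calls for one-hot encoding the month; a minimal sketch of that
# step (the 'issueDateM' column prefix is an assumed naming, not from the
# original; in practice you should also check that train and test end up with
# the same dummy columns):
for data in [data_train, data_testA]:
    dummies = pd.get_dummies(data['issueDateM'], prefix='issueDateM')
    data[dummies.columns] = dummies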
 
# convert employmentLength to a plain number of years
def employmentLength_to_int(s):
    if pd.isnull(s):
        return s
    else:
        return np.int8(s.split()[0])
for data in [data_train, data_testA]:
    data['employmentLength'] = data['employmentLength'].replace('10+ years', '10 years')
    data['employmentLength'] = data['employmentLength'].replace('< 1 year', '0 years')
    data['employmentLength'] = data['employmentLength'].apply(employmentLength_to_int)
 
# take the last 4 characters of ['earliesCreditLine'] as the year
for data in [data_train, data_testA]:
    data['earliesCreditLine'] = data['earliesCreditLine'].apply(lambda s: int(s[-4:]))
# quick sanity check of the converted years (on data_testA)
data_testA['earliesCreditLine'].value_counts(dropna=False).sort_index()
 
# custom ordinal encoding for ['grade'] and ['subGrade']
for data in [data_train, data_testA]:
    data['grade'] = data['grade'].map({'A': 1, 'B': 2, 'C': 3, 'D': 4, 'E': 5, 'F': 6, 'G': 7})
    data['subGrade'] = data['subGrade'].map({
        'A1': 1, 'A2': 2, 'A3': 3, 'A4': 4, 'A5': 5,
        'B1': 6, 'B2': 7, 'B3': 8, 'B4': 9, 'B5': 10,
        'C1': 11, 'C2': 12, 'C3': 13, 'C4': 14, 'C5': 15,
        'D1': 16, 'D2': 17, 'D3': 18, 'D4': 19, 'D5': 20,
        'E1': 21, 'E2': 22, 'E3': 23, 'E4': 24, 'E5': 25,
        'F1': 26, 'F2': 27, 'F3': 28, 'F4': 29, 'F5': 30,
        'G1': 31, 'G2': 32, 'G3': 33, 'G4': 34, 'G5': 35})
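# An equivalent, more compact way to derive the same subGrade codes
# (a sketch, not part of the original mapping): letter grade times 5 plus the digit.
# data['subGrade'] = data['subGrade'].apply(lambda s: (ord(s[0]) - ord('A')) * 5 + int(s[1]))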
 
# drop features we no longer need
delFea = ['policyCode', 'id', 'issueDate']
for i in delFea:
    data_train.drop(i, axis=1, inplace=True)
    data_testA.drop(i, axis=1, inplace=True)

data_train.to_csv('D:/myP/financial_risk/trainfortree.csv', index=False)
data_testA.to_csv('D:/myP/financial_risk/testAfortree.csv', index=False)

3. Decision Tree
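
As a baseline before moving to LightGBM, below is a minimal sketch of fitting a scikit-learn DecisionTreeClassifier on the features prepared in Section 2 and scoring it on a hold-out split. The NaN fill value and the tree hyperparameters here are assumptions, not tuned values.

from sklearn.tree import DecisionTreeClassifier

train = pd.read_csv('D:/myP/financial_risk/trainfortree.csv')
train = train.fillna(-1)  # sklearn trees reject NaN; -1 is an assumed sentinel value
x_train, x_vali, y_train, y_vali = train_test_split(
    train.drop('isDefault', axis=1), train['isDefault'], test_size=0.25)
dt = DecisionTreeClassifier(max_depth=8, min_samples_leaf=100)  # assumed, untuned
dt.fit(x_train, y_train)
print('validation AUC:', roc_auc_score(y_vali, dt.predict_proba(x_vali)[:, 1]))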

4. LightGBM

4.1 Determine the number of trees in LightGBM (n_estimators)

import lightgbm as lgb
train = pd.read_csv('D:/myP/financial_risk/trainfortree.csv')  # the file saved in Section 2
x_train, x_vali, y_train, y_vali = train_test_split(
    train.drop('isDefault', axis=1), train['isDefault'], test_size=0.25)
params = {
    'boosting_type': 'gbdt',
    'objective': 'binary',
    'metric': 'auc',
    'nthread': 10,
    'learning_rate': 0.1,
    'num_leaves': 30,
    'max_depth': 8,
    'subsample': 0.8,
    'colsample_bytree': 0.8,
}
lgb_train = lgb.Dataset(x_train, y_train)  # LightGBM Dataset built from the training split
cv_results = lgb.cv(params, lgb_train, num_boost_round=1000, nfold=5,
                    stratified=False, shuffle=True,
                    early_stopping_rounds=50, seed=2022)
print('best n_estimators:', len(cv_results['auc-mean']))
print('best cv score:', pd.Series(cv_results['auc-mean']).max())
best n_estimators: 385
best cv score: 0.7344472095635076
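
This lgb.cv call uses the LightGBM 3.x API. In LightGBM >= 4.0 the early_stopping_rounds argument was removed in favor of callbacks; a sketch of the equivalent call (note that recent versions also prefix the result key, e.g. 'valid auc-mean'):

cv_results = lgb.cv(params, lgb_train, num_boost_round=1000, nfold=5,
                    stratified=False, shuffle=True, seed=2022,
                    callbacks=[lgb.early_stopping(stopping_rounds=50)])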

4.2 Determine max_depth and num_leaves

from sklearn.model_selection import GridSearchCV

params_test1 = {'max_depth': range(3, 8, 1), 'num_leaves': range(5, 100, 5)}

gsearch1 = GridSearchCV(
    estimator=lgb.LGBMClassifier(boosting_type='gbdt', objective='binary',
                                 metrics='auc', learning_rate=0.1, n_estimators=385,
                                 bagging_fraction=0.8, feature_fraction=0.8),
    param_grid=params_test1, scoring='roc_auc', cv=5, n_jobs=-1)
gsearch1.fit(x_train, y_train)
gsearch1.best_params_, gsearch1.best_score_
({'max_depth': 5, 'num_leaves': 35}, 0.7349047555835462)

Note that a depth-5 tree has at most 2^5 = 32 leaves, so the selected num_leaves=35 is not actually a binding constraint here.

4.3 Determine max_bin and min_data_in_leaf

params_test2 = {'max_bin': range(5, 256, 10), 'min_data_in_leaf': range(1, 102, 10)}
gsearch2 = GridSearchCV(
    estimator=lgb.LGBMClassifier(boosting_type='gbdt', objective='binary',
                                 metrics='auc', learning_rate=0.1, n_estimators=385,
                                 max_depth=5, num_leaves=35,
                                 bagging_fraction=0.8, feature_fraction=0.8),
    param_grid=params_test2, scoring='roc_auc', cv=5, n_jobs=-1)
gsearch2.fit(x_train, y_train)
gsearch2.best_params_, gsearch2.best_score_
({'max_bin': 205, 'min_data_in_leaf': 101}, 0.7353558693303508)
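
The selected min_data_in_leaf=101 sits at the upper edge of its search range (range(1, 102, 10) tops out at 101), which usually suggests the optimum may lie beyond the grid. A follow-up search over a wider, assumed range might look like:

params_test2b = {'min_data_in_leaf': range(101, 502, 50)}  # extended range (an assumption)
gsearch2b = GridSearchCV(
    estimator=lgb.LGBMClassifier(boosting_type='gbdt', objective='binary',
                                 metrics='auc', learning_rate=0.1, n_estimators=385,
                                 max_depth=5, num_leaves=35, max_bin=205,
                                 bagging_fraction=0.8, feature_fraction=0.8),
    param_grid=params_test2b, scoring='roc_auc', cv=5, n_jobs=-1)
gsearch2b.fit(x_train, y_train)
gsearch2b.best_params_, gsearch2b.best_score_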

4.4 Determine feature_fraction, bagging_fraction, and bagging_freq

params_test3 = {'feature_fraction': [0.6, 0.7, 0.8, 0.9, 1.0],
                'bagging_fraction': [0.6, 0.7, 0.8, 0.9, 1.0],
                'bagging_freq': range(0, 81, 10)}
gsearch3 = GridSearchCV(
    estimator=lgb.LGBMClassifier(boosting_type='gbdt', objective='binary',
                                 metrics='auc', learning_rate=0.1, n_estimators=385,
                                 max_depth=5, num_leaves=35, max_bin=205,
                                 min_data_in_leaf=101),
    param_grid=params_test3, scoring='roc_auc', cv=5, n_jobs=-1)
gsearch3.fit(x_train, y_train)
gsearch3.best_params_, gsearch3.best_score_
({'bagging_fraction': 0.6, 'bagging_freq': 0, 'feature_fraction': 0.8}, 0.7353558693303508)

Note that bagging_freq=0 disables bagging entirely, so the selected bagging_fraction=0.6 has no effect; consistently, the best score is identical to the one in Section 4.3.

4.5 Determine lambda_l1 and lambda_l2

params_test4 = {'lambda_l1': [0.0, 1e-5, 1e-3, 0.1, 0.3, 0.5, 0.7, 0.9, 1.0],
                'lambda_l2': [0.0, 1e-5, 1e-3, 0.1, 0.3, 0.5, 0.7, 0.9, 1.0]}

gsearch4 = GridSearchCV(
    estimator=lgb.LGBMClassifier(boosting_type='gbdt', objective='binary',
                                 metrics='auc', learning_rate=0.1, n_estimators=385,
                                 max_depth=5, num_leaves=35, max_bin=205,
                                 min_data_in_leaf=101, bagging_fraction=0.6,
                                 bagging_freq=0, feature_fraction=0.8),
    param_grid=params_test4, scoring='roc_auc', cv=5, n_jobs=-1)
gsearch4.fit(x_train, y_train)
gsearch4.best_params_, gsearch4.best_score_
({'lambda_l1': 1.0, 'lambda_l2': 0.3}, 0.735480368997539)

As with min_data_in_leaf, the selected lambda_l1=1.0 lands on the edge of its candidate list, so extending the range could be worthwhile.

4.6 Determine min_split_gain

params_test5 = {'min_split_gain': [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]}
gsearch5 = GridSearchCV(
    estimator=lgb.LGBMClassifier(boosting_type='gbdt', objective='binary',
                                 metrics='auc', learning_rate=0.1, n_estimators=385,
                                 max_depth=5, num_leaves=35, max_bin=205,
                                 min_data_in_leaf=101, bagging_fraction=0.6,
                                 bagging_freq=0, feature_fraction=0.8,
                                 lambda_l1=1.0, lambda_l2=0.3),
    param_grid=params_test5, scoring='roc_auc', cv=5, n_jobs=-1)
gsearch5.fit(x_train, y_train)
gsearch5.best_params_, gsearch5.best_score_
({'min_split_gain': 0.0}, 0.735480368997539)
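
With every parameter now fixed, here is a minimal sketch of fitting the final model with the tuned values and checking the hold-out AUC. The common follow-up step of lowering the learning rate and re-running lgb.cv to re-pick n_estimators is left out.

final_clf = lgb.LGBMClassifier(
    boosting_type='gbdt', objective='binary', learning_rate=0.1,
    n_estimators=385, max_depth=5, num_leaves=35, max_bin=205,
    min_data_in_leaf=101, bagging_fraction=0.6, bagging_freq=0,
    feature_fraction=0.8, lambda_l1=1.0, lambda_l2=0.3, min_split_gain=0.0)
final_clf.fit(x_train, y_train)
print('hold-out AUC:', roc_auc_score(y_vali, final_clf.predict_proba(x_vali)[:, 1]))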