[Tianchi] Loan Default Prediction — Task 3: Feature Engineering

Stage goals: 1) feature preprocessing: missing-value handling, outlier handling, feature binning; 2) feature encoding: encoding object-type features, feature normalization, etc.; 3) feature selection: filtering out useless features (chi-square test, correlation coefficients, regularization, etc.); 4) simple modeling: XGBoost, LightGBM.

# Stage 3 goal: feature engineering
# Feature preprocessing: missing values, outliers, data binning
# Feature interaction and encoding

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import datetime
from tqdm import tqdm
from sklearn.preprocessing import LabelEncoder
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
from sklearn.preprocessing import MinMaxScaler
import xgboost as xgb
import lightgbm as lgb
from catboost import CatBoostRegressor
import warnings
from sklearn.model_selection import StratifiedKFold, KFold
from sklearn.metrics import accuracy_score, f1_score, roc_auc_score, log_loss
warnings.filterwarnings('ignore')


data_train = pd.read_csv(r'/home/corn/桌面/tianchifengkong/train.csv')
data_test_a = pd.read_csv(r'/home/corn/桌面/tianchifengkong/testA.csv')

# 1. Fill missing values: 1) replace with 0 directly; 2) replace with a nearby value;
#    3) replace with the mean; 4) replace with the mode
#    (strategies 1-3 are sketched below, after the median/mode fills)

numerical_fea = list(data_train.select_dtypes(exclude=['object']).columns)
category_fea = list(filter(lambda x: x not in numerical_fea,list(data_train.columns)))
label = 'isDefault'
numerical_fea.remove(label)

# 1.1. Fill numeric features with the median (computed on the training set, applied to both splits)
data_train[numerical_fea] = data_train[numerical_fea].fillna(data_train[numerical_fea].median())
data_test_a[numerical_fea] = data_test_a[numerical_fea].fillna(data_train[numerical_fea].median())

# 1.2. Fill categorical features with the mode; .mode() returns a DataFrame, so take its first row
data_train[category_fea] = data_train[category_fea].fillna(data_train[category_fea].mode().iloc[0])
data_test_a[category_fea] = data_test_a[category_fea].fillna(data_train[category_fea].mode().iloc[0])
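A minimal sketch of the other fill strategies listed above, run on a throwaway copy so the median/mode fills are not overwritten (the `tmp` name is ours; in practice you would pick one strategy per feature):

tmp = data_train.copy()  # throwaway copy just to illustrate
tmp[numerical_fea] = tmp[numerical_fea].fillna(0)  # 1) constant 0
# 2) nearby value: propagate the previous valid observation down each column
# tmp[numerical_fea] = tmp[numerical_fea].fillna(method='ffill')
# 3) column mean
# tmp[numerical_fea] = tmp[numerical_fea].fillna(tmp[numerical_fea].mean())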

# 1.3. Date/time handling: converting between pandas timestamps and datetimes

for data in [data_train, data_test_a]:
    data['issueDate'] = pd.to_datetime(data['issueDate'], format='%Y-%m-%d')  # month the loan was issued
    startdate = datetime.datetime.strptime('2007-06-01', '%Y-%m-%d')
    # construct a time feature: days elapsed since 2007-06-01
    data['issueDateDT'] = data['issueDate'].apply(lambda x: x - startdate).dt.days

# 1.4. Convert object-type columns to numeric

def employmentLength_to_int(s):
    if pd.isnull(s):
        return s
    else:
        return np.int8(s.split()[0])

for data in [data_train, data_test_a]:
    data['employmentLength'].replace(to_replace='10+ years', value='10 years', inplace=True)  # years employed
    data['employmentLength'].replace('< 1 year', '0 years', inplace=True)
    data['employmentLength'] = data['employmentLength'].apply(employmentLength_to_int)

for data in [data_train, data_test_a]:
    data['earliesCreditLine'] = data['earliesCreditLine'].apply(lambda s: int(s[-4:]))  # keep only the year, as int

# 1.5. Categorical feature handling
cate_features = ['grade', 'subGrade', 'employmentTitle', 'homeOwnership',
                 'verificationStatus', 'purpose', 'postCode', 'regionCode',
                 'applicationType', 'initialListStatus', 'title', 'policyCode']
# for f in cate_features:
#     print(f, 'number of distinct values:', data[f].nunique())

for data in [data_train, data_test_a]:
    data['grade'] = data['grade'].map({'A':1,'B':2,'C':3,'D':4,'E':5,'F':6,'G':7})  # ordinal mapping for graded categories

# Purely nominal features with more than two categories that are not high-dimensional/sparse
# can be one-hot encoded with pd.get_dummies. Note the pitfall: `data = pd.get_dummies(data, ...)`
# only rebinds the loop variable and leaves data_train/data_test_a unchanged; assign back
# explicitly if you want to keep the dummies (here the originals are kept, since 'subGrade'
# is label-encoded later).
for data in [data_train, data_test_a]:
    data = pd.get_dummies(data, columns=['subGrade', 'homeOwnership', 'verificationStatus',
                                         'purpose', 'regionCode'], drop_first=True)
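If you do keep the one-hot result, a common safeguard (a sketch, not from the original; `x_tr`/`x_te` are our names) is to align train and test so both carry the same dummy columns, setting the label aside first:

label_col = data_train['isDefault']  # set the label aside; the test split has no label
x_tr = pd.get_dummies(data_train.drop(columns=['isDefault']), columns=['subGrade'], drop_first=True)
x_te = pd.get_dummies(data_test_a, columns=['subGrade'], drop_first=True)
x_tr, x_te = x_tr.align(x_te, join='outer', axis=1, fill_value=0)  # same columns on both splits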
    
# 2. Outlier handling. Appropriate when outliers are not the research target and do not
#    represent a typical phenomenon; the test set is left untouched.

# 2.1. Mean-and-std (3-sigma) method: values more than three standard deviations from the mean are flagged as outliers
def find_outliers_by_3segama(data, fea):
    data_std = np.std(data[fea])
    data_mean = np.mean(data[fea])
    outliers_cut_off = data_std * 3
    lower_rule = data_mean - outliers_cut_off
    upper_rule = data_mean + outliers_cut_off
    data[fea + '_outliers'] = data[fea].apply(lambda x: 'outlier' if x > upper_rule or x < lower_rule else 'normal')
    return data

data_train = data_train.copy()
for fea in numerical_fea:
    data_train = find_outliers_by_3segama(data_train, fea)

# Drop the flagged outlier rows
for fea in numerical_fea:
    data_train = data_train[data_train[fea + '_outliers'] == 'normal']
    data_train = data_train.reset_index(drop=True)
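Filtering per feature compounds quickly, so it is worth checking how aggressive the filter was (a quick sanity check, not in the original):

print('rows remaining after outlier removal:', len(data_train))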

# 2.2. Data binning: 1) fixed-width bins, 2) quantile bins, 3) chi-square (ChiMerge) bins
#      (quantile binning shown here; a fixed-width sketch follows below)
for data in [data_train, data_test_a]:
    data['loanAmnt_bin3'] = pd.qcut(data['loanAmnt'], 10, labels=False)  # quantile binning into deciles
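A minimal fixed-width counterpart with pd.cut (the 'loanAmnt_bin1' column name is ours):

for data in [data_train, data_test_a]:
    data['loanAmnt_bin1'] = pd.cut(data['loanAmnt'], 10, labels=False)  # 10 equal-width bins over the value range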

# 3. Feature encoding
# High-cardinality categorical features are label-encoded
for col in tqdm(['employmentTitle', 'postCode', 'title', 'subGrade']):
    le = LabelEncoder()
    le.fit(list(data_train[col].astype(str).values) + list(data_test_a[col].astype(str).values))
    data_train[col] = le.transform(list(data_train[col].astype(str).values))
    data_test_a[col] = le.transform(list(data_test_a[col].astype(str).values))
print('Label Encoding done')
100%|██████████| 4/4 [00:03<00:00,  1.13it/s]

Label Encoding done
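As a quick illustration of what LabelEncoder does (toy values, not from the dataset): classes are sorted before codes are assigned, so 'A'→0, 'B'→1, 'C'→2.

le_demo = LabelEncoder()
print(le_demo.fit_transform(['B', 'A', 'B', 'C']))  # -> [1 0 1 2]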
# 3.1. Models such as logistic regression need extra feature engineering:
# 1. normalize the features and drop highly correlated ones;
# 2. normalization helps training converge better and faster and keeps large-scale features
#    from dominating small-scale ones;
# 3. removing correlated features improves interpretability and speeds up prediction.

# Min-max normalization pseudocode:
# for fea in [features_to_normalize]:
#     data[fea] = (data[fea] - np.min(data[fea])) / (np.max(data[fea]) - np.min(data[fea]))
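A runnable version of that pseudocode using the MinMaxScaler imported above; fit on the training split only, then apply the same transform to the test split (the `cols_to_scale` list is a placeholder):

cols_to_scale = ['loanAmnt']  # placeholder; choose the columns you actually want scaled
scaler = MinMaxScaler()
data_train[cols_to_scale] = scaler.fit_transform(data_train[cols_to_scale])
data_test_a[cols_to_scale] = scaler.transform(data_test_a[cols_to_scale])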


# 4. Feature selection
# Pruning useless features reduces model complexity and speeds up computation.
# Feature selection is mainly about reducing model scoring time, not training time
# (some techniques actually increase total training time).
# In the snippets below, `train` stands for the feature matrix and `target_train` for the label vector.

# 4.1. Variance threshold
from sklearn.feature_selection import VarianceThreshold
# threshold is the variance cutoff; features with lower variance are dropped
VarianceThreshold(threshold=3).fit_transform(train, target_train)
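To see which columns survive, fit the selector and inspect get_support() (a sketch assuming `train` is a DataFrame):

selector = VarianceThreshold(threshold=3).fit(train)
kept_cols = train.columns[selector.get_support()]  # boolean mask over the columns
print(kept_cols)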

# 4.2. Pearson correlation selection

from sklearn.feature_selection import SelectKBest
from scipy.stats import pearsonr
# Select the K best features and return the reduced data.
# The first argument is the scoring function: given the feature matrix and target vector,
# it returns one score per feature. Here we score by the absolute Pearson correlation with
# the target (a bare SelectKBest(k=5) would silently fall back to f_classif).
# k is the number of features to keep.
def pearson_score(X, y):
    return np.array([abs(pearsonr(X[:, i], y)[0]) for i in range(X.shape[1])])

SelectKBest(score_func=pearson_score, k=5).fit_transform(train, target_train)

# 4.3. Chi-square test
# chi2 requires non-negative feature values

from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
# k is the number of features to keep
SelectKBest(chi2, k=5).fit_transform(train, target_train)
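Because chi2 rejects negative values, a common pattern (a sketch) is to min-max scale first with the scaler already imported above:

X_scaled = MinMaxScaler().fit_transform(train)  # all values now in [0, 1]
X_kbest = SelectKBest(chi2, k=5).fit_transform(X_scaled, target_train)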

# 4.4. Recursive feature elimination (RFE)
from sklearn.feature_selection import RFE
from sklearn.linear_model import LogisticRegression
# Recursively drops the weakest features and returns the reduced data.
# estimator is the base model; n_features_to_select is the number of features to keep.
RFE(estimator=LogisticRegression(),
    n_features_to_select=2).fit_transform(train, target_train)

# 4.5. Regularization-based selection
# A base model with a penalty term both selects features and reduces dimensionality.
from sklearn.feature_selection import SelectFromModel
from sklearn.linear_model import LogisticRegression
# Feature selection with an L1-penalized logistic regression as the base model;
# the L1 penalty needs a compatible solver such as 'liblinear'
SelectFromModel(LogisticRegression(penalty="l1", C=0.1, solver="liblinear")).fit_transform(train, target_train)

 
# 4.6. Tree-based selection (GBDT)
from sklearn.feature_selection import SelectFromModel
from sklearn.ensemble import GradientBoostingClassifier
# Feature selection with GBDT as the base model
SelectFromModel(GradientBoostingClassifier()).fit_transform(train, target_train)

# 5. Model training
# For this dataset: after dropping features that don't enter the model and filling missing
# values, inspect feature correlations with the target, then train the models.

%matplotlib inline
# data_train = pd.read_csv(r'/home/corn/桌面/tianchifengkong/train.csv')
# data_test_a = pd.read_csv(r'/home/corn/桌面/tianchifengkong/testA.csv')
# Drop columns we don't need
# for data in [data_train, data_test_a]:
#     data.drop(['issueDate', 'id'], axis=1, inplace=True)

# # Forward-fill: replace each missing value with the value above it in the column
# data_train = data_train.fillna(axis=0, method='ffill')


# x_train = data_train.drop(['isDefault', 'id'], axis=1)

x_train = data_train
# Correlation of each feature with the target
data_corr = x_train.corrwith(data_train.isDefault)
result = pd.DataFrame(columns=['features', 'corr'])
result['features'] = data_corr.index
result['corr'] = data_corr.values
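Sorting by absolute correlation surfaces the features most linearly related to the target (a small addition, not in the original; `top` is our name):

top = result.reindex(result['corr'].abs().sort_values(ascending=False).index)
print(top.head(10))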

# You can also just look at a heatmap
data_numeric = data_train[numerical_fea]
correlation = data_numeric.corr()
f, ax = plt.subplots(figsize=(7, 7))
plt.title('Correlation of Numeric Features', y=1, size=16)
sns.heatmap(correlation, square=True, vmax=0.8)

[Figure: correlation heatmap of the numeric features]

features = [f for f in data_train.columns if f not in ['id', 'issueDate', 'isDefault'] and '_outliers' not in f]
x_train = data_train[features]
x_test = data_test_a[features]
y_train = data_train['isDefault']
def cv_model(clf, train_x, train_y, test_x, clf_name):
    
    folds = 5
    seed = 2020
    kf = KFold(n_splits=folds, shuffle=True, random_state=seed)
    train = np.zeros(train_x.shape[0])
    test = np.zeros(test_x.shape[0])
    cv_scores = []
    for i, (train_index, valid_index) in enumerate(kf.split(train_x, train_y)):
        print('************************************ {} ************************************'.format(str(i + 1)))
        trn_x, trn_y, val_x, val_y = train_x.iloc[train_index], train_y[train_index], \
                                     train_x.iloc[valid_index], train_y[valid_index]

        if clf_name == "lgb":
        
            train_matrix = clf.Dataset(trn_x, label=trn_y)
            valid_matrix = clf.Dataset(val_x, label=val_y)
            params = {
                'boosting_type': 'gbdt',
                'objective': 'binary',
                'metric': 'auc',
                'min_child_weight': 5,
                'num_leaves': 2 ** 5,
                'lambda_l2': 10,
                'feature_fraction': 0.8,
                'bagging_fraction': 0.8,
                'bagging_freq': 4,
                'learning_rate': 0.1,
                'seed': 2020,
                'nthread': 28,
                'n_jobs': 24,     # overrides nthread (see the LightGBM warning in the log below)
                'silent': True,   # not a LightGBM parameter; triggers the 'Unknown parameter' warning below
                'verbose': -1,
            }
            model = clf.train(params, train_matrix, 50000, valid_sets=[train_matrix, valid_matrix],
                              verbose_eval=200, early_stopping_rounds=200)
            val_pred = model.predict(val_x, num_iteration=model.best_iteration)
            test_pred = model.predict(test_x, num_iteration=model.best_iteration)
    # print(list(sorted(zip(features, model.feature_importance("gain")), key=lambda x: x[1], reverse=True))[:20])
        if clf_name == "xgb":
            train_matrix = clf.DMatrix(trn_x , label=trn_y)
            valid_matrix = clf.DMatrix(val_x , label=val_y)
            params = {'booster': 'gbtree',
                      'objective': 'binary:logistic',
                      'eval_metric': 'auc',
                      'gamma': 1,
                      'min_child_weight': 1.5,
                      'max_depth': 5,
                      'lambda': 10,
                      'subsample': 0.7,
                      'colsample_bytree': 0.7,
                      'colsample_bylevel': 0.7,
                      'eta': 0.04,
                      'tree_method': 'exact',         
                      'seed': 2020,
                      'nthread': 36,
                      "silent": True,
                      }
            watchlist = [(train_matrix, 'train'),(valid_matrix, 'eval')]
            model = clf.train(params, train_matrix, num_boost_round=50000, evals=watchlist,\
                              verbose_eval=200, early_stopping_rounds=200)
            val_pred = model.predict(valid_matrix, ntree_limit=model.best_ntree_limit)
            test_pred = model.predict(clf.DMatrix(test_x), ntree_limit=model.best_ntree_limit)

        if clf_name == "cat":        
            params = {'learning_rate': 0.05, 
                      'depth': 5, 
                      'l2_leaf_reg': 10,
                      'bootstrap_type': 'Bernoulli',
                      'od_type': 'Iter', 
                      'od_wait': 50, 'random_seed': 11,
                      'allow_writing_files': False
                     }

            model = clf(iterations=20000, **params)
            model.fit(trn_x, trn_y, eval_set=(val_x, val_y),\
                      cat_features=[], use_best_model=True, verbose=500)

            val_pred = model.predict(val_x)
            test_pred = model.predict(test_x)

        train[valid_index] = val_pred
        test += test_pred / kf.n_splits  # accumulate the fold-averaged test predictions
        cv_scores.append(roc_auc_score(val_y, val_pred))

        print(cv_scores)

    print("%s_scotrainre_list:" % clf_name, cv_scores)
    print("%s_score_mean:" % clf_name, np.mean(cv_scores))
    print("%s_score_std:" % clf_name, np.std(cv_scores))
    return train, test


def lgb_model(x_train, y_train, x_test):
    lgb_train, lgb_test = cv_model(lgb, x_train, y_train, x_test, "lgb")
    return lgb_train, lgb_test

def xgb_model(x_train, y_train, x_test):
    xgb_train, xgb_test = cv_model(xgb, x_train, y_train, x_test, "xgb")
    return xgb_train, xgb_test

def cat_model(x_train, y_train, x_test):
    cat_train, cat_test = cv_model(CatBoostRegressor, x_train, y_train, x_test, "cat")
    return cat_train, cat_test
    
lgb_train, lgb_test = lgb_model(x_train, y_train, x_test)
************************************ 1************************************
[LightGBM] [Warning] num_threads is set with n_jobs=24, nthread=28 will be ignored. Current value: num_threads=24
[LightGBM] [Warning] Unknown parameter: silent
Training until validation scores don't improve for 200 rounds
[200]	training's auc: 0.74909	valid_1's auc: 0.729732
[400]	training's auc: 0.764514	valid_1's auc: 0.730261
[600]	training's auc: 0.777878	valid_1's auc: 0.730283
Early stopping, best iteration is:
[541]	training's auc: 0.774137	valid_1's auc: 0.730481
[0.7304805291626117]
************************************ 2************************************
[LightGBM] [Warning] num_threads is set with n_jobs=24, nthread=28 will be ignored. Current value: num_threads=24
[LightGBM] [Warning] Unknown parameter: silent
Training until validation scores don't improve for 200 rounds
[200]	training's auc: 0.748524	valid_1's auc: 0.731572
[400]	training's auc: 0.764231	valid_1's auc: 0.732453
[600]	training's auc: 0.778023	valid_1's auc: 0.732473
Early stopping, best iteration is:
[592]	training's auc: 0.777512	valid_1's auc: 0.732533
[0.7304805291626117, 0.7325333805806149]
************************************ 3************************************
[LightGBM] [Warning] num_threads is set with n_jobs=24, nthread=28 will be ignored. Current value: num_threads=24
[LightGBM] [Warning] Unknown parameter: silent
Training until validation scores don't improve for 200 rounds
[200]	training's auc: 0.748256	valid_1's auc: 0.733065
[400]	training's auc: 0.763732	valid_1's auc: 0.733821
[600]	training's auc: 0.777215	valid_1's auc: 0.733752
Early stopping, best iteration is:
[456]	training's auc: 0.767666	valid_1's auc: 0.733934
[0.7304805291626117, 0.7325333805806149, 0.7339337227125723]
************************************ 4************************************
[LightGBM] [Warning] num_threads is set with n_jobs=24, nthread=28 will be ignored. Current value: num_threads=24
[LightGBM] [Warning] Unknown parameter: silent
Training until validation scores don't improve for 200 rounds
[200]	training's auc: 0.749134	valid_1's auc: 0.728116
[400]	training's auc: 0.764696	valid_1's auc: 0.728999
Early stopping, best iteration is:
[399]	training's auc: 0.764622	valid_1's auc: 0.729022
[0.7304805291626117, 0.7325333805806149, 0.7339337227125723, 0.7290224622861343]
************************************ 5************************************
[LightGBM] [Warning] num_threads is set with n_jobs=24, nthread=28 will be ignored. Current value: num_threads=24
[LightGBM] [Warning] Unknown parameter: silent
Training until validation scores don't improve for 200 rounds
[200]	training's auc: 0.748309	valid_1's auc: 0.733303
[400]	training's auc: 0.763856	valid_1's auc: 0.733901
Early stopping, best iteration is:
[363]	training's auc: 0.76104	valid_1's auc: 0.733908
[0.7304805291626117, 0.7325333805806149, 0.7339337227125723, 0.7290224622861343, 0.7339081024026323]
lgb_score_list: [0.7304805291626117, 0.7325333805806149, 0.7339337227125723, 0.7290224622861343, 0.7339081024026323]
lgb_score_mean: 0.7319756394289131
lgb_score_std: 0.0019409373213926035