Financial Risk Control in Practice: Logistic Regression and Scorecard Models (Part 2)

import pandas as pd
from sklearn.metrics import roc_auc_score,roc_curve,auc
from sklearn.model_selection import train_test_split
from sklearn import metrics
from sklearn.linear_model import LogisticRegression
import numpy as np
import random
import math

data = pd.read_csv('/Users/zhucan/Desktop/金融风控实战/第五课资料/Acard.txt')
data.shape
#(95806, 13)
data.head()

'''Look at the month distribution; we use the last month as the out-of-time validation set'''
'''obs_mth is the observation month'''
data.obs_mth.unique()
set(data.obs_mth)
#array(['2018-10-31', '2018-07-31', '2018-09-30', '2018-06-30',
#       '2018-11-30'], dtype=object)

train = data[data.obs_mth != '2018-11-30'].reset_index().copy()
val = data[data.obs_mth == '2018-11-30'].reset_index().copy()

#These are all our features: those ending in info come from our in-house unsupervised system describing individual behavior, those ending in score are paid external credit-bureau data
feature_lst = ['person_info','finance_info','credit_info','act_info','td_score','jxl_score','mj_score','rh_score']

x = train[feature_lst]
y = train['bad_ind']

val_x =  val[feature_lst]
val_y = val['bad_ind']

lr_model = LogisticRegression(C=0.1,penalty="l2") #L2 regularization works well here
lr_model.fit(x,y) 
#LogisticRegression(C=0.1)

Model evaluation

  • KS statistic
  • ROC curve

The ROC curve plots one point per cutoff, with FPR on the x-axis and TPR on the y-axis, tracing how TPR grows with FPR as the cutoff decreases.
y-axis: TPR = TP/(TP+FN), the true positive rate, i.e. recall on the positive class
x-axis: FPR = FP/(FP+TN), the fraction of negatives incorrectly flagged as positive

Plotting steps:

Sort the samples by the model's predicted probability of the positive class (note: the probability, not the 0/1 label), in descending order; this is the order in which cutoffs are taken.
Take each cutoff in turn and compute TPR and FPR; alternatively, take only n cutoffs, at the 1/n, 2/n, 3/n ... positions.
Connect all the (FPR, TPR) points to obtain the ROC curve; a minimal sketch of these steps follows.
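A minimal sketch of the procedure just described. The helper `roc_points` and its `n`-cutoff variant are illustrative, not part of the original code; `y_true` / `y_prob` stand for any label array and positive-class probabilities:

import numpy as np

def roc_points(y_true, y_prob, n=100):
    """(FPR, TPR) at n cutoffs taken at the 1/n, 2/n, ... positions
    of the samples sorted by predicted probability, descending."""
    y_true, y_prob = np.asarray(y_true), np.asarray(y_prob)
    y_sorted = y_true[np.argsort(-y_prob)]    # step 1: sort by P(positive), descending
    P = y_sorted.sum()                        # total positives
    N = len(y_sorted) - P                     # total negatives
    pts = []
    for k in range(1, n + 1):                 # step 2: cutoff at the k/n position
        cut = int(len(y_sorted) * k / n)
        tp = y_sorted[:cut].sum()             # positives ranked above the cutoff
        pts.append(((cut - tp) / N, tp / P))  # (FPR, TPR) = (FP/(FP+TN), TP/(TP+FN))
    return pts                                # step 3: connect these points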

KS statistic

Plotting steps:

Sort the samples by the predicted probability of the positive class (again the probability, not the 0/1 label), in descending order; this gives the cutoff order.
Take each cutoff in turn and compute TPR and FPR; alternatively, take only n cutoffs, at the 1/n, 2/n, 3/n ... positions.
Plot the cumulative sample percentage on the x-axis (up to 100%) and TPR and FPR on the y-axis; this gives the KS curve, as sketched below.
The position where the TPR and FPR curves are farthest apart is the best cutoff, and that maximum gap is the KS value; a KS above 0.2 is usually taken to mean the model has reasonably good discriminative power.
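The KS plot itself can be sketched the same way; `ks_plot` below is an illustrative helper (not the author's code), reusing the cumulative TPR/FPR idea with matplotlib:

import numpy as np
from matplotlib import pyplot as plt

def ks_plot(y_true, y_prob, n=20):
    y_true, y_prob = np.asarray(y_true), np.asarray(y_prob)
    y_sorted = y_true[np.argsort(-y_prob)]   # descending by P(positive)
    P = y_sorted.sum()
    N = len(y_sorted) - P
    pct = np.arange(1, n + 1) / n            # x-axis: cumulative sample share
    cuts = (pct * len(y_sorted)).astype(int)
    tpr = np.array([y_sorted[:c].sum() / P for c in cuts])
    fpr = np.array([(c - y_sorted[:c].sum()) / N for c in cuts])
    ks = np.abs(tpr - fpr).max()             # the maximum gap is the KS statistic
    plt.plot(pct, tpr, label='TPR')
    plt.plot(pct, fpr, label='FPR')
    plt.title('KS = %.3f' % ks)
    plt.xlabel('cumulative sample share')
    plt.legend(loc='best')
    plt.show()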

y_pred = lr_model.predict_proba(x)[:,1]
fpr_lr_train,tpr_lr_train,_ = roc_curve(y,y_pred)
train_ks = abs(fpr_lr_train - tpr_lr_train).max()
print('train_ks : ',train_ks)

y_pred = lr_model.predict_proba(val_x)[:,1]
fpr_lr,tpr_lr,_ = roc_curve(val_y,y_pred)
val_ks = abs(fpr_lr - tpr_lr).max()
print('val_ks : ',val_ks)

from matplotlib import pyplot as plt
plt.plot(fpr_lr_train,tpr_lr_train,label = 'train LR')
plt.plot(fpr_lr,tpr_lr,label = 'evl LR')
plt.plot([0,1],[0,1],'k--')
plt.xlabel('False positive rate')
plt.ylabel('True positive rate')
plt.title('ROC Curve')
plt.legend(loc = 'best')
plt.show()
#train_ks :  0.4151676259891534
#val_ks :  0.3856283523530577
#ideally the gap between train and validation KS stays within about 5 percentage points

 

#Further feature screening: check multicollinearity with VIF
from statsmodels.stats.outliers_influence import variance_inflation_factor
X = np.array(x)
print(X.shape)
for i in range(X.shape[1]):
    print(variance_inflation_factor(X,i))
#(79831, 8)
#1.3021397545577784
#1.9579535743187138
#1.2899442089163669
#2.9681708673324025
#3.287109972276014
#3.286493284008913
#3.3175087980337827
#3.2910065791107583
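Pairing each VIF with its feature name makes the printout easier to read; a small sketch reusing `X` and `feature_lst` from above:

vif = pd.DataFrame({
    'feature': feature_lst,
    'VIF': [variance_inflation_factor(X, i) for i in range(X.shape[1])]
}).sort_values('VIF', ascending=False)
print(vif)  # a common rule of thumb flags features with VIF above ~10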
import lightgbm as lgb
from sklearn.model_selection import train_test_split
train_x,test_x,train_y,test_y = train_test_split(x,y,random_state=0,test_size=0.2)
#the out-of-time validation set is not used here, so re-split the training data into train/test
def lgb_test(train_x,train_y,test_x,test_y):
    clf =lgb.LGBMClassifier(boosting_type = 'gbdt',
                           objective = 'binary',
                           metric = 'auc',
                           learning_rate = 0.1,
                           n_estimators = 24,
                           max_depth = 5,
                           num_leaves = 20,
                           max_bin = 45,
                           min_data_in_leaf = 6,
                           bagging_fraction = 0.6,
                           bagging_freq = 0,
                           feature_fraction = 0.8,
                           )
    clf.fit(train_x,train_y,eval_set = [(train_x,train_y),(test_x,test_y)],eval_metric = 'auc')
    return clf,clf.best_score_['valid_1']['auc'],
lgb_model , lgb_auc  = lgb_test(train_x,train_y,test_x,test_y)
'''Alternatively:'''
from sklearn.model_selection import cross_validate
clf =lgb.LGBMClassifier(boosting_type = 'gbdt',
                           objective = 'binary',
                           metric = 'auc',
                           learning_rate = 0.1,
                           n_estimators = 24,
                           max_depth = 5,
                           num_leaves = 20,
                           max_bin = 45,
                           min_data_in_leaf = 6,
                           bagging_fraction = 0.6,
                           bagging_freq = 0,
                           feature_fraction = 0.8,
                           )
cross_validate(clf,x,y,return_train_score=True,scoring="roc_auc")

feature_importance = pd.DataFrame({'name':lgb_model.booster_.feature_name(),
                                   'importance':lgb_model.feature_importances_}).sort_values(by=['importance'],ascending=False)
feature_importance
#[1]	training's auc: 0.759467	valid_1's auc: 0.753322
#[2]	training's auc: 0.809023	valid_1's auc: 0.805658
#[3]	training's auc: 0.809328	valid_1's auc: 0.803858
#[4]	training's auc: 0.810298	valid_1's auc: 0.801355
#[5]	training's auc: 0.814873	valid_1's auc: 0.807356
#[6]	training's auc: 0.816492	valid_1's auc: 0.809279
#[7]	training's auc: 0.820213	valid_1's auc: 0.809208
#[8]	training's auc: 0.823931	valid_1's auc: 0.812081
#[9]	training's auc: 0.82696	    valid_1's auc: 0.81453
#[10]	training's auc: 0.827882	valid_1's auc: 0.813428
#[11]	training's auc: 0.828881	valid_1's auc: 0.814226
#[12]	training's auc: 0.829577	valid_1's auc: 0.813749
#[13]	training's auc: 0.830406	valid_1's auc: 0.813156
#[14]	training's auc: 0.830843	valid_1's auc: 0.812973
#[15]	training's auc: 0.831587	valid_1's auc: 0.813501
#[16]	training's auc: 0.831898	valid_1's auc: 0.813611
#[17]	training's auc: 0.833751	valid_1's auc: 0.81393
#[18]	training's auc: 0.834139	valid_1's auc: 0.814532
#[19]	training's auc: 0.835177	valid_1's auc: 0.815209
#[20]	training's auc: 0.837368	valid_1's auc: 0.815205
#[21]	training's auc: 0.837946	valid_1's auc: 0.815099
#[22]	training's auc: 0.839585	valid_1's auc: 0.815602
#[23]	training's auc: 0.840781	valid_1's auc: 0.816105
#[24]	training's auc: 0.841174	valid_1's auc: 0.816869

 

Feature binning really is a must for scorecard models: it safeguards model stability.

Why do some companies do binning and bivar monotonicity transforms even when they use XGBoost? Binning makes the model more stable, while a bivar monotonicity transform adds nothing for XGBoost itself (the typical setup is XGBoost online, LR offline); a minimal binning/WOE sketch follows.
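A minimal sketch of what such binning looks like, assuming a continuous feature and the `bad_ind` label; the equal-frequency `pd.qcut` split, the 5-bin default, and the 0.5 smoothing constant are illustrative choices, not the author's:

import numpy as np
import pandas as pd

def woe_bin(df, col, target='bad_ind', n_bins=5):
    """Equal-frequency binning, then per-bin WOE = ln(bad share / good share)."""
    binned = pd.qcut(df[col], q=n_bins, duplicates='drop')
    grp = df.groupby(binned)[target].agg(bad='sum', count='count')
    grp['good'] = grp['count'] - grp['bad']
    bad_dist = (grp['bad'] + 0.5) / (grp['bad'].sum() + 0.5)    # smoothed distributions
    good_dist = (grp['good'] + 0.5) / (grp['good'].sum() + 0.5)
    grp['woe'] = np.log(bad_dist / good_dist)
    return grp[['bad', 'good', 'woe']]

# e.g. woe_bin(train, 'act_info') -- a monotone WOE trend across bins is what
# the bivar monotonicity check looks for before feeding bins into LR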

feature_lst = ['person_info','finance_info','credit_info','act_info'] #these four features were selected by exhaustive search
x = train[feature_lst]
y = train['bad_ind']

val_x =  val[feature_lst]
val_y = val['bad_ind']

lr_model = LogisticRegression(C=0.1,class_weight='balanced',penalty = "l2")
lr_model.fit(x,y)
y_pred = lr_model.predict_proba(x)[:,1]
fpr_lr_train,tpr_lr_train,_ = roc_curve(y,y_pred)
train_ks = abs(fpr_lr_train - tpr_lr_train).max()
print('train_ks : ',train_ks)

y_pred = lr_model.predict_proba(val_x)[:,1]
fpr_lr,tpr_lr,_ = roc_curve(val_y,y_pred)
val_ks = abs(fpr_lr - tpr_lr).max()
print('val_ks : ',val_ks)

from matplotlib import pyplot as plt
plt.plot(fpr_lr_train,tpr_lr_train,label = 'train LR')
plt.plot(fpr_lr,tpr_lr,label = 'evl LR')
plt.plot([0,1],[0,1],'k--')
plt.xlabel('False positive rate')
plt.ylabel('True positive rate')
plt.title('ROC Curve')
plt.legend(loc = 'best')
plt.show()

print('feature list:',feature_lst)
print('coefficients:',lr_model.coef_)
print('intercept:',lr_model.intercept_)
#feature list: ['person_info', 'finance_info', 'credit_info', 'act_info']
#coefficients: [[ 3.49460978 11.40051582  2.45541981 -1.68676079]]
#intercept: [-0.34484897]
A quick Python aside on sorted() with key=lambda, which the report code below relies on:
C = [('e', 4, 2), ('a', 2, 1), ('c', 5, 4), ('b', 3, 3), ('d', 1, 5)]
print(sorted(C, key=lambda y: y[0]))
#output: [('a', 2, 1), ('b', 3, 3), ('c', 5, 4), ('d', 1, 5), ('e', 4, 2)]
print(sorted(C, key=lambda x: x[0]))
#[('a', 2, 1), ('b', 3, 3), ('c', 5, 4), ('d', 1, 5), ('e', 4, 2)]
print(sorted(C, key=lambda x: x[2]))
#[('a', 2, 1), ('e', 4, 2), ('b', 3, 3), ('c', 5, 4), ('d', 1, 5)]
 
'''key=lambda element: element[index]
e.g. print(sorted(C, key=lambda x: x[2]))
The name before the colon (x, y, ...) is arbitrary; the bracketed index picks the sort key: [0] sorts by the first field, [2] by the third.'''
# Generate the validation report: 20 equal-size bins sorted by predicted risk
model = lr_model
bins = 20
Y_predict = [s[1] for s in model.predict_proba(val_x)]  # P(bad) for each validation sample
Y = val_y
nrows = Y.shape[0]
lis = [(Y_predict[i], Y[i]) for i in range(nrows)]
ks_lis = sorted(lis, key=lambda x: x[0], reverse=True)  # riskiest samples first
bin_num = int(nrows/bins+1)                             # samples per bin
bad = sum([1 for (p, y) in ks_lis if y > 0.5])          # total bads
good = sum([1 for (p, y) in ks_lis if y <= 0.5])        # total goods

bad_cnt, good_cnt = 0, 0
KS = []
BAD = []
GOOD = []
BAD_CNT = []
GOOD_CNT = []
BAD_PCTG = []
BADRATE = []
dct_report = {}

for j in range(bins):
    ds = ks_lis[j*bin_num: min((j+1)*bin_num, nrows)]
    bad1 = sum([1 for (p, y) in ds if y > 0.5])
    good1 = sum([1 for (p, y) in ds if y <= 0.5])
    bad_cnt += bad1
    good_cnt += good1
    bad_pctg = round(bad_cnt/sum(val_y),3)      # cumulative bad capture rate
    badrate = round(bad1/(bad1+good1),3)        # bad rate within this bin
    ks = round(math.fabs((bad_cnt / bad) - (good_cnt / good)),3)  # equivalent to |TPR - FPR|
    KS.append(ks)
    BAD.append(bad1)
    GOOD.append(good1)
    BAD_CNT.append(bad_cnt)
    GOOD_CNT.append(good_cnt)
    BAD_PCTG.append(bad_pctg)
    BADRATE.append(badrate)

dct_report['KS'] = KS
dct_report['BAD'] = BAD
dct_report['GOOD'] = GOOD
dct_report['BAD_CNT'] = BAD_CNT
dct_report['GOOD_CNT'] = GOOD_CNT
dct_report['BAD_PCTG'] = BAD_PCTG
dct_report['BADRATE'] = BADRATE
val_repot = pd.DataFrame(dct_report)
val_repot


BAD_PCTG is the cumulative bad capture rate.

Two things to check: (1) whether BADRATE decreases strictly from bin to bin, and (2) whether the capture rate improves over the previous model.

Monitoring continues after loans are issued: score distribution, model PSI, feature PSI, reasons behind low scores, capture rate, and model KS; a minimal PSI sketch follows.
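PSI itself fits in a few lines. The sketch below is illustrative, not the author's monitoring code; it assumes two score arrays (e.g. development sample vs. a recent month), with bins fixed on the development sample:

import numpy as np

def psi(expected, actual, n_bins=10):
    """Population Stability Index; bins are quantiles of the expected sample."""
    edges = np.quantile(expected, np.linspace(0, 1, n_bins + 1))
    edges[0], edges[-1] = -np.inf, np.inf                  # cover the full range
    e_pct = np.histogram(expected, bins=edges)[0] / len(expected)
    a_pct = np.histogram(actual, bins=edges)[0] / len(actual)
    e_pct = np.clip(e_pct, 1e-6, None)                     # avoid log(0)
    a_pct = np.clip(a_pct, 1e-6, None)
    return float(np.sum((a_pct - e_pct) * np.log(a_pct / e_pct)))

# common reading: PSI < 0.1 stable, 0.1-0.25 worth watching, > 0.25 shifted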

from pyecharts.charts import *   
from pyecharts import options as opts
from pylab import *       #star-import from pylab so its names can be used without dot notation
from matplotlib import pyplot as plt

# plt.rcParams['font.sans-serif'] = ['SimHei'] #set a CJK-capable font
# np.set_printoptions(suppress=True) #suppress scientific notation in printouts
# pd.set_option('display.unicode.ambiguous_as_wide', True)
# pd.set_option('display.unicode.east_asian_width', True)

line = (
    Line()
    .add_xaxis(list(val_repot.index))
    .add_yaxis(
        "分组坏人占比",
        list(val_repot.BADRATE),
        yaxis_index=0,
        color="red",
    )
    .set_global_opts(
        title_opts=opts.TitleOpts(title="Behavior scorecard performance"),
    )
    .extend_axis(
        yaxis=opts.AxisOpts(
            name="累计坏人占比",
#             type_="value",
            min_=0,
            max_=0.5,
            position="right",
            axisline_opts=opts.AxisLineOpts(linestyle_opts=opts.LineStyleOpts(color="red")),
#             axislabel_opts=opts.LabelOpts(formatter="{value}"),
        )

    )
    .add_xaxis(list(val_repot.index))
    .add_yaxis(
        "KS",
        list(val_repot.KS),
        yaxis_index=1,
        color="blue",
    )
)
line.render_notebook()

'''Alternatively:'''
# plt.rcParams['font.sans-serif'] = 'Heiti TC'  #only needed for CJK labels
import matplotlib.pyplot as plt
plt.figure(figsize=(20,10),dpi=100)
plt.title("行为评分卡模型表现",size=15)
plt.plot(val_repot.index,val_repot.KS,label="KS")
plt.plot(val_repot.index,val_repot.BADRATE,c="r",label="BADRATE")
plt.xticks(range(0,20,1))
plt.legend(loc="best")

 The logistic regression equation

 \ln \frac{P}{1-P} = w_{1} x_{1} + w_{2} x_{2} + w_{3} x_{3} + \ldots + b

 Base score: 650;

when the odds of good to bad are 2:1, add 50 points;

when the odds of good to bad are 4:1, add 100 points;

when the odds of good to bad are 8:1, add 150 points;

...

and so on, which gives the score conversion formula:

\text{score} = 650 + 50 \cdot \log_{2} \frac{1-P}{P}
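Substituting ln((1-P)/P) = -(w·x + b) from the regression equation above turns this into the form the scoring code below actually computes:

\text{score} = 650 - \frac{50}{\ln 2}\,(w^{T} x + b)

Note that the code uses 34 in place of the 50 here, i.e. a PDO (points to double the odds) of 34 rather than 50.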

 

#['person_info','finance_info','credit_info','act_info']
# One-shot score function built from the fitted coefficients and intercept above
def score(person_info,finance_info,credit_info,act_info):
    xbeta = person_info * 3.49460978 + finance_info * 11.40051582 \
            + credit_info * 2.45541981 + act_info * (-1.68676079) - 0.34484897  # w·x + intercept
    score = 650 - 34 * xbeta / math.log(2)  # factor of 34, i.e. PDO = 34
    return score
val['score'] = val.apply(lambda x : score(x.person_info,x.finance_info,x.credit_info,x.act_info) ,axis=1)

fpr_lr,tpr_lr,_ = roc_curve(val_y,val['score'])
val_ks = abs(fpr_lr - tpr_lr).max()
print('val_ks : ',val_ks)

# Map scores to rating bands
def level(score):
    level = 0
    if score <= 600:
        level = "D"
    elif 600 < score <= 640:
        level = "C"
    elif 640 < score <= 680:
        level = "B"
    else:
        level = "A"
    return level
val['level'] = val.score.apply(lambda x : level(x) )
val.level.groupby(val.level).count()/len(val)
#val_ks :  0.4198642457760936
#level
#A    0.144351
#B    0.240188
#C    0.391299
#D    0.224163
#Name: level, dtype: float64
import seaborn as sns
sns.histplot(val.score, kde=True)  # distplot is deprecated in recent seaborn versions

val = val.sort_values('score',ascending=True).reset_index(drop=True)
df2 = val.bad_ind.groupby(val['level']).sum()    # bads per band
df3 = val.bad_ind.groupby(val['level']).count()  # accounts per band
print(df2/df3)                                   # bad rate per band

# level
# A    0.002168
# B    0.008079
# C    0.014878
# D    0.055571
# Name: bad_ind, dtype: float64
