[Binning Operations] Decision-Tree, Chi-Square, Quantile, Equal-Width, and Mapping Binning in Code


import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.datasets import load_breast_cancer
from sklearn.tree import DecisionTreeClassifier
%matplotlib inline
plt.rcParams["font.sans-serif"] = ["FangSong"]  # font that can render CJK axis labels
plt.rcParams["axes.unicode_minus"] = False      # render minus signs correctly with this font
import warnings
warnings.filterwarnings("ignore")

Data preparation

We use x as the data to be binned and y as the target label for the binning.
data = load_breast_cancer()
df = pd.DataFrame(data.data, columns=data.feature_names)
variable = "mean radius"
x = df[variable].values
y = data.target
test = pd.DataFrame({'x':x,'y':y})
test.head()
       x  y
0  17.99  0
1  20.57  0
2  19.69  0
3  11.42  0
4  20.29  0
sns.kdeplot(test.x)
(Figure: kernel density plot of test.x)

sns.boxplot(test.x)
(Figure: box plot of test.x)

The derivation and interpretation of WOE and IV are covered at:
https://blog.csdn.net/xiezhen_zheng/article/details/82888653
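In brief (a summary added here, consistent with the function below): if bin i holds a share p_bad_i of all bad samples and p_good_i of all good samples, then

WOE_i = ln(p_bad_i / p_good_i)
IV    = sum_i (p_bad_i - p_good_i) * WOE_i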

Defining the calculation function

def woe_iv(data, x_col='x', y_col='y'):
    '''
    data  : DataFrame containing y and the bin labels
    x_col : bin label column
    y_col : class label column
    '''
    rate_table = data.groupby(x_col)[y_col].agg(['count', 'sum']).rename(
        columns={'count': 'total', 'sum': 'bad'})  # per-bin totals and bad-sample counts
    rate_table['good'] = rate_table['total'] - \
        rate_table['bad']  # per-bin good-sample counts
    total_bad = rate_table.sum()['bad']    # total number of bad samples
    total_good = rate_table.sum()['good']  # total number of good samples
    rate_table['p_bad'] = rate_table['bad'] / total_bad     # share of all bad samples in this bin
    rate_table['p_good'] = rate_table['good'] / total_good  # share of all good samples in this bin
    woe = np.log(rate_table['p_bad'] / rate_table['p_good'])  # per-bin WOE
    iv = (rate_table['p_bad'] - rate_table['p_good']) * woe   # per-bin IV contribution
    rate_table['iv'] = iv
    rate_table['woe'] = woe
    return rate_table

Decision-tree binning

def optimal_binning_boundary(data, x_col='x', y_col='y', criterion='gini'):
    '''
    Binning with a decision tree.
    data      : DataFrame containing y and the feature to bin
    x_col     : feature column to bin
    y_col     : class label column
    criterion : split criterion for the decision tree
    '''
    x0 = data[x_col]
    x = data[x_col].values
    y = data[y_col].values
    boundary = []  # list of bin boundaries to return
    clf = DecisionTreeClassifier(criterion=criterion,  # split criterion
                                 max_leaf_nodes=6,       # maximum number of leaf nodes
                                 min_samples_leaf=0.05)  # minimum fraction of samples per leaf

    clf.fit(x.reshape(-1, 1), y)  # fit the decision tree
    n_nodes = clf.tree_.node_count
    children_left = clf.tree_.children_left
    children_right = clf.tree_.children_right
    threshold = clf.tree_.threshold
    for i in range(n_nodes):
        if children_left[i] != children_right[i]:  # internal node: collect its split threshold
            boundary.append(threshold[i])
    boundary.sort()
    min_x = x.min()
    max_x = x.max()
    boundary = [min_x] + boundary + [max_x]
    data['bins_dtc'] = pd.cut(x0, bins=boundary, include_lowest=True, labels=False)
    return boundary, data
boundary, bins = optimal_binning_boundary(test)
woe_iv(bins,'bins_dtc')
          total  bad  good     p_bad    p_good        iv       woe
bins_dtc
0           150  147     3  0.411765  0.014151  1.340225  3.370671
1           115  105    10  0.294118  0.047170  0.451970  1.830226
2            49   39    10  0.109244  0.047170  0.052131  0.839827
3            83   55    28  0.154062  0.132075  0.003385  0.153979
4            54   10    44  0.028011  0.207547  0.359566 -2.002754
5           118    1   117  0.002801  0.551887  2.900997 -5.283323
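A usage note added here (not in the original post): the iv column holds each bin's contribution, so the feature-level IV is simply its sum.

total_iv = woe_iv(bins, 'bins_dtc')['iv'].sum()  # feature-level IV for the decision-tree binning
print(total_iv)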

Quantile binning

We use the pd.qcut function.
bins = pd.qcut(df[variable], 5, labels=False)
test['bins_q'] = bins
woe_iv(test,'bins_q')
        total  bad  good     p_bad    p_good        iv       woe
bins_q
0         114  112     2  0.313725  0.009434  1.066299  3.504202
1         114  106     8  0.296919  0.037736  0.534655  2.062848
2         113   91    22  0.254902  0.103774  0.135814  0.898668
3         114   47    67  0.131653  0.316038  0.161465 -0.875695
4         114    1   113  0.002801  0.533019  2.782868 -5.248537
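One caveat, added here rather than taken from the original post: pd.qcut raises a ValueError when the requested quantile edges are not unique, which is common for features with many repeated values. Passing duplicates='drop' merges the identical edges:

bins = pd.qcut(df[variable], 5, labels=False, duplicates='drop')  # tolerate repeated quantile edges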

Equal-width and log-mapping binning

np.floor_divide(a, b) divides a by b and takes the floor of the result; np.floor() rounds down to the nearest integer.
bins = np.floor_divide(df[variable], 6)
test['bins_divide'] = bins
woe_iv(test, 'bins_divide')
             total  bad  good     p_bad    p_good        iv       woe
bins_divide
1.0            169  163     6  0.456583  0.028302  1.190981  2.780841
2.0            308  194   114  0.543417  0.537736  0.000060  0.010510
3.0             85    0    85  0.000000  0.400943       inf      -inf
4.0              7    0     7  0.000000  0.033019       inf      -inf
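For reference (an addition, not from the original post): np.floor_divide(x, 6) is just equal-width binning with a fixed width of 6. pd.cut builds a chosen number of equal-width bins over the observed range directly:

test['bins_cut'] = pd.cut(df[variable], bins=5, labels=False)  # 5 equal-width bins over [min, max]
woe_iv(test, 'bins_cut')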
bins = np.floor(np.log2(df[variable]))
test['bins_log'] = bins 
woe_iv(test,'bins_log')
          total  bad  good     p_bad    p_good        iv       woe
bins_log
2.0           4    4     0  0.011204  0.000000       inf       inf
3.0         424  347    77  0.971989  0.363208  0.599266  0.984370
4.0         141    6   135  0.016807  0.636792  2.253440 -3.634665
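A note added here (standard NumPy behavior, not discussed in the original post): np.log2 is only defined for positive values, so this mapping fails for features containing zeros; np.log1p(x) = log(1 + x) is a common substitute in that case:

bins = np.floor(np.log1p(df[variable]))  # log1p is defined at 0, unlike log2
test['bins_log1p'] = bins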

Chi-square binning

The detailed idea behind chi-square (ChiMerge) binning is described at:
https://mp.weixin.qq.com/s?__biz=MzA5Njc1MDA2Ng%3D%3D&idx=1&mid=2651650083&sn=a24381efa404500ae96ccfcc3716a614

def Chi2(df, total_col, bad_col, overallRate):
    '''
    Computes the chi-square statistic.
    df          : DataFrame
    total_col   : total count for each value
    bad_col     : bad-sample count for each value
    overallRate : overall bad-sample rate
    returns the chi-square value
    '''
    df2 = df.copy()
    df2['expected'] = df[total_col].apply(lambda x: x * overallRate)  # expected bad count per value
    combined = zip(df2['expected'], df2[bad_col])
    chi = [(i[0] - i[1]) ** 2 / i[0] for i in combined]
    chi2 = sum(chi)
    return chi2
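Note that chiMerge below recomputes the statistic inline rather than calling this helper. A minimal standalone check, using made-up counts (the numbers are illustrative only):

sub = pd.DataFrame({'total': [100, 80], 'bad': [30, 10]})
overall_rate = sub['bad'].sum() / sub['total'].sum()  # overall bad rate = 40/180
print(Chi2(sub, 'total', 'bad', overall_rate))        # chi-square statistic for the two rows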
def chiMerge(data, x_col='x', y_col='y', max_bins=5):
    # Start with one interval per distinct value, then repeatedly merge
    # the adjacent pair with the smallest combined chi-square statistic.
    gro = data.groupby(x_col)[y_col].agg(['mean', 'count'])
    gro['bad'] = gro['count'] * gro['mean']                 # bad-sample count per value
    total_rate_bad = gro.sum()['bad'] / gro.sum()['count']  # overall bad rate
    gro['exp_bad'] = gro['count'] * total_rate_bad          # expected bad count per value
    gro['chi2'] = ((gro['exp_bad'] - gro['bad']) ** 2) / gro['exp_bad']
    gro.drop(columns='mean', inplace=True)
    bad = list(gro['bad'])
    exp_bad = list(gro['exp_bad'])
    chi_c = np.array((bad, exp_bad)).T  # per-interval (bad, expected bad) counts
    interval = [[i] for i in list(gro.index)]
    chi2 = list(gro['chi2'].values)
    while len(interval) > max_bins:
        between_sum = [chi2[i] + chi2[i + 1] for i in range(len(chi2) - 1)]
        chi2_min_index = between_sum.index(min(between_sum))  # adjacent pair with the smallest chi-square
        interval[chi2_min_index] = interval[chi2_min_index] + interval[chi2_min_index + 1]
        interval.pop(chi2_min_index + 1)
        chi_c[chi2_min_index] = chi_c[chi2_min_index] + chi_c[chi2_min_index + 1]
        chi_c = np.delete(chi_c, chi2_min_index + 1, 0)
        chi2[chi2_min_index] = ((chi_c[chi2_min_index][0] - chi_c[chi2_min_index][1]) ** 2) / chi_c[chi2_min_index][1]
        chi2.pop(chi2_min_index + 1)
    interval_ = [min(i) for i in interval] + [data[x_col].max()]  # bin edges: each interval's lower bound plus the overall max
    bins = pd.cut(data[x_col], interval_, labels=False, include_lowest=True)
    return bins, interval_
inter = chiMerge(test)
bins = inter[0]
test['bins_kafang'] = bins
woe_iv(test,'bins_kafang')
             total  bad  good     p_bad    p_good        iv       woe
bins_kafang
0              425  351    74  0.983193  0.349057  0.656694  1.035572
1               55    6    49  0.016807  0.231132  0.561792 -2.621210
2               29    0    29  0.000000  0.136792       inf      -inf
3               31    0    31  0.000000  0.146226       inf      -inf
4               29    0    29  0.000000  0.136792       inf      -inf
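Several tables above show inf/-inf WOE wherever a bin contains zero good or zero bad samples. A common remedy, added here as a sketch of standard practice rather than part of the original post, is to smooth the counts with a small constant eps before forming the ratio:

def woe_iv_smoothed(data, x_col='x', y_col='y', eps=0.5):
    # Same logic as woe_iv, but eps keeps every per-bin probability
    # strictly positive, so no WOE value can be infinite.
    t = data.groupby(x_col)[y_col].agg(['count', 'sum']).rename(
        columns={'count': 'total', 'sum': 'bad'})
    t['good'] = t['total'] - t['bad']
    p_bad = (t['bad'] + eps) / (t['bad'].sum() + eps * len(t))
    p_good = (t['good'] + eps) / (t['good'].sum() + eps * len(t))
    t['woe'] = np.log(p_bad / p_good)
    t['iv'] = (p_bad - p_good) * t['woe']
    return t

woe_iv_smoothed(test, 'bins_kafang')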

Decision-tree binning of feature variables is a common feature-engineering method: it discretizes a continuous feature into intervals, which can improve both the performance and the interpretability of machine-learning models. Here is how it works. Suppose we have a continuous feature x that we want to split into k bins. We first build a decision tree to find the best split points:

1. Choose one of the candidate split points as the root node. The candidates can be all distinct values, or a subset chosen by some strategy.
2. For each child node, compute the information gain or Gini index (or another criterion) and select the best split point.
3. Recurse on step 2 until the tree reaches a preset depth or the leaves reach a preset minimum sample count.
4. Finally, take the leaf nodes as the bin intervals.

In practice, a few points deserve attention:

1. Choice of split points: alternatives include equal-width splits, equal-frequency splits, chi-square splits, and so on.
2. Tree depth and minimum leaf size: these two parameters control model complexity and performance, and can be tuned by cross-validation (see the sketch below).
3. Feature scaling: decision-tree splits are univariate, so feature scaling does not affect the binning result.

In short, decision-tree binning discretizes a continuous feature into intervals to improve model performance and interpretability; when implementing it, pay attention to the split-point strategy, the tree depth, and the minimum leaf size.
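As mentioned in point 2 above, the tree parameters can be tuned by cross-validation. A minimal sketch with scikit-learn's GridSearchCV (the parameter grid below is an illustrative assumption, not a tuned recommendation):

from sklearn.model_selection import GridSearchCV

param_grid = {'max_leaf_nodes': [4, 5, 6, 8],          # candidate bin counts
              'min_samples_leaf': [0.02, 0.05, 0.10]}  # candidate minimum leaf fractions
search = GridSearchCV(DecisionTreeClassifier(criterion='gini'), param_grid, cv=5)
search.fit(x.reshape(-1, 1), y)  # x, y as prepared earlier
print(search.best_params_)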