数据挖掘实战(三):模型构建

用逻辑回归、SVM、决策树、随机森林和XGBoost这五种模型进行构建,评分方式任意,如准确率等。(不需要考虑模型调参)

1、数据处理

import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
# Load the raw data (GBK-encoded CSV), keep an untouched copy, then clean.
df = pd.read_csv('data.csv', encoding='gbk')
df_raw = df.copy()
df.dropna(axis=0, inplace=True)
# Identifier-like columns carry no predictive signal — drop them.
df.drop(['custid', 'trade_no', 'bank_card_no', 'source', 'id_name'], axis=1, inplace=True)
# These two features duplicate other columns.
df.drop(['consume_top_time_last_1_month', 'consume_top_time_last_6_month'], axis=1, inplace=True)

def _days_since_earliest(series):
    """Convert a date-string column to integer days elapsed since its earliest date."""
    dates = pd.to_datetime(series)
    # BUG FIX: take the minimum AFTER parsing. The original called
    # series.min() on raw strings, which is a lexicographic minimum and
    # can select the wrong baseline when the date format is not strictly
    # sortable as text (e.g. '2018/5/3' vs '2018/12/1').
    return (dates - dates.min()).dt.days

df['loans_latest_time'] = _days_since_earliest(df['loans_latest_time'])
df['latest_query_time'] = _days_since_earliest(df['latest_query_time'])

# Encode the single categorical column as integer labels.
from sklearn import preprocessing
label = preprocessing.LabelEncoder()
df['reg_preference_for_trad'] = label.fit_transform(df['reg_preference_for_trad'])
# 根据IV(信息价值)
def calc_iv(df, feature, target, pr=False):
    """Compute the Information Value (IV) of `feature` with respect to binary `target`.

    Each distinct value of the feature is treated as one group (binning by
    value, not by range). Set pr=True to print the per-group table.

    NOTE: fills NaNs in df[feature] with the string 'NULL' *in place* on the
    caller's frame (kept from the original implementation).

    Output:
      * iv: float, sum of per-group IV contributions
      * data: pandas.DataFrame with per-group counts, WoE and IV
    """
    df[feature] = df[feature].fillna('NULL')

    # One row per distinct feature value. Iterating unique() directly avoids
    # the original O(n^2) pattern of rebuilding the unique list every step.
    rows = []
    for val in df[feature].unique():
        in_group = df[feature] == val
        rows.append([feature,
                     val,
                     int(in_group.sum()),                         # group size
                     int((in_group & (df[target] == 0)).sum()),   # goods in group
                     int((in_group & (df[target] == 1)).sum())])  # bads in group
    data = pd.DataFrame(rows, columns=['Variable', 'Value', 'All', 'Good', 'Bad'])

    data['Share'] = data['All'] / data['All'].sum()
    data['Bad Rate'] = data['Bad'] / data['All']
    data['Distribution Good'] = (data['All'] - data['Bad']) / (data['All'].sum() - data['Bad'].sum())
    data['Distribution Bad'] = data['Bad'] / data['Bad'].sum()
    # WoE = log((goods in group / all goods) / (bads in group / all bads))
    data['WoE'] = np.log(data['Distribution Good'] / data['Distribution Bad'])

    # Groups with zero goods or zero bads produce +/-inf WoE; zero them out.
    data = data.replace({'WoE': {np.inf: 0, -np.inf: 0}})

    # Weighting WoE by the distribution gap keeps each contribution >= 0,
    # so the feature IV lies in [0, +inf).
    data['IV'] = data['WoE'] * (data['Distribution Good'] - data['Distribution Bad'])

    data = data.sort_values(by=['Variable', 'Value'], ascending=[True, True])
    data.index = range(len(data.index))

    if pr:
        print(data)
        print("IV = ", data['IV'].sum())

    # The feature's IV is the sum over all groups.
    iv = data['IV'].sum()

    return iv, data

column_headers = list(df.columns.values)

# Compute the IV of every feature against the target column 'status'.
iv_values = [calc_iv(df, col, 'status')[0] for col in column_headers]

# Assemble a feature -> IV table; the original built two Series and concat'ed.
iv_result = pd.concat([pd.Series(column_headers, name='feature'),
                       pd.Series(iv_values, name='iv_value')], axis=1)
# BUG FIX: sort_values returns a new frame — the original discarded the
# result. Assign it so the ranking is actually kept.
iv_result = iv_result.sort_values(by='iv_value', ascending=False)

# Keep features whose IV exceeds 0.4.
features = list(iv_result[iv_result['iv_value'] > 0.4]['feature'])
X = df[features]
y = df['status']

from sklearn.model_selection import train_test_split  # train/test split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=2019)

from sklearn.preprocessing import StandardScaler  # standardization
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train)
# BUG FIX: the test set must be scaled with the statistics fitted on the
# training set; the original called fit_transform here, which re-fits the
# scaler on the test data and leaks test-set information.
X_test_scaled = scaler.transform(X_test)

2、模型导入

# Import the five classifiers under comparison.
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from xgboost import XGBClassifier

# Instantiate every model with its default hyper-parameters (no tuning).
log_clf = LogisticRegression()
svc_clf = SVC()
tree_clf = DecisionTreeClassifier()
forest_clf = RandomForestClassifier()
xgb_clf = XGBClassifier()

# Registry mapping a short display name to its classifier instance.
models = dict(log_clf=log_clf,
              svc_clf=svc_clf,
              tree_clf=tree_clf,
              forest_clf=forest_clf,
              xgb_clf=xgb_clf)

3、模型评估

from sklearn.metrics import recall_score, precision_score, f1_score, accuracy_score

def metrics(models, X_train_scaled, X_test_scaled, y_train, y_test):
    """Fit every model and report its test-set scores.

    Parameters:
        models: dict mapping a display name to an unfitted sklearn-style
            classifier (fit/predict interface).
        X_train_scaled, X_test_scaled: standardized feature matrices.
        y_train, y_test: binary target vectors.

    Returns:
        pandas.DataFrame indexed by model name with columns
        recall_score, precision_score, f1_score, accuracy_score.
    """
    results = pd.DataFrame(columns=['recall_score', 'precision_score',
                                    'f1_score', 'accuracy_score'])
    for name, model in models.items():
        model.fit(X_train_scaled, y_train)
        y_pred = model.predict(X_test_scaled)
        # BUG FIX: sklearn metric signatures are (y_true, y_pred). The
        # original passed (y_pre, y_test), which silently swaps precision
        # and recall (accuracy and f1 happen to be symmetric under the swap).
        results.loc[name] = [recall_score(y_test, y_pred),
                             precision_score(y_test, y_pred),
                             f1_score(y_test, y_pred),
                             accuracy_score(y_test, y_pred)]
    return results

metrics(models, X_train_scaled, X_test_scaled, y_train, y_test)

输出:
在这里插入图片描述

评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值