DataWhale 金融风控-贷款违约预测

Task3 特征分析 (Feature Engineering & Analysis)

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import datetime
from tqdm import tqdm
from sklearn.preprocessing import LabelEncoder
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
from sklearn.preprocessing import MinMaxScaler
import xgboost as xgb
import lightgbm as lgb
from catboost import CatBoostRegressor
import warnings
from sklearn.model_selection import StratifiedKFold, KFold
from sklearn.metrics import accuracy_score, f1_score, roc_auc_score, log_loss
warnings.filterwarnings('ignore')

# Load the training set and test set A.
data_train = pd.read_csv('../train.csv')
data_test_a = pd.read_csv('../testA.csv')

# Feature pre-processing: partition columns into numerical vs. categorical.
numerical_fea = list(data_train.select_dtypes(exclude=['object']).columns)
category_fea = [col for col in data_train.columns if col not in numerical_fea]
label = 'isDefault'
# The target column is not a feature.
numerical_fea.remove(label)

# Missing-value overview (notebook-style bare expression; the result is
# discarded when run as a plain script).
data_train.isnull().sum()

# Fill numerical features with the *training-set* median (the original
# comment said "mean", but median is what the code actually uses).
# The original assignments were split across lines by the scrape, which is a
# syntax error in Python; they are re-joined here.
data_train[numerical_fea] = data_train[numerical_fea].fillna(data_train[numerical_fea].median())
data_test_a[numerical_fea] = data_test_a[numerical_fea].fillna(data_train[numerical_fea].median())

# Fill categorical features with the training-set mode.
# DataFrame.mode() returns a DataFrame (ties give several rows); passing it to
# fillna aligns on the row index and only fills the first row(s).  Take the
# first mode row so each column is filled with a single scalar value.
data_train[category_fea] = data_train[category_fea].fillna(data_train[category_fea].mode().iloc[0])
data_test_a[category_fea] = data_test_a[category_fea].fillna(data_train[category_fea].mode().iloc[0])

data_train.isnull().sum()

# Inspect the categorical feature names (notebook-style bare expression).
category_fea

# Time handling: parse issueDate and derive a day-count feature.
# Anchor date for the day-count feature (earliest issue date in the data).
startdate = datetime.datetime.strptime('2007-06-01', '%Y-%m-%d')
for data in [data_train, data_test_a]:
	data['issueDate'] = pd.to_datetime(data['issueDate'], format='%Y-%m-%d')
	# Days elapsed since the anchor date (vectorised timedelta subtraction).
	data['issueDateDT'] = (data['issueDate'] - startdate).dt.days

data_train['employmentLength'].value_counts(dropna=False).sort_index()

# Convert object-typed employment length strings to numbers.
def employmentLength_to_int(s):
	"""Parse an employmentLength string like '5 years' into its leading integer.

	NaN values are passed through unchanged so later fillna logic still works.
	"""
	if pd.isnull(s):
		return s
	return np.int8(s.split()[0])
# Normalise the two non-numeric labels, then parse every value to an int.
for data in [data_train, data_test_a]:
	data['employmentLength'] = (
		data['employmentLength']
		.replace({'10+ years': '10 years', '< 1 year': '0 years'})
		.apply(employmentLength_to_int)
	)

# NOTE(review): `data` here is the leftover loop variable (the test set),
# not data_train — kept as in the original.
data['employmentLength'].value_counts(dropna=False).sort_index()

# Pre-process earliesCreditLine (strings like 'Aug-2001').
data_train['earliesCreditLine'].sample(5)

for data in [data_train, data_test_a]:
	# Keep only the trailing 4-digit year.
	data['earliesCreditLine'] = data['earliesCreditLine'].str[-4:].astype(int)

# Categorical feature handling: inspect cardinalities, then encode grade.
cate_features = ['grade', 'subGrade', 'employmentTitle', 'homeOwnership', 'verificationStatus', 'purpose', 'postCode', 'regionCode', 'applicationType', 'initialListStatus', 'title', 'policyCode']
for f in cate_features:
	# Fixed: the original read `data[f]`, relying on the stale loop variable
	# `data` (which pointed at the test set after the previous loop); the
	# cardinality inspection is meant to run on the training set.
	print(f, '类型数:', data_train[f].nunique())

# grade is ordinal, so map the letters to increasing integers.
for data in [data_train, data_test_a]:
	data['grade'] = data['grade'].map({'A':1,'B':2,'C':3,'D':4,'E':5,'F':6,'G':7})

# One-hot encode medium-cardinality, purely nominal categorical features
# (more than 2 levels, not high-dimensional/sparse).
# Fixed: the original wrote `data = pd.get_dummies(data, ...)` inside a loop,
# which only rebinds the loop variable and silently discards the encoded
# frames.  Assign back to the module-level names instead.
dummy_cols = ['subGrade', 'homeOwnership', 'verificationStatus', 'purpose', 'regionCode']
data_train = pd.get_dummies(data_train, columns=dummy_cols, drop_first=True)
data_test_a = pd.get_dummies(data_test_a, columns=dummy_cols, drop_first=True)

# Outlier handling — detection via the 3-sigma (mean ± 3*std) rule.
def find_outliers_by_3segama(data, fea):
	"""Tag each value of column `fea` as 正常值/异常值 by the 3-sigma rule.

	Adds a new column '<fea>_outliers' to `data` and returns the (mutated)
	DataFrame.  Values outside mean ± 3*std are flagged as outliers.
	"""
	mean = np.mean(data[fea])
	cut_off = np.std(data[fea]) * 3
	lower, upper = mean - cut_off, mean + cut_off
	data[fea + '_outliers'] = data[fea].apply(
		lambda v: '异常值' if v > upper or v < lower else '正常值')
	return data

# Tag every numerical feature and summarise outlier counts / default sums.
data_train = data_train.copy()
for fea in numerical_fea:
	data_train = find_outliers_by_3segama(data_train, fea)
	flag_col = fea + '_outliers'
	print(data_train[flag_col].value_counts())
	print(data_train.groupby(flag_col)['isDefault'].sum())
	print('*' * 10)
# Feature interaction: target-mean encoding for grade / subGrade.
# Fixed: the original statement was split across two lines by the scrape,
# leaving `temp_dict` as a GroupBy object and the next line as a broken
# `['isDefault'].agg(...)` expression; the chain is re-joined here.
for col in ['grade', 'subGrade']:
	temp_dict = (
		data_train.groupby([col])['isDefault']
		.agg(['mean'])
		.reset_index()
		.rename(columns={'mean': col + '_target_mean'})
	)
	temp_dict.index = temp_dict[col].values
	# category value -> mean default rate, learned from the training set only.
	temp_dict = temp_dict[col + '_target_mean'].to_dict()
	data_train[col + '_target_mean'] = data_train[col].map(temp_dict)
	data_test_a[col + '_target_mean'] = data_test_a[col].map(temp_dict)

# Other derived variables: grade relative to the group mean / std of each
# anonymous n-feature.
# Fixed: both statements were split across two lines by the scrape (a syntax
# error — `['grade'].transform(...)` on its own line); re-joined here.
for df in [data_train, data_test_a]:
	for item in ['n0','n1','n2','n2.1','n4','n5','n6','n7','n8','n9','n10','n11','n12','n13','n14']:
		df['grade_to_mean_' + item] = df['grade'] / df.groupby([item])['grade'].transform('mean')
		df['grade_to_std_' + item] = df['grade'] / df.groupby([item])['grade'].transform('std')

# Feature encoding: label-encode the high-cardinality categorical features.
# Fit on the union of train and test values so unseen test categories do not
# raise in transform().
for col in tqdm(['employmentTitle', 'postCode', 'title', 'subGrade']):
	encoder = LabelEncoder()
	all_values = list(data_train[col].astype(str).values) + list(data_test_a[col].astype(str).values)
	encoder.fit(all_values)
	data_train[col] = encoder.transform(list(data_train[col].astype(str).values))
	data_test_a[col] = encoder.transform(list(data_test_a[col].astype(str).values))
	print('Label Encoding 完成')

# Feature selection — Filter method: drop low-variance features.
from sklearn.feature_selection import VarianceThreshold
# `threshold` is the variance cutoff; features whose variance is below it
# are removed.
# NOTE(review): `train` and `target_train` are not defined anywhere in this
# file (only data_train / data_test_a exist) — presumably prepared elsewhere;
# confirm before running.
VarianceThreshold(threshold=3).fit_transform(train,target_train)

# Filter method: keep the K best features scored by Pearson correlation.
from sklearn.feature_selection import SelectKBest
from scipy.stats import pearsonr

def _pearson_score(X, Y):
	"""Score each column of X against Y with the Pearson correlation.

	Returns the (scores, pvalues) pair that SelectKBest expects; |r| is used
	as the score so strong negative correlations also rank highly.
	"""
	scores, pvalues = [], []
	for i in range(X.shape[1]):
		r, p = pearsonr(X[:, i], Y)
		scores.append(abs(r))
		pvalues.append(p)
	return (np.array(scores), np.array(pvalues))

# Fixed: the original imported pearsonr and described a correlation-based
# score function, but passed no score_func, so SelectKBest silently fell
# back to the default f_classif.  `k` is the number of features to keep.
# NOTE(review): `train` / `target_train` are not defined in this file — confirm.
SelectKBest(_pearson_score, k=5).fit_transform(train, target_train)

# Filter method: keep the K best features scored by the chi-squared test
# (requires non-negative features).
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
# `k` is the number of features to keep.
# NOTE(review): `train` / `target_train` are not defined in this file — confirm.
SelectKBest(chi2, k=5).fit_transform(train,target_train)

# Filter method: keep the K best features scored by the maximal information
# coefficient (MIC).
from sklearn.feature_selection import SelectKBest
from minepy import MINE

# MINE's API is stateful rather than functional, so wrap it as a scoring
# function.  The second tuple element (the "p-value") is fixed at 0.5
# because MIC provides no analytic p-value here.
def mic(x, y):
	m = MINE()
	m.compute_score(x, y)
	return (m.mic(), 0.5)

# `k` is the number of features to keep.
# Fixed from the Python-2 original: `array` was undefined (it must be
# np.array), and in Python 3 `map` returns a lazy iterator, so it has to be
# materialised with list() before building the array.
# NOTE(review): `train` / `target_train` are not defined in this file — confirm.
SelectKBest(lambda X, Y: np.array(list(map(lambda x: mic(x, Y), X.T))).T, k=2).fit_transform(train, target_train)

# Wrapper method: recursive feature elimination (RFE).
from sklearn.feature_selection import RFE
from sklearn.linear_model import LogisticRegression
# `estimator` is the base model whose coefficients rank the features;
# `n_features_to_select` is how many features to keep.  Returns the data
# restricted to the selected features.
# NOTE(review): `train` / `target_train` are not defined in this file — confirm.
RFE(estimator=LogisticRegression(),
n_features_to_select=2).fit_transform(train,target_train)

# Embedded method: L1-penalised logistic regression as the base model —
# features with zero coefficients are dropped.
from sklearn.feature_selection import SelectFromModel
from sklearn.linear_model import LogisticRegression
# Fixed: LogisticRegression's default 'lbfgs' solver does not support the L1
# penalty (ValueError in scikit-learn >= 0.22); an L1-capable solver such as
# 'liblinear' must be specified.
# NOTE(review): `train` / `target_train` are not defined in this file — confirm.
SelectFromModel(LogisticRegression(penalty="l1", C=0.1, solver="liblinear")).fit_transform(train, target_train)

# Embedded method: GBDT (gradient-boosted trees) as the base model; features
# are kept according to the fitted model's feature_importances_.
from sklearn.feature_selection import SelectFromModel
from sklearn.ensemble import GradientBoostingClassifier
# NOTE(review): `train` / `target_train` are not defined in this file — confirm.
SelectFromModel(GradientBoostingClassifier()).fit_transform(train,target_train)

  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值