参考链接:https://tianchi.aliyun.com/notebook-ai/detail?spm=5176.20850282.J_3678908510.4.4bcd4d57PnBPSN&postId=170949
https://github.com/datawhalechina/team-learning-data-mining/blob/master/FinancialRiskControl/Task2%20%E6%95%B0%E6%8D%AE%E5%88%86%E6%9E%90.md
文章目录
一、学习知识点概要
1.对缺失值和异常的处理
2.分析数据特征之间的相关性
二、学习内容
2.1 学习目标
- 学习如何对数据集整体概况进行分析,包括数据集的基本情况(缺失值,异常值)
- 学习了解变量间的相互关系、变量与预测值之间存在的关系
2.2 内容介绍
- 数据总体了解:
- 读取数据集并了解数据集大小,原始特征维度;
- 通过info熟悉数据类型;
- 粗略查看数据集中各特征基本统计量;
- 缺失值和唯一值:
- 查看数据缺失值情况
- 查看唯一值特征情况
- 深入数据-查看数据类型
- 类别型数据
- 数值型数据
- 离散数值型数据
- 连续数值型数据
- 数据间相关关系
- 特征和特征之间关系
- 特征和目标变量之间关系
- 用pandas_profiling生成数据报告
2.3 代码示例
2.3.1 导入库及读取数据
# Imports for the EDA notebook: pandas/numpy for data handling,
# matplotlib/seaborn for plotting, datetime for date arithmetic.
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import datetime
import warnings
warnings.filterwarnings('ignore')  # silence library warnings so the notebook output stays readable
data_train = pd.read_csv('./train.csv')      # full training set
data_test_a = pd.read_csv('./testA.csv')     # test set A
data_train_sample = pd.read_csv("./train.csv",nrows=5)  # quick peek: first 5 rows only
2.3.2 总体了解数据集
# Overall look at the datasets: size, dtypes, column names, basic statistics.
# BUG FIX: the original used undefined names `train` / `test`; the frames
# were loaded above as data_train / data_test_a.
data_train.shape
data_test_a.shape
data_train.info()
data_train.columns
data_train.describe()
2.3.3 查看数据集中特征的缺失值、唯一值
# How many train columns contain at least one missing value?
print(f'There are {data_train.isnull().any().sum()} columns in train dataset with missing values.')

# Per-column missing-value ratio, as a plain dict.
have_null_fea_dict = (data_train.isnull().sum() / len(data_train)).to_dict()
# Keep only columns where more than half of the rows are missing.
fea_null_moreThanHalf = {fea: ratio for fea, ratio in have_null_fea_dict.items() if ratio > 0.5}
fea_null_moreThanHalf
# Visualize NaN ratios as a bar chart (columns without NaNs are dropped).
missing = data_train.isnull().sum() / len(data_train)
missing = missing[missing > 0].sort_values()
missing.plot.bar()
# Features taking at most one distinct value carry no information.
one_value_fea = [col for col in data_train.columns if data_train[col].nunique() <= 1]
one_value_fea_test = [col for col in data_test_a.columns if data_test_a[col].nunique() <= 1]
one_value_fea
one_value_fea_test
print(f'There are {len(one_value_fea)} columns in train dataset with one unique value.')
print(f'There are {len(one_value_fea_test)} columns in test dataset with one unique value.')
2.3.4 查看特征的数值类型及对象类型
# Split columns into numeric features and object-typed (categorical) features.
numerical_fea = list(data_train.select_dtypes(exclude=['object']).columns)
category_fea = [col for col in data_train.columns if col not in numerical_fea]
numerical_fea
category_fea
data_train.grade
#过滤数值型类别特征
def get_numerical_serial_fea(data, feas):
    """Partition numeric feature names into continuous vs. discrete.

    A feature with more than 10 distinct values is treated as continuous
    ("serial"); 10 or fewer distinct values marks it as discrete
    ("noserial").  Both result lists preserve the order of `feas`.

    Returns (numerical_serial_fea, numerical_noserial_fea).
    """
    counted = [(fea, data[fea].nunique()) for fea in feas]
    numerical_serial_fea = [fea for fea, n in counted if n > 10]
    numerical_noserial_fea = [fea for fea, n in counted if n <= 10]
    return numerical_serial_fea, numerical_noserial_fea
# Apply the continuous/discrete split, then inspect each discrete
# numeric feature's value distribution one by one.
numerical_serial_fea,numerical_noserial_fea = get_numerical_serial_fea(data_train,numerical_fea)
numerical_serial_fea
numerical_noserial_fea
data_train['term'].value_counts()# discrete variable
data_train['homeOwnership'].value_counts()# discrete variable
data_train['verificationStatus'].value_counts()# discrete variable
data_train['initialListStatus'].value_counts()# discrete variable
data_train['applicationType'].value_counts()# discrete variable
data_train['policyCode'].value_counts()# discrete variable; useless — a single value throughout
data_train['n11'].value_counts()# discrete variable; counts extremely imbalanced — whether to use it needs further analysis
data_train['n12'].value_counts()# discrete variable; counts extremely imbalanced — whether to use it needs further analysis
# Visualize the distribution of every continuous numeric feature.
# Plotting here is estimated to take roughly 10-15 minutes.
# NOTE(review): sns.distplot is deprecated (removed in recent seaborn);
# switch to sns.histplot / sns.displot when upgrading — confirm seaborn version.
f = pd.melt(data_train, value_vars=numerical_serial_fea)
g = sns.FacetGrid(f, col="variable", col_wrap=2, sharex=False, sharey=False)
g = g.map(sns.distplot, "value")
# Plotting loan amount distribution: raw values vs. log-transformed.
plt.figure(figsize=(16,12))
plt.suptitle('Transaction Values Distribution', fontsize=22)
plt.subplot(221)
sub_plot_1 = sns.distplot(data_train['loanAmnt'])
sub_plot_1.set_title("loanAmnt Distribuition", fontsize=18)
sub_plot_1.set_xlabel("")
sub_plot_1.set_ylabel("Probability", fontsize=15)
plt.subplot(222)
# Log transform compresses the right-skewed tail toward a more symmetric shape.
sub_plot_2 = sns.distplot(np.log(data_train['loanAmnt']))
sub_plot_2.set_title("loanAmnt (Log) Distribuition", fontsize=18)
sub_plot_2.set_xlabel("")
sub_plot_2.set_ylabel("Probability", fontsize=15)
# Inspect each object-typed (categorical) feature's value distribution.
category_fea
data_train['grade'].value_counts()
data_train['subGrade'].value_counts()
data_train['employmentLength'].value_counts()
data_train['issueDate'].value_counts()
data_train['earliesCreditLine'].value_counts()
data_train['isDefault'].value_counts()  # target variable: 1 = default, 0 = no default
2.3.5 变量分布可视化
# Horizontal bar chart of the 20 most frequent employmentLength values
# (dropna=False keeps NaN as its own bar).
# NOTE(review): positional x/y arguments to sns.barplot are deprecated in
# seaborn>=0.12 — pass x=/y= keywords when upgrading; confirm seaborn version.
plt.figure(figsize=(8, 8))
sns.barplot(data_train["employmentLength"].value_counts(dropna=False)[:20],
data_train["employmentLength"].value_counts(dropna=False).keys()[:20])
plt.show()
# Compare categorical distributions between defaulted (isDefault == 1)
# and non-defaulted (isDefault == 0) loans side by side.
train_loan_fr = data_train.loc[data_train['isDefault'] == 1]
train_loan_nofr = data_train.loc[data_train['isDefault'] == 0]
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(15, 8))
train_loan_fr.groupby('grade')['grade'].count().plot(kind='barh', ax=ax1, title='Count of grade fraud')
train_loan_nofr.groupby('grade')['grade'].count().plot(kind='barh', ax=ax2, title='Count of grade non-fraud')
train_loan_fr.groupby('employmentLength')['employmentLength'].count().plot(kind='barh', ax=ax3, title='Count of employmentLength fraud')
train_loan_nofr.groupby('employmentLength')['employmentLength'].count().plot(kind='barh', ax=ax4, title='Count of employmentLength non-fraud')
plt.show()
2.3.6 时间格式数据处理及查看
# Convert issueDate to datetime; issueDateDT is the number of days since
# the earliest date in the dataset (2007-06-01).
data_train['issueDate'] = pd.to_datetime(data_train['issueDate'],format='%Y-%m-%d')
startdate = datetime.datetime.strptime('2007-06-01', '%Y-%m-%d')
data_train['issueDateDT'] = data_train['issueDate'].apply(lambda x: x-startdate).dt.days
# Same conversion for the test set.
# BUG FIX: the original read data_train['issueDate'] here, so the test-set
# column was silently overwritten with (index-aligned) train-set dates.
data_test_a['issueDate'] = pd.to_datetime(data_test_a['issueDate'],format='%Y-%m-%d')
startdate = datetime.datetime.strptime('2007-06-01', '%Y-%m-%d')
data_test_a['issueDateDT'] = data_test_a['issueDate'].apply(lambda x: x-startdate).dt.days
plt.hist(data_train['issueDateDT'], label='train');
plt.hist(data_test_a['issueDateDT'], label='test');
plt.legend();
plt.title('Distribution of issueDateDT dates');
# train and test issueDateDT ranges overlap, so a purely time-based
# train/validation split would not be appropriate here.
2.3.7 掌握透视图可以让我们更好地了解数据
#透视图 索引可以有多个,“columns(列)”是可选的,聚合函数aggfunc最后是被应用到了变量“values”中你所列举的项目上。
# Pivot table: one row per grade, one column per issueDateDT value, each
# cell holding the summed loanAmnt.  `index` may list several keys,
# `columns` is optional, and `aggfunc` is applied to the `values` columns.
# CHANGE: aggfunc=np.sum -> 'sum'; passing raw numpy callables emits a
# FutureWarning in pandas>=2.1, and the string form is the supported
# equivalent with identical results.
pivot = pd.pivot_table(data_train, index=['grade'], columns=['issueDateDT'], values=['loanAmnt'], aggfunc='sum')
pivot
2.3.8 用 pandas_profiling 生成数据报告
# Generate a full automated EDA report as a standalone HTML file.
# NOTE(review): pandas_profiling is third-party and has been renamed to
# ydata-profiling; newer installs use `from ydata_profiling import
# ProfileReport` — confirm the installed version.
import pandas_profiling
pfr = pandas_profiling.ProfileReport(data_train)
pfr.to_file("./example.html")
三、学习问题与解答
对于缺失值的处理,不知道百分比为多少的时候删除?该怎么填充,用什么来判断填充的好与坏?
四、学习思考与总结
通过本章节的学习,对数据探索性分析有了更深的了解,但是还远远不够。EDA是我们初步了解数据,熟悉数据为特征工程做准备的阶段,甚至很多时候EDA阶段提取出来的特征可以直接当作规则来用。