Datawhale Group Learning - Financial Time Series Data Mining Practice - Task 05: Feature Engineering

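The snippets below assume the following imports. This is a minimal sketch of the environment the notebook expects; the exact versions, and the mvtest module name, are assumptions not shown in the original.

import datetime
from typing import List, Set

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import OneHotEncoder
from sklearn.linear_model import LinearRegression

# Later sections additionally assume these third-party packages are installed:
# shap, eli5 (PermutationImportance), wordcloud, and the MVTest module from
# https://github.com/ChuanyuXue/MVTest
import shap
import eli5
from eli5.sklearn import PermutationImportance
from wordcloud import WordCloud
import mvtest
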
# Set global index variables for convenience in later steps

labels = ['total_purchase_amt','total_redeem_amt']
date_indexs = ['week','year','month','weekday','day']
# Load the balance data
def load_data(path: str = 'user_balance_table.csv')->pd.DataFrame:
    data_balance = pd.read_csv(path)
    return data_balance.reset_index(drop=True)
    

# Add timestamp-derived columns to the dataset
def add_timestamp(data: pd.DataFrame, time_index: str = 'report_date')->pd.DataFrame:
    data_balance = data.copy()
    data_balance['date'] = pd.to_datetime(data_balance[time_index], format= "%Y%m%d")
    data_balance['day'] = data_balance['date'].dt.day
    data_balance['month'] = data_balance['date'].dt.month
    data_balance['year'] = data_balance['date'].dt.year
    # .dt.week is deprecated in newer pandas; use isocalendar().week instead
    data_balance['week'] = data_balance['date'].dt.isocalendar().week.astype(int)
    data_balance['weekday'] = data_balance['date'].dt.weekday
    return data_balance.reset_index(drop=True)

# Aggregate total purchase / redeem amounts per day
def get_total_balance(data: pd.DataFrame, date: str = '2014-03-31')->pd.DataFrame:
    df_tmp = data.copy()
    df_tmp = df_tmp.groupby(['date'])[['total_purchase_amt','total_redeem_amt']].sum()
    df_tmp.reset_index(inplace=True)
    return df_tmp[(df_tmp['date']>= date)].reset_index(drop=True)

# Generate the test data
def generate_test_data(data: pd.DataFrame)->pd.DataFrame:
    total_balance = data.copy()
    start = datetime.datetime(2014,9,1)
    testdata = []
    while start != datetime.datetime(2014,10,15):
        temp = [start, np.nan, np.nan]
        testdata.append(temp)
        start += datetime.timedelta(days = 1)
    testdata = pd.DataFrame(testdata)
    testdata.columns = total_balance.columns

    total_balance = pd.concat([total_balance, testdata], axis = 0)
    total_balance = total_balance.reset_index(drop=True)
    return total_balance.reset_index(drop=True)
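The same test frame can be built more compactly with pd.date_range; a hedged sketch with a hypothetical helper name (the date bounds mirror the loop above, which stops before 2014-10-15):

def generate_test_data_v2(data: pd.DataFrame) -> pd.DataFrame:
    # One NaN row per day from 2014-09-01 to 2014-10-14
    test_dates = pd.date_range('2014-09-01', '2014-10-14', freq='D')
    testdata = pd.DataFrame({'date': test_dates,
                             'total_purchase_amt': np.nan,
                             'total_redeem_amt': np.nan})
    return pd.concat([data.copy(), testdata], axis=0).reset_index(drop=True)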

# Load user's information
def load_user_information(path: str = 'user_profile_table.csv')->pd.DataFrame:
    return pd.read_csv(path)
# Read the datasets

balance_data = load_data('Data/user_balance_table.csv')
balance_data = add_timestamp(balance_data, time_index='report_date')
total_balance = get_total_balance(balance_data)
total_balance = generate_test_data(total_balance)
total_balance = add_timestamp(total_balance, 'date')
user_information = load_user_information('Data/user_profile_table.csv')

Feature Extraction

I. Static date-based features

1.1 Extract the is_* features

# Build the set of holiday dates

def get_holiday_set()->Set[datetime.date]:
    holiday_set = set()
    # Qingming Festival
    holiday_set = holiday_set | {datetime.date(2014,4,5), datetime.date(2014,4,6), datetime.date(2014,4,7)}
    # Labor Day
    holiday_set = holiday_set | {datetime.date(2014,5,1), datetime.date(2014,5,2), datetime.date(2014,5,3)}
    # Dragon Boat Festival
    holiday_set = holiday_set | {datetime.date(2014,5,31), datetime.date(2014,6,1), datetime.date(2014,6,2)}
    # Mid-Autumn Festival
    holiday_set = holiday_set | {datetime.date(2014,9,6), datetime.date(2014,9,7), datetime.date(2014,9,8)}
    # National Day
    holiday_set = holiday_set | {datetime.date(2014,10,1), datetime.date(2014,10,2), datetime.date(2014,10,3),\
                                 datetime.date(2014,10,4), datetime.date(2014,10,5), datetime.date(2014,10,6),\
                                 datetime.date(2014,10,7)}
    # Mid-Autumn Festival (2013)
    holiday_set = holiday_set | {datetime.date(2013,9,19), datetime.date(2013,9,20), datetime.date(2013,9,21)}
    # National Day (2013)
    holiday_set = holiday_set | {datetime.date(2013,10,1), datetime.date(2013,10,2), datetime.date(2013,10,3),\
                                 datetime.date(2013,10,4), datetime.date(2013,10,5), datetime.date(2013,10,6),\
                                 datetime.date(2013,10,7)}
    return holiday_set
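Hard-coding the holidays is fine for this fixed 2013-2014 window. As an aside, the third-party chinesecalendar package covers the official Chinese holiday calendar; the sketch below is an assumption about its API (is_holiday also counts ordinary weekends as rest days, so it is close to, but not identical to, the hand-built set above):

# Hypothetical alternative using the chinesecalendar package (pip install chinesecalendar)
import chinese_calendar

def is_cn_holiday(d: datetime.date) -> bool:
    # Note: chinese_calendar.is_holiday treats regular weekends as rest days too
    return chinese_calendar.is_holiday(d)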
# Extract all is_* features

def extract_is_feature(data: pd.DataFrame)->pd.DataFrame:
    total_balance = data.copy().reset_index(drop=True)
    
    # Is it a weekend
    total_balance['is_weekend'] = 0
    total_balance.loc[total_balance['weekday'].isin((5,6)), 'is_weekend'] = 1
    # Is it a holiday
    total_balance['is_holiday'] = 0
    total_balance.loc[total_balance['date'].isin(get_holiday_set()), 'is_holiday'] = 1
    
    # Is it the first day of a holiday
    last_day_flag = 0
    total_balance['is_firstday_of_holiday'] = 0
    for index, row in total_balance.iterrows():
        if last_day_flag == 0 and row['is_holiday'] == 1:
            total_balance.loc[index, 'is_firstday_of_holiday'] = 1
        last_day_flag = row['is_holiday']

    # Is it the last day of a holiday
    total_balance['is_lastday_of_holiday'] = 0
    for index, row in total_balance.iterrows():
        if index == len(total_balance)-1:  # guard so index+1 does not run off the end
            break
        if row['is_holiday'] == 1 and total_balance.loc[index+1, 'is_holiday'] == 0:
            total_balance.loc[index, 'is_lastday_of_holiday'] = 1

    # Is it the first working day after a holiday
    total_balance['is_firstday_of_work'] = 0
    last_day_flag = 0
    for index, row in total_balance.iterrows():
        if last_day_flag == 1 and row['is_holiday'] == 0:
            total_balance.loc[index, 'is_firstday_of_work'] = 1
        last_day_flag = row['is_lastday_of_holiday']

    # Is it a working day (1 = work, 0 = rest)
    total_balance['is_work'] = 1
    total_balance.loc[(total_balance['is_holiday'] == 1) | (total_balance['is_weekend'] == 1), 'is_work'] = 0
    special_work_day_set = {datetime.date(2014,5,4), datetime.date(2014,9,28)}  # make-up working days
    total_balance.loc[total_balance['date'].isin(special_work_day_set), 'is_work'] = 1

    # Is tomorrow a working day
    total_balance['is_gonna_work_tomorrow'] = 0
    for index, row in total_balance.iterrows():
        if index == len(total_balance)-1:
            break
        if row['is_work'] == 0 and total_balance.loc[index+1, 'is_work'] == 1:
            total_balance.loc[index, 'is_gonna_work_tomorrow'] = 1

    # Was yesterday a working day
    total_balance['is_worked_yestday'] = 0
    for index, row in total_balance.iterrows():
        if index == 0:
            continue
        if total_balance.loc[index-1, 'is_work'] == 1:
            total_balance.loc[index, 'is_worked_yestday'] = 1

    # Is it the last working day before a holiday
    total_balance['is_lastday_of_workday'] = 0
    for index, row in total_balance.iterrows():
        if index == len(total_balance)-1:
            break
        if row['is_holiday'] == 0 and total_balance.loc[index+1, 'is_holiday'] == 1:
            total_balance.loc[index, 'is_lastday_of_workday'] = 1

    # Is it a Sunday that is a working day
    total_balance['is_work_on_sunday'] = 0
    for index, row in total_balance.iterrows():
        if index == len(total_balance)-1:
            break
        if row['weekday'] == 6 and row['is_work'] == 1:
            total_balance.loc[index, 'is_work_on_sunday'] = 1
                
    # Is it the first day of the month
    total_balance['is_firstday_of_month'] = 0
    total_balance.loc[total_balance['day'] == 1, 'is_firstday_of_month'] = 1

    # Is it the second day of the month
    total_balance['is_secday_of_month'] = 0
    total_balance.loc[total_balance['day'] == 2, 'is_secday_of_month'] = 1

    # Is it early in the month (day 1-10)
    total_balance['is_premonth'] = 0
    total_balance.loc[total_balance['day'] <= 10, 'is_premonth'] = 1

    # Is it mid-month (day 11-20)
    total_balance['is_midmonth'] = 0
    total_balance.loc[(10 < total_balance['day']) & (total_balance['day'] <= 20), 'is_midmonth'] = 1

    # Is it late in the month (day > 20)
    total_balance['is_tailmonth'] = 0
    total_balance.loc[20 < total_balance['day'], 'is_tailmonth'] = 1

    # Is it the first week (week-of-year mod 4 == 1)
    total_balance['is_first_week'] = 0
    total_balance.loc[total_balance['week'] % 4 == 1, 'is_first_week'] = 1

    # Is it the second week (week-of-year mod 4 == 2)
    total_balance['is_second_week'] = 0
    total_balance.loc[total_balance['week'] % 4 == 2, 'is_second_week'] = 1

    # Is it the third week (week-of-year mod 4 == 3)
    total_balance['is_third_week'] = 0
    total_balance.loc[total_balance['week'] % 4 == 3, 'is_third_week'] = 1

    # Is it the fourth week (week-of-year mod 4 == 0)
    total_balance['is_fourth_week'] = 0
    total_balance.loc[total_balance['week'] % 4 == 0, 'is_fourth_week'] = 1
    
    return total_balance.reset_index(drop=True)
# Apply the is_* feature extraction to the dataset

total_balance = extract_is_feature(total_balance)
# One-hot encode the weekday feature

def encode_data(data: pd.DataFrame, feature_name: str = 'weekday', encoder=OneHotEncoder()) -> pd.DataFrame:
    total_balance = data.copy()
    week_feature = encoder.fit_transform(np.array(total_balance[feature_name]).reshape(-1, 1)).toarray()
    week_feature = pd.DataFrame(week_feature,columns= [feature_name + '_onehot_'+ str(x) for x in range(len(week_feature[0]))])
    #featureWeekday = pd.concat([total_balance, week_feature], axis = 1).drop(feature_name, axis=1)
    featureWeekday = pd.concat([total_balance, week_feature], axis = 1)
    return featureWeekday
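For a single categorical column, pandas' built-in get_dummies produces the same one-hot columns with less ceremony; a sketch of the equivalent call (variable names here are illustrative only):

# Equivalent one-hot encoding with pandas (column names differ slightly)
weekday_dummies = pd.get_dummies(total_balance['weekday'], prefix='weekday_onehot')
total_balance_alt = pd.concat([total_balance, weekday_dummies], axis=1)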
# Apply the weekday encoding to the dataset

total_balance = encode_data(total_balance)
# Build the is_* feature set

feature = total_balance[[x for x in total_balance.columns if x not in date_indexs]]

1.2 Label distribution analysis over the is_* features

# Draw boxen plots

def draw_boxplot(data: pd.DataFrame)->None:
    f, axes = plt.subplots(7, 4, figsize=(18, 24))
    global date_indexs, labels
    count = 0
    for i in [x for x in data.columns if x not in date_indexs + labels + ['date']]:
        sns.boxenplot(x=i, y='total_redeem_amt', data=data, ax=axes[count // 4][count % 4])
        count += 1
draw_boxplot(feature)
## Drop features that look weak

redeem_feature_seems_useless = [
      # Too few positive samples to help when modelling; if the rule is trusted, correct the prediction afterwards instead
      'is_work_on_sunday',
      # No clear difference in the medians
      'is_first_week'
]

1.3 Correlation analysis of the is_* features

# Draw a correlation heatmap

def draw_correlation_heatmap(data: pd.DataFrame, way:str = 'pearson')->None:
    feature = data.copy()
    plt.figure(figsize=(20,10))
    plt.title('The ' + way + ' correlation between total redeem and each feature')
    sns.heatmap(feature[[x for x in feature.columns if x not in ['total_redeem_amt', 'date'] ]].corr(way),linecolor='white',
        linewidths=0.1,
        cmap="RdBu")
draw_correlation_heatmap(feature, 'spearman')
# Drop features with low correlation

temp = np.abs(feature[[x for x in feature.columns 
                       if x not in ['total_purchase_amt', 'date'] ]].corr('spearman')['total_redeem_amt'])
feature_low_correlation = list(set(temp[temp < 0.1].index))

II. Distance-based features

2.1 Distance feature extraction

# Extract distance features

def extract_distance_feature(data: pd.DataFrame)->pd.DataFrame:
    total_balance = data.copy()
    
    # How many days until the next non-working day
    total_balance['dis_to_nowork'] = 0
    for index, row in total_balance.iterrows():
        if row['is_work'] == 0:
            step = 1
            flag = 1
            while flag:
                if index - step >= 0 and total_balance.loc[index - step, 'is_work'] == 1:
                    total_balance.loc[index - step, 'dis_to_nowork'] = step
                    step += 1
                else:
                    flag = 0

    total_balance['dis_from_nowork'] = 0
    step = 0
    for index, row in total_balance.iterrows():
        step += 1
        if row['is_work'] == 1:
            total_balance.loc[index, 'dis_from_nowork'] = step
        else:
            step = 0

    # How many days until the next working day
    total_balance['dis_to_work'] = 0
    for index, row in total_balance.iterrows():
        if row['is_work'] == 1:
            step = 1
            flag = 1
            while flag:
                if index - step >= 0 and total_balance.loc[index - step, 'is_work'] == 0:
                    total_balance.loc[index - step, 'dis_to_work'] = step
                    step += 1
                else:
                    flag = 0

    total_balance['dis_from_work'] = 0
    step = 0
    for index, row in total_balance.iterrows():
        step += 1
        if row['is_work'] == 0:
            total_balance.loc[index, 'dis_from_work'] = step
        else:
            step = 0


    # How many days until the next holiday
    total_balance['dis_to_holiday'] = 0
    for index, row in total_balance.iterrows():
        if row['is_holiday'] == 1:
            step = 1
            flag = 1
            while flag:
                if index - step >= 0 and total_balance.loc[index - step, 'is_holiday'] == 0:
                    total_balance.loc[index - step, 'dis_to_holiday'] = step
                    step += 1
                else:
                    flag = 0

    total_balance['dis_from_holiday'] = 0
    step = 0
    for index, row in total_balance.iterrows():
        step += 1
        if row['is_holiday'] == 0:
            total_balance.loc[index, 'dis_from_holiday'] = step
        else:
            step = 0

    # How many days until the last day of the holiday
    total_balance['dis_to_holiendday'] = 0
    for index, row in total_balance.iterrows():
        if row['is_lastday_of_holiday'] == 1:
            step = 1
            flag = 1
            while flag:
                if index - step >= 0 and total_balance.loc[index - step, 'is_lastday_of_holiday'] == 0:
                    total_balance.loc[index - step, 'dis_to_holiendday'] = step
                    step += 1
                else:
                    flag = 0

    total_balance['dis_from_holiendday'] = 0
    step = 0
    for index, row in total_balance.iterrows():
        step += 1
        if row['is_lastday_of_holiday'] == 0:
            total_balance.loc[index, 'dis_from_holiendday'] = step
        else:
            step = 0

    # Day of the month (distance from the start of the month)
    total_balance['dis_from_startofmonth'] = np.abs(total_balance['day'])

    # Distance from the middle of the month
    total_balance['dis_from_middleofmonth'] = np.abs(total_balance['day'] - 15)

    # Distance from the middle of the week
    total_balance['dis_from_middleofweek'] = np.abs(total_balance['weekday'] - 3)

    # Distance from Sunday
    total_balance['dis_from_endofweek'] = np.abs(total_balance['weekday'] - 6)

    return total_balance
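These row-by-row loops are easy to follow but slow on larger frames. The "days since the last non-working day" style counters can also be computed with a run-length trick; a rough sketch for one column only (illustrative variable names, not a drop-in replacement for the whole function):

# Vectorized equivalent of dis_from_nowork: position within the current run of working days
s = total_balance['is_work']
run_id = (s != s.shift()).cumsum()                  # label each run of identical values
run_pos = s.groupby(run_id).cumcount() + 1          # 1-based position inside the run
dis_from_nowork_vec = np.where(s == 1, run_pos, 0)  # should match the loop-based column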
# Append the distance features to the dataset

total_balance = extract_distance_feature(total_balance)

2.2 Distance feature analysis

# Get the column names of the distance features

feature = total_balance[[x for x in total_balance.columns if x not in date_indexs]]
dis_feature_indexs = [x for x in feature.columns if (x not in date_indexs + labels + ['date']) & ('dis' in x)]
# Draw point plots

def draw_point_feature(data: pd.DataFrame)->None:
    feature = data.copy()
    f, axes = plt.subplots(data.shape[1] // 3, 3, figsize=(30, data.shape[1] // 3 * 4))
    count = 0
    for i in [x for x in feature.columns if (x not in date_indexs + labels + ['date'])]:
        sns.pointplot(x=i, y="total_redeem_amt",
                markers=["^", "o"], linestyles=["-", "--"],
                data=feature, ax=axes[count // 3][count % 3] if data.shape[1] > 3 else axes[count])
        count += 1
draw_point_feature(feature[['total_redeem_amt'] + dis_feature_indexs])
# Cap distances that are too large

def dis_change(x):
    if x > 5:
        x = 10
    return x
# Apply the cap to the holiday and month distance features

dis_holiday_feature = [x for x in total_balance.columns if 'dis' in x and 'holi' in x]
dis_month_feature = [x for x in total_balance.columns if 'dis' in x and 'month' in x]
total_balance[dis_holiday_feature] = total_balance[dis_holiday_feature].applymap(dis_change)
total_balance[dis_month_feature] = total_balance[dis_month_feature].applymap(dis_change)
feature = total_balance[[x for x in total_balance.columns if x not in date_indexs]]
# Draw the point plots after capping

draw_point_feature(feature[['total_redeem_amt'] + dis_feature_indexs])
## Drop features that look unhelpful
redeem_feature_seems_useless += [
                                  # Even after capping, the variance is too large and the pattern is unclear
                                  'dis_to_holiday',
                                  # Variance too large to be reliable
                                  'dis_from_startofmonth',
                                  # Variance too large to be reliable
                                  'dis_from_middleofmonth'
]
# Draw the correlation heatmap

draw_correlation_heatmap(feature[['total_redeem_amt'] + dis_feature_indexs])
# Drop features with low correlation

temp = np.abs(feature[[x for x in feature.columns 
                       if ('dis' in x) | (x in ['total_redeem_amt']) ]].corr()['total_redeem_amt'])
feature_low_correlation += list(set(temp[temp < 0.1].index) )

III. Peak and valley features

3.1 Extract peak features

# Observe the peak pattern

fig = plt.figure(figsize=(15,15))
for i in range(6, 10):
    plt.subplot(5,1,i - 5)
    total_balance_2 = total_balance[(total_balance['date'] >= datetime.datetime(2014,i,1)) & (total_balance['date'] < datetime.datetime(2014,i+1,1))]
    sns.pointplot(x=total_balance_2['day'], y=total_balance_2['total_redeem_amt'])
    plt.legend().set_title('Month:' + str(i))
#redeem

# 0401 (Tue)                                             0406 (Sun, 2nd day of Qingming)
# 0410 (Thu, close to the Tuesday level)                 0412 (Sat, close to the Sunday level)
# 0415 (Tue)                                             0420 (Sun)
# 0424 (Thu, roughly at the Tuesday level)               0427 (Sun)
# 0429 (Tue)                                             0502 (Fri, 2nd day of Labor Day)
# 0507 (Wed, far from Tuesday, maybe Labor Day effect)   0511 (Sun)
# 0512 (Mon, some gap to Tuesday)                        0518 (Sun)
# 0519 (Tue)                                             0525 (Sun)
# 0526 (Mon, some gap to Tuesday)                        0531 (Sat, month end)
# 0605 (Thu, far from Tuesday, maybe Dragon Boat effect) 0607 (Sat, maybe Dragon Boat effect)
# 0609 (Mon, close to Tuesday)                           0615 (Sun)
# 0616 (Mon, far from Tuesday)                           0622 (Sun)
# 0626 (Thu, not far from Tuesday)                       0629 (Sun)
# 0701 (Tue)                                             0705 (Sat, not far from the Sunday level)
# 0707 (Mon, gap to Tuesday)                             0713 (Sun)
# 0716 (Wed, some gap to Tuesday)                        0720 (Sun)
# 0721 (Mon, clear gap to Tuesday)                       0726 (Sat, close to the Sunday level)
# 0728 (Mon, clear gap to Tuesday)                       0803 (Sun)
# 0805 (Tue)                                             0809 (Sat, large gap to the Sunday level)
# 0811 (Mon, large gap to Tuesday)                       0817 (Sun)
# 0818 (Mon, not far from Tuesday)                       0824 (Sun)
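A quick way to confirm the Tuesday-peak / Sunday-valley reading above is to average the redeem amount by weekday; a small sanity-check sketch over the observed part of the data only (the variable name is illustrative):

# Mean redeem amount per weekday (0 = Monday ... 6 = Sunday); Tuesday (1) should be
# the largest and Sunday (6) the smallest if the notes above hold
observed = total_balance[total_balance['total_redeem_amt'].notna()]
print(observed.groupby('weekday')['total_redeem_amt'].mean().sort_values(ascending=False))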
# Define peak-distance features

def extract_peak_feature(data: pd.DataFrame)->pd.DataFrame:
    total_balance = data.copy()
    # Days from the redeem peak (Tuesday)
    total_balance['dis_from_redeem_peak'] = np.abs(total_balance['weekday'] - 1)

    # Days from the redeem valley (Sunday); identical to dis_from_endofweek
    total_balance['dis_from_redeem_valley'] = np.abs(total_balance['weekday'] - 6)
    
    return total_balance
# Extract the peak features

total_balance = extract_peak_feature(total_balance)
feature = total_balance[[x for x in total_balance.columns if x not in date_indexs]]

3.2 Analyze the peak features

draw_point_feature(feature[['total_redeem_amt'] + ['dis_from_redeem_peak','dis_from_redeem_valley']])

3.3 Correlation analysis of the peak features

temp = np.abs(feature[[x for x in feature.columns if ('peak' in x) or ('valley' in x) or (x in ['total_redeem_amt']) ]].corr()['total_redeem_amt'])

IV. Cycle factors as features

4.1 Extract the cycle factors

def generate_rate(df, month_index):
    total_balance = df.copy()
    pure_balance = total_balance[['date','total_purchase_amt','total_redeem_amt']]
    pure_balance = pure_balance[(pure_balance['date'] >= datetime.datetime(2014,3,1)) & (pure_balance['date'] < datetime.datetime(2014, month_index, 1))]
    pure_balance['weekday'] = pure_balance['date'].dt.weekday
    pure_balance['day'] = pure_balance['date'].dt.day
    pure_balance['week'] = pure_balance['date'].dt.isocalendar().week.astype(int)
    pure_balance['month'] = pure_balance['date'].dt.month
    weekday_rate = pure_balance[['weekday']+labels].groupby('weekday',as_index=False).mean()
    for name in labels:
        weekday_rate = weekday_rate.rename(columns={name: name+'_weekdaymean'})
    weekday_rate['total_purchase_amt_weekdaymean'] /= np.mean(pure_balance['total_purchase_amt'])
    weekday_rate['total_redeem_amt_weekdaymean'] /= np.mean(pure_balance['total_redeem_amt'])
    pure_balance = pd.merge(pure_balance, weekday_rate, on='weekday', how='left')
    weekday_count = pure_balance[['day','weekday','date']].groupby(['day','weekday'],as_index=False).count()
    weekday_count = pd.merge(weekday_count, weekday_rate, on = 'weekday')
    weekday_count['total_purchase_amt_weekdaymean'] *= weekday_count['date'] / (len(set(pure_balance['month'])) - 1)
    weekday_count['total_redeem_amt_weekdaymean'] *= weekday_count['date'] / (len(set(pure_balance['month'])) - 1)
    day_rate = weekday_count.drop(['weekday','date'],axis=1).groupby('day',as_index=False).sum()
    weekday_rate.columns = ['weekday','purchase_weekdayrate','redeem_weekdayrate']
    day_rate.columns = ['day','purchase_dayrate','redeem_dayrate']
    day_rate['date'] = datetime.datetime(2014, month_index, 1)
    for index, row in day_rate.iterrows():
        if month_index in (2,4,6,9) and row['day'] == 31:
            continue
        day_rate.loc[index, 'date'] = datetime.datetime(2014, month_index, int(row['day']))
    day_rate['weekday'] = day_rate['date'].dt.weekday
    day_rate = pd.merge(day_rate, weekday_rate, on='weekday')
    day_rate['purchase_dayrate'] = day_rate['purchase_weekdayrate'] / day_rate['purchase_dayrate']
    day_rate['redeem_dayrate'] = day_rate['redeem_weekdayrate'] / day_rate['redeem_dayrate']
    weekday_rate['month'] = month_index
    day_rate['month'] = month_index
    
    return weekday_rate, day_rate[['day','purchase_dayrate','redeem_dayrate','month']].sort_values('day')
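The core of the cycle-factor idea is simple: the weekday factor is the mean amount on each weekday divided by the overall mean, and the day-of-month factor redistributes those weekday factors over calendar days. A minimal sketch of just the weekday factor (same quantity as purchase_weekdayrate / redeem_weekdayrate above; hist and weekday_factor are illustrative names):

# Weekday factor = mean amount on that weekday / overall mean amount
hist = total_balance[(total_balance['date'] >= datetime.datetime(2014,3,1))
                     & (total_balance['date'] < datetime.datetime(2014,8,1))]
weekday_factor = (hist.groupby('weekday')['total_redeem_amt'].mean()
                  / hist['total_redeem_amt'].mean())
print(weekday_factor)  # values above 1 mark an above-average weekday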
# Generate the cycle factors and merge them into the dataset

weekday_rate_list = []
day_rate_list = []
for i in range(3, 10):
    weekday_rate, day_rate = generate_rate(total_balance, i)
    weekday_rate_list.append(weekday_rate.reset_index(drop=True))
    day_rate_list.append(day_rate.reset_index(drop=True))

weekday_rate_list = pd.concat(weekday_rate_list).reset_index(drop=True)
day_rate_list = pd.concat(day_rate_list).reset_index(drop=True)
total_balance = pd.merge(total_balance, weekday_rate_list, on=['weekday','month'], how='left')
total_balance = pd.merge(total_balance, day_rate_list, on=['day','month'], how='left')
# Fill missing cycle-factor values with the median

for i in [x for x in total_balance.columns 
          if 'rate' in x and x not in labels + date_indexs]:
    total_balance[i] = total_balance[i].fillna(np.nanmedian(total_balance[i]))

4.2 Correlation analysis of the cycle factors

# Draw the correlation heatmap

draw_correlation_heatmap(total_balance[['total_redeem_amt'] 
                                       + [x for x in total_balance.columns 
                                          if 'rate' in x and x not in labels + date_indexs]])
# Build the feature set by dropping the raw date-index columns

feature = total_balance.drop(date_indexs, axis=1)

V. Dynamic time-series features

5.1 Extract dynamic features

## Extract dynamic features

def get_amtfeature_with_time(data: pd.DataFrame)->pd.DataFrame:
    df_tmp_ = data[labels + date_indexs + ['date']].copy()
    total_balance = data.copy()
    
    df_tmp_ = df_tmp_[(df_tmp_['date']>=datetime.datetime(2014,3,3))]
    df_tmp_['weekday'] = df_tmp_['date'].dt.weekday + 1
    iso_week = df_tmp_['date'].dt.isocalendar().week.astype(int)
    df_tmp_['week'] = iso_week - iso_week.min() + 1
    df_tmp_['day'] = df_tmp_['date'].dt.day
    df_tmp_['month'] = df_tmp_['date'].dt.month
    df_tmp_.reset_index(drop=True, inplace=True)
    df_redeem = pd.DataFrame(columns = ['weekday1','weekday2','weekday3','weekday4',
                                          'weekday5','weekday6','weekday7'])
    count = 0

    for i in range(len(df_tmp_)):
        df_redeem.loc[count,'weekday'+str(df_tmp_.loc[i,'weekday'])] = df_tmp_.loc[i,'total_redeem_amt']
        if df_tmp_.loc[i,'weekday'] == 7:
            count = count + 1

    df_tmp_['redeem_weekday_median'] = np.nan
    df_tmp_['redeem_weekday_mean'] = np.nan
    df_tmp_['redeem_weekday_min'] = np.nan
    df_tmp_['redeem_weekday_max'] = np.nan
    df_tmp_['redeem_weekday_std'] = np.nan
    df_tmp_['redeem_weekday_skew'] = np.nan

    for i in range(len(df_tmp_)):
        # start computing the statistics from 2014-03-31 (after the first 4 weeks)
        if i > 4*7-1:
            df_tmp_.loc[i,'redeem_weekday_median'] = df_redeem.loc[:df_tmp_.loc[i,'week']-2,
                                          'weekday'+str(df_tmp_.loc[i,'weekday'])].median()

            df_tmp_.loc[i,'redeem_weekday_mean'] = df_redeem.loc[:df_tmp_.loc[i,'week']-2,
                                          'weekday'+str(df_tmp_.loc[i,'weekday'])].mean()

            df_tmp_.loc[i,'redeem_weekday_min'] = df_redeem.loc[:df_tmp_.loc[i,'week']-2,
                                          'weekday'+str(df_tmp_.loc[i,'weekday'])].min()    

            df_tmp_.loc[i,'redeem_weekday_max'] = df_redeem.loc[:df_tmp_.loc[i,'week']-2,
                                          'weekday'+str(df_tmp_.loc[i,'weekday'])].max()   

            df_tmp_.loc[i,'redeem_weekday_std'] = df_redeem.loc[:df_tmp_.loc[i,'week']-2,
                                          'weekday'+str(df_tmp_.loc[i,'weekday'])].std() 

            df_tmp_.loc[i,'redeem_weekday_skew'] = df_redeem.loc[:df_tmp_.loc[i,'week']-2,
                                          'weekday'+str(df_tmp_.loc[i,'weekday'])].skew() 

    colList = ['redeem_weekday_median','redeem_weekday_mean','redeem_weekday_min',
               'redeem_weekday_max','redeem_weekday_std','redeem_weekday_skew']
    total_balance = pd.merge(total_balance, df_tmp_[colList+['day','month']], on=['day','month'], how='left')
    return total_balance
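The loop above builds, for each row, statistics over the same weekday in all previous weeks. A rough groupby/expanding sketch of the same idea, shown only for the mean (window edges may differ slightly from the loop, so treat it as an approximation rather than a drop-in replacement; tmp and the result name are illustrative):

# Expanding same-weekday mean of total_redeem_amt, using only rows strictly before the current one
tmp = total_balance[['date', 'weekday', 'total_redeem_amt']].sort_values('date')
redeem_weekday_mean_alt = tmp.groupby('weekday')['total_redeem_amt'].transform(
    lambda s: s.expanding().mean().shift(1))   # mean over all earlier same-weekday values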
# Merge the dynamic features into the dataset

total_balance = get_amtfeature_with_time(total_balance)
# Fill missing dynamic-feature values with the median

for i in [x for x in total_balance.columns 
          if '_weekday_' in x and x not in labels + date_indexs]:
    total_balance[i] = total_balance[i].fillna(np.nanmedian(total_balance[i]))

5.2 Correlation analysis of the dynamic features

# Draw the correlation heatmap of the dynamic features

draw_correlation_heatmap(total_balance[['total_redeem_amt'] + 
                                      ['redeem_weekday_median',
                                      'redeem_weekday_mean','redeem_weekday_min',
                                       'redeem_weekday_max','redeem_weekday_std',
                                       'redeem_weekday_skew'
                                      ]])
feature[labels + ['dis_to_nowork', 'dis_to_work', 'dis_from_work', 'purchase_weekdayrate',
       'redeem_dayrate', 'weekday_onehot_5', 'weekday_onehot_6',
       'dis_from_nowork', 'is_holiday', 'weekday_onehot_1', 'weekday_onehot_2',
       'weekday_onehot_0', 'dis_from_middleofweek', 'dis_from_holiendday',
       'weekday_onehot_3', 'is_lastday_of_holiday', 'is_firstday_of_holiday',
       'weekday_onehot_4', 'is_worked_yestday', 'is_second_week',
       'is_third_week', 'dis_from_startofmonth', 'dis_from_holiday', 'total_purchase_amt',
       'total_redeem_amt', 'date']].to_csv('Feature/0615_residual_redeem_origined.csv', index=False)

Feature Elimination

1.1 Drop features that cannot effectively split the dataset

feature.info()
<class 'pandas.core.frame.DataFrame'>
Int64Index: 198 entries, 0 to 197
Data columns (total 47 columns):
 #   Column                  Non-Null Count  Dtype         
---  ------                  --------------  -----         
 0   date                    198 non-null    datetime64[ns]
 1   total_purchase_amt      154 non-null    float64       
 2   total_redeem_amt        154 non-null    float64       
 3   is_weekend              198 non-null    int64         
 4   is_holiday              198 non-null    int64         
 5   is_firstday_of_holiday  198 non-null    int64         
 6   is_lastday_of_holiday   198 non-null    int64         
 7   is_firstday_of_work     198 non-null    int64         
 8   is_work                 198 non-null    int64         
 9   is_gonna_work_tomorrow  198 non-null    int64         
 10  is_worked_yestday       198 non-null    int64         
 11  is_lastday_of_workday   198 non-null    int64         
 12  is_work_on_sunday       198 non-null    int64         
 13  is_firstday_of_month    198 non-null    int64         
 14  is_secday_of_month      198 non-null    int64         
 15  is_premonth             198 non-null    int64         
 16  is_midmonth             198 non-null    int64         
 17  is_tailmonth            198 non-null    int64         
 18  is_first_week           198 non-null    int64         
 19  is_second_week          198 non-null    int64         
 20  is_third_week           198 non-null    int64         
 21  is_fourth_week          198 non-null    int64         
 22  weekday_onehot_0        198 non-null    float64       
 23  weekday_onehot_1        198 non-null    float64       
 24  weekday_onehot_2        198 non-null    float64       
 25  weekday_onehot_3        198 non-null    float64       
 26  weekday_onehot_4        198 non-null    float64       
 27  weekday_onehot_5        198 non-null    float64       
 28  weekday_onehot_6        198 non-null    float64       
 29  dis_to_nowork           198 non-null    int64         
 30  dis_from_nowork         198 non-null    int64         
 31  dis_to_work             198 non-null    int64         
 32  dis_from_work           198 non-null    int64         
 33  dis_to_holiday          198 non-null    int64         
 34  dis_from_holiday        198 non-null    int64         
 35  dis_to_holiendday       198 non-null    int64         
 36  dis_from_holiendday     198 non-null    int64         
 37  dis_from_startofmonth   198 non-null    int64         
 38  dis_from_middleofmonth  198 non-null    int64         
 39  dis_from_middleofweek   198 non-null    int64         
 40  dis_from_endofweek      198 non-null    int64         
 41  dis_from_redeem_peak    198 non-null    int64         
 42  dis_from_redeem_valley  198 non-null    int64         
 43  purchase_weekdayrate    198 non-null    float64       
 44  redeem_weekdayrate      198 non-null    float64       
 45  purchase_dayrate        198 non-null    float64       
 46  redeem_dayrate          198 non-null    float64       
dtypes: datetime64[ns](1), float64(13), int64(33)
memory usage: 74.2 KB
feature[feature['total_redeem_amt'].isna()]
[Output omitted: 44 rows × 47 columns — the rows from 2014-09-01 through 2014-10-14, whose total_purchase_amt and total_redeem_amt are NaN (the test period to predict); all engineered feature columns are filled.]

feature1 = feature[(feature['date']<datetime.datetime(2014,9,1))]
feature1.info()
<class 'pandas.core.frame.DataFrame'>
Int64Index: 154 entries, 0 to 153
Data columns (total 47 columns):
 #   Column                  Non-Null Count  Dtype         
---  ------                  --------------  -----         
 0   date                    154 non-null    datetime64[ns]
 1   total_purchase_amt      154 non-null    float64       
 2   total_redeem_amt        154 non-null    float64       
 3   is_weekend              154 non-null    int64         
 4   is_holiday              154 non-null    int64         
 5   is_firstday_of_holiday  154 non-null    int64         
 6   is_lastday_of_holiday   154 non-null    int64         
 7   is_firstday_of_work     154 non-null    int64         
 8   is_work                 154 non-null    int64         
 9   is_gonna_work_tomorrow  154 non-null    int64         
 10  is_worked_yestday       154 non-null    int64         
 11  is_lastday_of_workday   154 non-null    int64         
 12  is_work_on_sunday       154 non-null    int64         
 13  is_firstday_of_month    154 non-null    int64         
 14  is_secday_of_month      154 non-null    int64         
 15  is_premonth             154 non-null    int64         
 16  is_midmonth             154 non-null    int64         
 17  is_tailmonth            154 non-null    int64         
 18  is_first_week           154 non-null    int64         
 19  is_second_week          154 non-null    int64         
 20  is_third_week           154 non-null    int64         
 21  is_fourth_week          154 non-null    int64         
 22  weekday_onehot_0        154 non-null    float64       
 23  weekday_onehot_1        154 non-null    float64       
 24  weekday_onehot_2        154 non-null    float64       
 25  weekday_onehot_3        154 non-null    float64       
 26  weekday_onehot_4        154 non-null    float64       
 27  weekday_onehot_5        154 non-null    float64       
 28  weekday_onehot_6        154 non-null    float64       
 29  dis_to_nowork           154 non-null    int64         
 30  dis_from_nowork         154 non-null    int64         
 31  dis_to_work             154 non-null    int64         
 32  dis_from_work           154 non-null    int64         
 33  dis_to_holiday          154 non-null    int64         
 34  dis_from_holiday        154 non-null    int64         
 35  dis_to_holiendday       154 non-null    int64         
 36  dis_from_holiendday     154 non-null    int64         
 37  dis_from_startofmonth   154 non-null    int64         
 38  dis_from_middleofmonth  154 non-null    int64         
 39  dis_from_middleofweek   154 non-null    int64         
 40  dis_from_endofweek      154 non-null    int64         
 41  dis_from_redeem_peak    154 non-null    int64         
 42  dis_from_redeem_valley  154 non-null    int64         
 43  purchase_weekdayrate    154 non-null    float64       
 44  redeem_weekdayrate      154 non-null    float64       
 45  purchase_dayrate        154 non-null    float64       
 46  redeem_dayrate          154 non-null    float64       
dtypes: datetime64[ns](1), float64(13), int64(33)
memory usage: 57.8 KB
# Plot density estimates of the label, split by each binary feature
plt.figure(figsize=(4 * 6, 6 * len(feature1.columns) / 6))
count = 0
for i in [x for x in feature1.columns 
          if (x not in labels + date_indexs + ['date']) 
          & ('amt' not in x) & ('dis' not in x) & ('rate' not in x)]:
    count += 1
    if feature1[feature1[i] == 0].empty:
        continue
    plt.subplot(len(feature1.columns) // 4, 4, count)
    
    ax = sns.kdeplot(feature1[feature1[i] == 0]['total_redeem_amt'], label= str(i) + ' == 0, redeem')
    ax = sns.kdeplot(feature1[feature1[i] == 1]['total_redeem_amt'], label= str(i) + ' == 1, redeem')
# Drop features that do not split the dataset clearly

redeem_feature_seems_useless += ['is_gonna_work_tomorrow','is_fourth_week','weekday_onehot_4']

1.2 Use MVTest to rescue features that are dependent but not linearly correlated

feature_low_correlation

['is_firstday_of_month', 'weekday_onehot_3', 'is_second_week', 'weekday_onehot_4', 'is_fourth_week', 'is_third_week', 'is_first_week', 'is_worked_yestday', 'is_lastday_of_workday', 'is_premonth', 'dis_from_middleofweek', 'dis_from_middleofmonth', 'dis_from_holiendday', 'dis_from_startofmonth']

# MVtest Ref: https://github.com/ChuanyuXue/MVTest

mv = mvtest.mvtest()

name_list = []
Tn_list = []
p_list = []
for i in [i for i in feature_low_correlation if 'is' in i or 'discret' in i]:
    pair = mv.test(feature['total_redeem_amt'], feature[i])
    name_list.append(str(i))
    Tn_list.append(pair['Tn'])
    p_list.append(pair['p-value'][0])
temp = pd.DataFrame([name_list,Tn_list]).T.sort_values(1)
temp[1] = np.abs(temp[1])
feature_saved_from_mv_redeem = list(temp.sort_values(1, ascending=False)[temp[1] > 0.5984][0])

1.3 Remove multicollinear features

feature = feature[[x for x in feature.columns 
                   if (x not in feature_low_correlation + redeem_feature_seems_useless) or\
                   (x in feature_saved_from_mv_redeem )]]
redeem_cors = feature.corr()
redeem_cors['total_redeem_amt'] = np.abs(redeem_cors['total_redeem_amt'])
feature_lists = list(redeem_cors.sort_values(by='total_redeem_amt',ascending=False).index)[2:]
feature_temp = feature.dropna()
# Note: features are kept in descending order of correlation with the label, and candidates are removed in ascending order
threshold = 0.8
for i in range(len(feature_lists)):
    for k in range(len(feature_lists)-1, -1, -1):
        if i >= len(feature_lists) or k >= len(feature_lists) or i == k:
            break
        if np.abs(np.corrcoef(feature_temp[feature_lists[i]], feature_temp[feature_lists[k]])[0][1]) > threshold:
            higher_feature_temp = feature_temp[feature_lists[i]] * feature_temp[feature_lists[k]]
            if np.abs(np.corrcoef(feature_temp[feature_lists[i]], higher_feature_temp)[0][1]) <= threshold:
                name = str(feature_lists[i]) + '%%%%' + str(feature_lists[k])
                feature_temp[name] = higher_feature_temp
                feature[name] = feature[feature_lists[i]] * feature[feature_lists[k]]
                feature_lists.append(name)
            feature_temp = feature_temp.drop(feature_lists[k], axis=1)
            feature_lists.remove(feature_lists[k])
feature = feature[[x for x in feature_lists if x not in labels] + labels + ['date']]
feature_lists

['is_work', 'dis_from_redeem_valley', 'total_purchase_amt', 'purchase_weekdayrate', 'redeem_dayrate', 'weekday_onehot_5', 'weekday_onehot_6', 'is_holiday', 'dis_from_nowork', 'weekday_onehot_0', 'is_tailmonth', 'weekday_onehot_1', 'is_firstday_of_holiday', 'weekday_onehot_2', 'is_lastday_of_holiday', 'dis_from_holiday', 'is_firstday_of_work', 'is_secday_of_month', 'is_midmonth', 'dis_from_holiendday', 'is_work%%%%is_weekend', 'dis_from_redeem_valley%%%%dis_from_redeem_peak']

feature.to_csv('Feature/redeem_feature_droped_0614.csv',index=False)

Selecting the Winning Features

# Split the dataset

def split_data_underline(data):
    trainset = data[(datetime.datetime(2014,4,1) <= data['date']) & (data['date'] < datetime.datetime(2014,8,1))]
    testset = data[(datetime.datetime(2014,8,1) <= data['date']) & (data['date'] < datetime.datetime(2014,9,1))]
    return trainset, testset

1.1 Use the SHAP package to pick winning features

SHAP values represent the fair score of each feature, based on its contribution to the model's output over the set of features.

SHAP can also visualize how the output changes when a feature's value is low or high on each sample.

shap.initjs()
from sklearn import tree
model = tree.DecisionTreeRegressor()
train, test = split_data_underline(feature.dropna())
features = [x for x in train.columns if x not in date_indexs]
model.fit(train[features].drop(labels+['date'], axis=1), train['total_redeem_amt'])

explainer = shap.TreeExplainer(model)
shap_values = explainer.shap_values(test[features].drop(labels+['date'], axis=1))

shap.summary_plot(shap_values, test[features].drop(labels+['date'], axis=1), plot_type='bar')

shap.summary_plot(shap_values, test[features].drop(labels+['date'], axis=1))

tree_important_redeem = pd.DataFrame(np.mean(np.abs(shap_values), axis=0),[x for x in features if x not in labels + date_indexs + ['date']]).reset_index()
tree_important_redeem = tree_important_redeem.sort_values(0, ascending=False).reset_index(drop=True)
tree_important_redeem = list(tree_important_redeem[:20]['index'])
tree_important_redeem

['dis_from_nowork', 'is_tailmonth', 'redeem_dayrate', 'purchase_weekdayrate', 'is_midmonth', 'weekday_onehot_2', 'weekday_onehot_0', 'dis_from_redeem_valley%%%%dis_from_redeem_peak', 'dis_from_holiday', 'is_firstday_of_holiday', 'dis_from_redeem_valley', 'is_firstday_of_work', 'is_holiday', 'dis_from_holiendday', 'weekday_onehot_1', 'is_lastday_of_holiday', 'is_secday_of_month', 'weekday_onehot_5', 'weekday_onehot_6', 'is_work%%%%is_weekend']

# Visualize the selected features as a word cloud

def draw_cloud(feature_index: List[str])->None:
    plt.figure(figsize=(20,10))
    plt.subplot(1,2,1)
    ciyun = WordCloud(background_color='white', max_font_size=40)
    ciyun.generate(text=''.join([x+' ' for x in feature_index if x != 'total_redeem_amt']))
    plt.imshow(ciyun, interpolation='bilinear')
    plt.axis("off")
draw_cloud(tree_important_redeem)

1.2 Use permutation importance to pick winning features

Permutation importance scores a feature by how much the model's validation score degrades when that feature's values are randomly shuffled.

model = LinearRegression()
train, test = split_data_underline(feature.dropna())
model.fit(train[features].drop(labels+['date'], axis=1), train['total_redeem_amt'])
perm = PermutationImportance(model, random_state=42).fit(test[features].drop(labels+['date'], axis=1), test['total_redeem_amt'])
liner_important_redeem = pd.DataFrame(np.abs(perm.feature_importances_), [x for x in features if x not in labels + date_indexs + ['date']]).reset_index()
eli5.show_weights(perm, feature_names=list(str(x) for x in features if x not in labels + ['date']))
liner_important_redeem = liner_important_redeem.sort_values(0, ascending=False).reset_index(drop=True)
liner_important_redeem = list(liner_important_redeem[:20]['index'])
draw_cloud(liner_important_redeem)

1.3 Intersect the two feature sets to select the final winning features

winner_features_redeem = list(set(tree_important_redeem)\
                     & set(liner_important_redeem))
draw_cloud(winner_features_redeem)