Datawhale组队学习-金融时序数据挖掘实践-Task06建模预测
# 分割数据集
def split_data_underline(data: pd.DataFrame)->pd.DataFrame:
    """Offline split: train on 2014-04-01..2014-07-31, test on August 2014.

    Returns (trainset, testset) filtered by the 'date' column; both bounds
    are half-open intervals [start, end).
    """
    train_lo = datetime.datetime(2014, 4, 1)
    split_at = datetime.datetime(2014, 8, 1)
    test_hi = datetime.datetime(2014, 9, 1)
    dates = data['date']
    in_train = (dates >= train_lo) & (dates < split_at)
    in_test = (dates >= split_at) & (dates < test_hi)
    return data[in_train], data[in_test]
def split_data_online(data: pd.DataFrame)->pd.DataFrame:
    """Online split: train on 2014-04-01..2014-08-31, predict September 2014.

    Returns (trainset, testset); intervals are half-open [start, end).
    """
    train_lo = datetime.datetime(2014, 4, 1)
    split_at = datetime.datetime(2014, 9, 1)
    test_hi = datetime.datetime(2014, 10, 1)
    dates = data['date']
    in_train = (dates >= train_lo) & (dates < split_at)
    in_test = (dates >= split_at) & (dates < test_hi)
    return data[in_train], data[in_test]
# 定义评价函数
def AE(y: Iterable, yhat: Iterable)->Iterable:
    """Element-wise relative absolute error |y - yhat| / |y|.

    Preserves the input container type (a pandas Series in → Series out,
    which callers rely on for `.apply`).
    """
    abs_error = np.abs(y - yhat)
    return abs_error / np.abs(y)
def total_AE(purchasehat: Iterable, redeemhat: Iterable, purchase: Iterable, redeem: Iterable, h: float = 0.3) -> float:
    """Competition score: exponentially-weighted relative errors.

    Each element contributes exp(-AE/h) * 10 (higher is better); the purchase
    part is weighted 45% and the redeem part 55%. `h` controls how quickly the
    reward decays as the relative error grows.

    Fixes vs. original: `h` was annotated `int` although the default is 0.3,
    and the return was annotated `Iterable` although it is a scalar. The
    per-element Python `map` is replaced by the equivalent vectorized form.
    """
    purchase_part = np.sum(np.exp(-AE(purchase, purchasehat) / h) * 10)
    redeem_part = np.sum(np.exp(-AE(redeem, redeemhat) / h) * 10)
    return purchase_part * 0.45 + redeem_part * 0.55
# 在不同的时间段对模型进行验证
def week_evalution_single(data: pd.DataFrame, model: object, types: str)->pd.DataFrame:
    """Rolling-window validation of `model` over 7 split dates.

    For each split date i (weekly steps back from 2014-08-01), fit on the
    4 months before i and score the month starting at i with the
    exp(-AE/h)*10 reward (h = 0.3). `types` is 'purchase' or 'redeem' and
    selects the target column 'total_<types>_amt'.

    Returns a one-column DataFrame of the 7 scores, newest split first.
    """
    results = []
    a_month = relativedelta(months=1)
    for i in [datetime.datetime(2014, 8, 1), datetime.datetime(2014, 7, 25), datetime.datetime(2014, 7, 18), datetime.datetime(2014, 7, 11),
              datetime.datetime(2014, 7, 4), datetime.datetime(2014, 6, 27), datetime.datetime(2014, 6,20)]:
        trainset = data[(i - 4 * a_month <= data['date']) & (data['date'] < i)]
        testset = data[(i <= data['date']) & (data['date'] < i + a_month)]
        # If the window falls outside the available data, fall back to a fixed
        # early split: train before 2014-04-20, test up to 2014-09-01.
        if len(testset) == 0 or len(trainset) == 0:
            i = datetime.datetime(2014, 4, 20)
            trainset = data[(i - 4 * a_month <= data['date']) & (data['date'] < i)]
            testset = data[(i <= data['date']) & (data['date'] < datetime.datetime(2014, 9, 1))]
        # Everything except the two targets and the date is a model input.
        feature = [x for x in trainset.columns if x not in ['total_purchase_amt','total_redeem_amt','date']]
        model.fit(X=trainset[feature], y=trainset['total_' + types + '_amt'])
        result_lr = model.predict(testset[feature])
        h = 0.3  # decay scale of the scoring kernel
        results.append(sum(AE(testset['total_' + types + '_amt'], result_lr).apply(lambda x : np.exp(-x/h))*10))
    return pd.DataFrame(results)
# 输出评级表格
def draw_eva_table(df: pd.DataFrame)->pd.DataFrame:
    """Attach the 7 backtest split dates as an 'interval' column.

    Works on a copy; the dates step back weekly from 2014-08-01 and match
    the split order used by week_evalution_single.
    """
    table = df.copy()
    newest = datetime.datetime(2014, 8, 1)
    table['interval'] = [newest - datetime.timedelta(weeks=k) for k in range(7)]
    return table
# 对生成结果进行可视化
def visual(result_purchase_lr: Iterable, result_redeem_lr: Iterable, testset: pd.DataFrame)->None:
    """Plot predicted vs. real purchase/redeem curves plus daily error bars.

    Side effects only (matplotlib figures + seaborn bar plots of the
    prediction residual per day of month); returns nothing.
    """
    fig = plt.figure(figsize=(10,4))
    plt.plot(testset['date'], result_purchase_lr, label='predicted_purchase')
    # BUG FIX: this curve is the real *purchase* series; it was mislabeled
    # 'real_redeem' (copy-paste from the redeem plot below).
    plt.plot(testset['date'], testset['total_purchase_amt'], label='real_purchase')
    plt.legend(loc='best')
    plt.title("The distribution of real and predict purchase")
    plt.xlabel("Time")
    plt.ylabel("Amount")
    plt.show()
    fig = plt.figure(figsize=(10,4))
    sns.barplot(testset['date'].dt.day ,result_purchase_lr - testset['total_purchase_amt'])
    fig = plt.figure(figsize=(10,4))
    plt.plot(testset['date'], result_redeem_lr, label='predicted_redeem')
    plt.plot(testset['date'], testset['total_redeem_amt'], label='real_redeem')
    plt.legend(loc='best')
    plt.title("The distribution of real and predict redeem")
    plt.xlabel("Time")
    plt.ylabel("Amount")
    plt.show()
    fig = plt.figure(figsize=(10,4))
    sns.barplot(testset['date'].dt.day ,result_redeem_lr - testset['total_redeem_amt'])
# 定义提取线下最好效果特征的函数
def feature_extract(data: pd.DataFrame, model: object, types: str)->Tuple[List[str], float]:
    """Greedy forward feature selection in a random order.

    Shuffles the candidate columns, then keeps each feature only if adding it
    improves the mean rolling-backtest score from week_evalution_single.
    `labels` is a module-level list of target column names (defined elsewhere
    in this file). Returns (selected features, best mean score).
    """
    features = [x for x in data.columns if x not in labels + ['date']]
    random.shuffle(features)  # random order -> a different subset each call
    results = []
    score = -1
    for i in features:
        # NOTE(review): np.mean over the backtest DataFrame yields a
        # one-element result here; the `>` comparison relies on its scalar
        # truthiness — confirm on the pandas version in use.
        score_update = np.mean(week_evalution_single(data[results + [i] + labels + ['date']], model, types))
        if score_update > score:
            score = score_update
            results.append(i)
    return results, score
def robust_feature_extract(data: pd.DataFrame, model: object, types: str):
    """Repeat the randomized greedy selection 10 times, keep the best run.

    Prints every candidate subset with its score and returns the feature
    list of the highest-scoring run.
    """
    best_features = []
    best_score = -1
    for _ in range(10):
        candidate, candidate_score = feature_extract(data, model, types)
        if candidate_score > best_score:
            best_score = candidate_score
            best_features = candidate
        print(candidate, candidate_score)
    return best_features
# 定义AIC,BIC评价指标
def AIC(L: Iterable, delta: float, n_features: int):
    """Akaike-style information criterion: L * log10(delta) + 2 * (k + 1).

    L is the number of training samples, delta the model's error term,
    n_features the size of the feature subset. Lower is better.
    """
    complexity_penalty = 2 * (n_features + 1)
    return L * np.log10(delta) + complexity_penalty
def BIC(L: Iterable, delta: float, n_features: int):
    """Bayesian-style information criterion: L*log10(delta) + (k+1)*log10(L).

    BUG FIX: this was originally also named `AIC`, silently shadowing the
    AIC definition above so every later `AIC(...)` call actually ran this
    formula. Renamed to BIC to match the section comment ("定义AIC,BIC评价指标")
    and restore the real AIC for its callers.
    """
    return L * np.log10(delta) + (n_features + 1) * np.log10(L)
# 使用AIC指标融合模型
def feature_extract_AIC(data: pd.DataFrame, model: object, types: str)->Tuple[List[str], float]:
    """Greedy forward selection minimizing the rolling-backtest score, then
    score the chosen subset with an information criterion.

    Relies on the module-level `labels` list and the siblings
    week_evalution_single / split_data_underline. Returns (features, criterion).
    """
    features = [x for x in data.columns if x not in labels + ['date']]
    random.shuffle(features)  # random order -> a different subset each call
    results = []
    test_score = 1e9
    train_score = 0  # NOTE(review): never used below
    for i in features:
        # Keep feature i only if it lowers the mean score of the newest window.
        # NOTE(review): feature_extract above *maximizes* the same score while
        # this minimizes it — confirm the intended direction.
        test_score_update = np.mean(week_evalution_single(data[results + [i] + labels + ['date']], model, types)[0])
        if test_score_update < test_score:
            test_score = test_score_update
            results.append(i)
    # Refit on the offline train window to get the criterion's error term.
    trainset, testset = split_data_underline(data)
    feature = results
    model.fit(X=trainset[feature], y=trainset['total_' + types + '_amt'])
    train_result_lr = model.predict(trainset[feature])
    delta = mean_squared_error(train_result_lr, trainset['total_' + types + '_amt'])
    #delta = np.sum(AE(trainset['total_' + types + '_amt'], train_result_lr).apply(lambda x : np.exp(-x/0.1))*10)
    # NOTE(review): the file defines `AIC` twice above, so which formula runs
    # here depends on definition order — verify which one is intended.
    return results, AIC(len(trainset), delta, len(feature))
def multi_model(data: pd.DataFrame, model: object, types: str)->Tuple[List[List[str]], float]:
    """Sample 100 AIC-selected feature subsets and derive ensemble weights.

    Runs feature_extract_AIC 100 times (each run is randomized), then maps
    the criterion scores to normalized positive weights. Returns
    (list of feature subsets, list of weights summing to 1).
    """
    features = []
    weights = []
    for i in range(100):
        results_update, score_update = feature_extract_AIC(data, model, types)
        features.append(results_update)
        weights.append(score_update)
    # Center the criterion values around their mean...
    avg = np.mean(weights)
    weights = [x - avg for x in weights]
    # ...then map to positive weights. NOTE(review): the even exponent (10)
    # discards the sign of (-x/2), so better- and worse-than-average runs
    # weigh the same — confirm this is the intended weighting.
    weights = [np.power((-1 * x / 2), 10) for x in weights]
    summ = np.sum(weights)
    weights = [x / summ for x in weights]
    return features, weights
# 生成线上结果
def generate_online_result(df: pd.DataFrame, feature: Iterable, model = None, target:str = 'total_purchase_amt')->Iterable:
    """Fit on the online train window (Apr-Aug 2014) and predict September.

    Parameters
    ----------
    df : feature table with a 'date' column.
    feature : column names used as model inputs.
    model : any fit/predict estimator; a fresh LinearRegression when omitted.
        (BUG FIX: the original default `model = LinearRegression()` was a
        mutable default — one shared instance refitted by every call.)
    target : name of the column to predict.
    """
    if model is None:
        model = LinearRegression()
    trainset, testset = split_data_online(df)
    model.fit(X=trainset[feature], y=trainset[target])
    result_purchase_lr = model.predict(testset[feature])
    return result_purchase_lr
def generate_under_result(df: pd.DataFrame, feature: Iterable, model = None, target:str = 'total_purchase_amt')->Iterable:
    """Fit on the offline train window (Apr-Jul 2014) and predict August.

    Parameters
    ----------
    df : feature table with a 'date' column.
    feature : column names used as model inputs.
    model : any fit/predict estimator; a fresh LinearRegression when omitted.
        (BUG FIX: the original default `model = LinearRegression()` was a
        mutable default — one shared instance refitted by every call.)
    target : name of the column to predict.
    """
    if model is None:
        model = LinearRegression()
    trainset, testset = split_data_underline(df)
    model.fit(X=trainset[feature], y=trainset[target])
    result_purchase_lr = model.predict(testset[feature])
    return result_purchase_lr
# 生成线上提交的格式
def normalize_upload_file(result_purchase_lr: Iterable, result_redeem_lr: Iterable, testset: pd.DataFrame)->pd.DataFrame:
    """Format predictions for submission: date (YYYYMMDD), purchase, redeem.

    BUG FIX: the original wrote the predictions into the caller's `testset`
    in place and then assigned into a column slice of it, mutating shared
    state and triggering pandas' SettingWithCopyWarning. This version works
    on a copy and leaves the caller's frame untouched.
    """
    upload = testset.copy()
    upload['total_purchase_amt'] = result_purchase_lr
    upload['total_redeem_amt'] = result_redeem_lr
    upload = upload[['date','total_purchase_amt','total_redeem_amt']]
    # Dates go out as compact YYYYMMDD strings.
    upload['date'] = upload['date'].astype(str).str.replace('-','')
    return upload
# 线上结果可视化
def draw_result(result_purchase_lr: Iterable, result_redeem_lr: Iterable, testset: pd.DataFrame):
    """Plot both online prediction series against the day of month."""
    days = testset['date'].dt.day
    fig = plt.figure(figsize=(10,4))
    for series, tag in ((result_purchase_lr, 'online_purchase'),
                        (result_redeem_lr, 'online_redeem')):
        plt.plot(days, series, label=tag)
    plt.legend(loc='best')
    plt.title("The predict values")
    plt.xlabel("Time")
    plt.ylabel("Amount")
# 重载DataFrame加法
def add_two_df(df1, df2, features = None, left_a = 0.45, right_a = 0.55):
    """Weighted elementwise sum of two frames: left_a*df1 + right_a*df2.

    Applies to `features` (default: every column except 'interval') and
    returns a new frame; neither input is modified.
    """
    combined = df1.copy()
    if not features:
        features = [col for col in combined.columns if col != 'interval']
    for col in features:
        combined[col] = combined[col] * left_a + df2[col] * right_a
    return combined
# 重载DataFrame乘法
def scale_df(df1, features = None, eta = 1):
    """Return a copy of df1 with `features` multiplied by `eta`.

    Defaults to every column except 'interval'; the input is not modified.
    """
    scaled = df1.copy()
    if not features:
        features = [col for col in scaled.columns if col != 'interval']
    for col in features:
        scaled[col] = scaled[col] * eta
    return scaled
建模测试
一、仅使用IS特征
# Load the engineered feature table and parse dates.
data = pd.read_csv('Feature/feature0522.csv')
data['date'] = pd.to_datetime(data['date'])
trainset, testset = split_data_underline(data)
# Offline linear predictions (train Apr-Jul, test Aug) for both targets.
result_purchase_lr = generate_under_result(data, [x for x in data.columns if x not in ['total_purchase_amt','total_redeem_amt','date']], target='total_purchase_amt')
result_redeem_lr = generate_under_result(data, [x for x in data.columns if x not in ['total_purchase_amt','total_redeem_amt','date']], target='total_redeem_amt')
在八月份预测结果
# Overall August score of the linear baseline.
total_AE(result_purchase_lr, result_redeem_lr, testset['total_purchase_amt'], testset['total_redeem_amt'])
181.33136087344428
滑窗测试结果
# Rolling backtest of the purchase model across the 7 weekly splits.
draw_eva_table(week_evalution_single(data, model=LinearRegression(), types = 'purchase'))
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
0 | interval | |
---|---|---|
0 | 183.065890 | 2014-08-01 |
1 | 171.457725 | 2014-07-25 |
2 | 171.287243 | 2014-07-18 |
3 | 178.279023 | 2014-07-11 |
4 | 155.622502 | 2014-07-04 |
5 | 172.069947 | 2014-06-27 |
6 | 170.600151 | 2014-06-20 |
draw_eva_table(week_evalution_single(data, LinearRegression(), 'redeem'))
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
0 | interval | |
---|---|---|
0 | 179.912201 | 2014-08-01 |
1 | 163.008995 | 2014-07-25 |
2 | 196.629135 | 2014-07-18 |
3 | 192.736956 | 2014-07-11 |
4 | 183.169703 | 2014-07-04 |
5 | 169.159389 | 2014-06-27 |
6 | 146.426813 | 2014-06-20 |
八月份预测图与真实图
# Compare August predictions against ground truth.
visual(result_purchase_lr, result_redeem_lr, testset)
# Refit on Apr-Aug and predict the online month (September).
result_purchase_lr = generate_online_result(data, [x for x in trainset.columns if x not in ['total_purchase_amt','total_redeem_amt','date']], LinearRegression(),'total_purchase_amt')
result_redeem_lr = generate_online_result(data, [x for x in trainset.columns if x not in ['total_purchase_amt','total_redeem_amt','date']], LinearRegression(),'total_redeem_amt')
九月份预测效果图(线性)
trainset, testset = split_data_online(data)
draw_result(result_purchase_lr, result_redeem_lr, testset)
# Write the submission file for the IS-feature linear model.
normalize_upload_file(result_purchase_lr, result_redeem_lr, testset).to_csv('20190612_only_is.csv',index=False,header=None)
二、多模型对比
def multi_model_eva(data, types:str = 'purchase'):
    """Backtest six regressors with week_evalution_single.

    Merges the per-model score tables on 'interval', naming each score column
    after the estimator's class name, and puts 'interval' first.
    """
    results = pd.DataFrame()
    for model in [LinearRegression(), DecisionTreeRegressor(), RandomForestRegressor(), GradientBoostingRegressor(), MLPRegressor(solver='lbfgs'), xgb.XGBRegressor(objective='reg:squarederror')]:
        if results.empty:
            # First model seeds the table.
            results = draw_eva_table(week_evalution_single(data, model, types)).rename(columns={0: repr(model).split('(')[0]})
        else:
            results = pd.merge(results, \
                draw_eva_table(week_evalution_single(data, model, types)).rename(columns={0: repr(model).split('(')[0]}), on='interval')
    results = results[['interval'] + [x for x in results.columns if x != 'interval']]
    return results
add_two_df(multi_model_eva(data, 'purchase'), multi_model_eva(data, 'redeem'))
interval | LinearRegression | DecisionTreeRegressor | RandomForestRegressor | GradientBoostingRegressor | MLPRegressor | XGBRegressor | |
---|---|---|---|---|---|---|---|
0 | 2014-08-01 | 181.331361 | 169.030216 | 176.632192 | 176.240388 | 155.715286 | 167.801773 |
1 | 2014-07-25 | 166.810924 | 168.392443 | 160.215833 | 163.374931 | 153.219054 | 155.202897 |
2 | 2014-07-18 | 185.225284 | 175.335379 | 174.538846 | 181.007745 | 161.291556 | 169.279106 |
3 | 2014-07-11 | 186.230886 | 166.479808 | 168.981595 | 186.382701 | 163.734488 | 164.596057 |
4 | 2014-07-04 | 170.773462 | 158.429039 | 162.754653 | 159.026518 | 152.748522 | 161.770166 |
5 | 2014-06-27 | 170.469141 | 159.063455 | 165.006343 | 160.792327 | 152.552558 | 157.707240 |
6 | 2014-06-20 | 157.304815 | 159.322429 | 170.665713 | 160.345086 | 155.812109 | 159.126603 |
三、劣汰后特征对比
# Feature tables after pruning weak features, one per target.
data_purchase = pd.read_csv('Feature/purchase_feature_droped_0614.csv')
data_purchase['date'] = pd.to_datetime(data_purchase['date'])
data_redeem = pd.read_csv('Feature/redeem_feature_droped_0614.csv')
data_redeem['date'] = pd.to_datetime(data_redeem['date'])
# Offline predictions on the pruned feature sets.
trainset_purchase, testset_purchase = split_data_underline(data_purchase)
result_purchase_lr = generate_under_result(data_purchase, [x for x in data_purchase.columns
                                           if x not in ['total_purchase_amt','total_redeem_amt','date']],
                                           target='total_purchase_amt')
trainset_redeem, testset_redeem = split_data_underline(data_redeem)
result_redeem_lr = generate_under_result(data_redeem, [x for x in data_redeem.columns
                                         if x not in ['total_purchase_amt','total_redeem_amt','date']],
                                         target='total_redeem_amt')
total_AE(result_purchase_lr, result_redeem_lr, testset_purchase['total_purchase_amt'], testset_redeem['total_redeem_amt'])
179.9616051090385
# Model comparison on the pruned features.
add_two_df(multi_model_eva(data_purchase, 'purchase'), multi_model_eva(data_redeem, 'redeem'))
interval | LinearRegression | DecisionTreeRegressor | RandomForestRegressor | GradientBoostingRegressor | MLPRegressor | XGBRegressor | |
---|---|---|---|---|---|---|---|
0 | 2014-08-01 | 179.961605 | 163.019673 | 175.398462 | 177.133802 | 184.750718 | 167.409556 |
1 | 2014-07-25 | 173.177626 | 169.003929 | 174.898641 | 170.808069 | 172.152014 | 158.406531 |
2 | 2014-07-18 | 185.057360 | 179.395250 | 180.712792 | 184.696870 | 181.826961 | 174.166971 |
3 | 2014-07-11 | 184.842161 | 171.521045 | 171.399065 | 173.751073 | 184.777610 | 174.109277 |
4 | 2014-07-04 | 175.685638 | 155.829969 | 165.596484 | 164.109854 | 172.663122 | 147.698808 |
5 | 2014-06-27 | 174.055984 | 171.387970 | 186.971153 | 181.847785 | 168.671175 | 166.659716 |
6 | 2014-06-20 | 160.951814 | 169.898831 | 167.979004 | 167.656092 | 163.895508 | 156.299719 |
八月份预测效果(线性)
# Visual check of August fit, then refit for the online month.
trainset, testset = split_data_underline(data)
visual(result_purchase_lr, result_redeem_lr, testset)
result_purchase_lr = generate_online_result(data_purchase, [x for x in data_purchase.columns if x not in ['total_purchase_amt','total_redeem_amt','date']], LinearRegression(),'total_purchase_amt')
result_redeem_lr = generate_online_result(data_redeem, [x for x in data_redeem.columns if x not in ['total_purchase_amt','total_redeem_amt','date']], LinearRegression(),'total_redeem_amt')
生成线上效果(线性)
可以看到28号很高(work in Sunday)
trainset, testset = split_data_online(data)
draw_result(result_purchase_lr, result_redeem_lr, testset)
purchase feature
‘dis_to_nowork’, ‘dis_to_work’, ‘dis_from_work’, ‘purchase_weekdayrate’,
‘redeem_dayrate’, ‘weekday_onehot_5’, ‘weekday_onehot_6’,
‘dis_from_nowork’, ‘is_holiday’, ‘weekday_onehot_1’, ‘weekday_onehot_2’,
‘weekday_onehot_0’, ‘dis_from_middleofweek’, ‘dis_from_holiendday’,
‘weekday_onehot_3’, ‘is_lastday_of_holiday’, ‘is_firstday_of_holiday’,
‘weekday_onehot_4’, ‘is_worked_yestday’, ‘is_second_week’,
‘is_third_week’, ‘dis_from_startofmonth’, ‘dis_from_holiday’,
‘dis_to_nowork%%%%dis_from_purchase_peak’, ‘total_purchase_amt’,
‘total_redeem_amt’, ‘date’
Redeem feature
‘is_work’, ‘dis_from_redeem_valley’, ‘purchase_weekdayrate’,
‘redeem_dayrate’, ‘weekday_onehot_5’, ‘is_gonna_work_tomorrow’,
‘is_holiday’, ‘dis_from_nowork’, ‘weekday_onehot_0’, ‘weekday_onehot_1’,
‘is_firstday_of_holiday’, ‘weekday_onehot_2’, ‘is_lastday_of_holiday’,
‘dis_from_holiday’, ‘is_work_on_sunday’, ‘is_firstday_of_work’,
‘is_secday_of_month’, ‘dis_from_holiendday’,
‘dis_from_redeem_valley%%%%dis_from_redeem_peak’, ‘total_purchase_amt’,
‘total_redeem_amt’, ‘date’
# Submission for the pruned-feature linear model.
normalize_upload_file(result_purchase_lr, result_redeem_lr, testset).to_csv('20190614_droped.csv',index=False,header=None)
生成线上效果(MLP)
# Same online pipeline with an MLP regressor.
result_purchase_lr = generate_online_result(data_purchase, [x for x in data_purchase.columns
                                            if x not in ['total_purchase_amt','total_redeem_amt','date']],
                                            MLPRegressor(solver='lbfgs'),'total_purchase_amt')
result_redeem_lr = generate_online_result(data_redeem, [x for x in data_redeem.columns
                                          if x not in ['total_purchase_amt','total_redeem_amt','date']],
                                          MLPRegressor(solver='lbfgs'),'total_redeem_amt')
trainset, testset = split_data_online(data)
draw_result(result_purchase_lr, result_redeem_lr, testset)
normalize_upload_file(result_purchase_lr, result_redeem_lr, testset).to_csv('20190614_droped_MLP.csv',index=False,header=None)
生成线上效果(Xgboost)
# Same online pipeline with XGBoost.
result_purchase_lr = generate_online_result(data_purchase, [x for x in data_purchase.columns
                                            if x not in ['total_purchase_amt','total_redeem_amt','date']],
                                            xgb.XGBRegressor(objective='reg:squarederror'),'total_purchase_amt')
result_redeem_lr = generate_online_result(data_redeem, [x for x in data_redeem.columns
                                          if x not in ['total_purchase_amt','total_redeem_amt','date']],
                                          xgb.XGBRegressor(objective='reg:squarederror'),'total_redeem_amt')
trainset, testset = split_data_online(data)
draw_result(result_purchase_lr, result_redeem_lr, testset)
normalize_upload_file(result_purchase_lr, result_redeem_lr, testset).to_csv('20190615_droped_XGB.csv',index=False,header=None)
四、AIC模型平均
# AIC-weighted model averaging: sample 100 feature subsets per target.
purchase_features, purchase_weight = multi_model(data_purchase, model=LinearRegression(), types = 'purchase')
redeem_features, redeem_weight = multi_model(data_redeem, model=LinearRegression(), types = 'redeem')
def eva_for_aic(data_purchase, purchase_features, purchase_weight):
    """Weighted average of backtest tables over AIC-selected feature subsets.

    Evaluates each subset with multi_model_eva and accumulates the score
    tables using the ensemble weights from multi_model. Relies on the
    module-level `labels` list.

    NOTE(review): the evaluation type is hard-coded to 'purchase' even when
    this is called with redeem data (see the call sites) — confirm intended.
    """
    results = pd.DataFrame()
    for index, feature in enumerate(purchase_features):
        if results.empty:
            # First subset seeds the table, scaled by its weight.
            results = scale_df(multi_model_eva(data_purchase[['date'] + labels + feature], 'purchase'),
                               eta = purchase_weight[index])
        else:
            # Subsequent subsets are added with weight purchase_weight[index].
            results = add_two_df(results, multi_model_eva(data_purchase[['date'] + labels + feature], 'purchase')
                                 , left_a = 1,
                                 right_a = purchase_weight[index])
    return results
# Combined purchase/redeem score table for the AIC-averaged ensembles.
add_two_df(eva_for_aic(data_purchase, purchase_features, purchase_weight),
           eva_for_aic(data_redeem, redeem_features, redeem_weight))
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
interval | LinearRegression | DecisionTreeRegressor | RandomForestRegressor | GradientBoostingRegressor | MLPRegressor | XGBRegressor | |
---|---|---|---|---|---|---|---|
0 | 2014-08-01 | 188.741337 | 183.739410 | 187.347992 | 188.011877 | 191.037310 | 184.762524 |
1 | 2014-07-25 | 162.092486 | 165.415100 | 166.043526 | 167.257643 | 164.424520 | 166.576594 |
2 | 2014-07-18 | 163.022860 | 167.524104 | 168.983450 | 166.397735 | 165.082735 | 166.585841 |
3 | 2014-07-11 | 165.790349 | 172.584968 | 171.045943 | 173.085994 | 166.181061 | 171.459492 |
4 | 2014-07-04 | 158.676371 | 156.351233 | 155.651067 | 160.894718 | 160.442634 | 159.819203 |
5 | 2014-06-27 | 166.976077 | 171.751816 | 176.117904 | 173.648444 | 170.185229 | 172.523209 |
6 | 2014-06-20 | 167.495557 | 168.043302 | 172.188296 | 172.088764 | 168.723108 | 169.890986 |
五、针对残差建模
# Residual-modelling features (rate = actual / cyclic baseline) per target,
# plus the cycle-only baseline predictions.
data_purchase = pd.read_csv('Feature/0615_residual_purchase_origined.csv')
data_purchase['date'] = pd.to_datetime(data_purchase['date'])
data_redeem = pd.read_csv('Feature/0615_residual_redeem_origined.csv')
data_redeem['date'] = pd.to_datetime(data_redeem['date'])
base = pd.read_csv('Data/base2.csv')
def generate_residual_result(data, base, model=None, types = 'purchase', split_time = datetime.datetime(2014,8,1)):
    """Predict a rate from the residual features, then scale the cyclic baseline.

    Trains `model` on `data` between 2014-04-01 and `split_time`, predicts the
    rate for the following month, and multiplies it by the cycle-only forecast
    taken from `base` ('total_<types>_predicted_by_cycle').

    model : fit/predict estimator; fresh LinearRegression when omitted
        (fixes the shared mutable-default instance of the original).
    """
    if model is None:
        model = LinearRegression()
    a_month = relativedelta(months=1)
    trainset = data[(datetime.datetime(2014,4,1) <= data['date']) & (data['date'] < split_time)]
    testset = data[(split_time <= data['date']) & (data['date'] < split_time + a_month)]
    # BUG FIX: the feature list previously read the *global* `data_purchase`
    # instead of the `data` argument, breaking calls with other frames.
    feature = [x for x in data.columns
               if x not in ['total_purchase_amt','total_redeem_amt','date']]
    model.fit(X=trainset[feature], y=trainset['total_' + types + '_amt'])
    result_purchase_rate = model.predict(testset[feature])
    # NOTE: converts base['date'] in place; later cells rely on this side effect.
    base['date'] = pd.to_datetime(base['date'], format= "%Y%m%d")
    result_purchase_cycle = np.array(base[(base['date'] >= split_time)
                                     &(base['date'] < split_time + a_month)]['total_'+types+'_predicted_by_cycle'])
    result_purchase_residual = result_purchase_rate * np.array(result_purchase_cycle)
    return result_purchase_residual
def generate_evaluate_for_residual(model=None):
    """Rolling backtest of the residual (rate x cycle) approach.

    Uses the module-level frames `data_purchase`, `data_redeem`, `base` and
    `data` (real totals). Returns a one-column DataFrame of scores, one row
    per split date (newest first).
    """
    if model is None:
        model = LinearRegression()
    result = []
    for i in [datetime.datetime(2014, 8, 1), datetime.datetime(2014, 7, 25), datetime.datetime(2014, 7, 18), datetime.datetime(2014, 7, 11),
              datetime.datetime(2014, 7, 4), datetime.datetime(2014, 6, 27), datetime.datetime(2014, 6,20)]:
        result_purchase_residual = generate_residual_result(data_purchase, base, model=model, types='purchase', split_time = i)
        # BUG FIX: the redeem leg previously passed `data_purchase`; use the
        # redeem feature frame so each target trains on its own features.
        result_redeem_residual = generate_residual_result(data_redeem, base, model=model, types='redeem', split_time= i)
        a_month = relativedelta(months=1)
        testset = data[(data['date'] >= i) & (data['date'] < i + a_month)]
        real_purchase = testset['total_purchase_amt']
        real_redeem = testset['total_redeem_amt']
        result.append(total_AE(result_purchase_residual, result_redeem_residual, real_purchase, real_redeem))
    return pd.DataFrame(result)
def multi_model_eva_for_residual():
    """Backtest six regressor families on the residual approach.

    Merges each model's score table (column named after the estimator class)
    on their shared 'interval' column and puts 'interval' first.
    """
    results = pd.DataFrame()
    for model in [LinearRegression(), DecisionTreeRegressor(), RandomForestRegressor(), GradientBoostingRegressor(), MLPRegressor(solver='lbfgs'), xgb.XGBRegressor(objective='reg:squarederror')]:
        if results.empty:
            # First model seeds the table.
            results = draw_eva_table(generate_evaluate_for_residual(model)).rename(columns={0: repr(model).split('(')[0]})
        else:
            # Merge keys are inferred — 'interval' is the only shared column.
            results = pd.merge(results, \
                draw_eva_table(generate_evaluate_for_residual(model)).rename(columns={0: repr(model).split('(')[0]}))
    results = results[['interval'] + [x for x in results.columns if x != 'interval']]
    return results
def generate_evaluate_for_cycle():
    """Score the cycle-only baseline on the 7 backtest windows.

    Reads the module-level `base` (cycle predictions; its 'date' column must
    already be datetime) and `data` (actual totals). Returns a DataFrame with
    one 'PureTimeSeries' column of scores, newest split first.
    """
    result = []
    for i in [datetime.datetime(2014, 8, 1), datetime.datetime(2014, 7, 25), datetime.datetime(2014, 7, 18), datetime.datetime(2014, 7, 11),
              datetime.datetime(2014, 7, 4), datetime.datetime(2014, 6, 27), datetime.datetime(2014, 6,20)]:
        a_month = relativedelta(months=1)
        # Baseline forecast for the month starting at i.
        testset = base[(base['date'] >= i) & (base['date'] < i + a_month)].reset_index(drop=True)
        result_purchase_residual = testset['total_purchase_predicted_by_cycle']
        result_redeem_residual = testset['total_redeem_predicted_by_cycle']
        # Ground truth for the same month.
        testset = data[(data['date'] >= i) & (data['date'] < i + a_month)].reset_index(drop=True)
        real_purchase = testset['total_purchase_amt']
        real_redeem = testset['total_redeem_amt']
        result.append(total_AE(result_purchase_residual, result_redeem_residual, real_purchase, real_redeem))
    return pd.DataFrame(result).rename(columns={0: 'PureTimeSeries'})
pd.merge(multi_model_eva_for_residual(), draw_eva_table(generate_evaluate_for_cycle()))
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
interval | LinearRegression | DecisionTreeRegressor | RandomForestRegressor | GradientBoostingRegressor | MLPRegressor | XGBRegressor | PureTimeSeries | |
---|---|---|---|---|---|---|---|---|
0 | 2014-08-01 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 175.933714 |
1 | 2014-07-25 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 155.916275 |
2 | 2014-07-18 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 169.890622 |
3 | 2014-07-11 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 165.668307 |
4 | 2014-07-04 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 155.458113 |
5 | 2014-06-27 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 160.758547 |
6 | 2014-06-20 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 154.336379 |
(1) 只使用周期因子在8月份的预测效果
# (1) Cycle-only baseline scored on August.
_, testset = split_data_underline(data)
real_purchase = testset['total_purchase_amt']
real_redeem = testset['total_redeem_amt']
result_purchase_cycle = np.array(base[(base['date'] >= datetime.datetime(2014,8,1))&(base['date'] < datetime.datetime(2014,9,1))]['total_purchase_predicted_by_cycle'])
result_redeem_cycle = np.array(base[(base['date'] >= datetime.datetime(2014,8,1))&(base['date'] < datetime.datetime(2014,9,1))]['total_redeem_predicted_by_cycle'])
total_AE(result_purchase_cycle, result_redeem_cycle, real_purchase, real_redeem)
175.93371418259747
trainset, testset = split_data_underline(data)
visual(result_purchase_cycle, result_redeem_cycle, testset)
(2) 只使用周期因子+预测残差在8月份的预测效果(比单纯用因子好)
# (2) Cycle baseline corrected by predicted residual rates, scored on August.
trainset_purchase, testset_purchase = split_data_underline(data_purchase)
result_purchase_rate = generate_under_result(data_purchase, [x for x in data_purchase.columns
                                             if x not in ['total_purchase_amt','total_redeem_amt','date']],
                                             target='total_purchase_amt')
trainset_redeem, testset_redeem = split_data_underline(data_redeem)
result_redeem_rate = generate_under_result(data_redeem, [x for x in data_redeem.columns
                                           if x not in ['total_purchase_amt','total_redeem_amt','date']],
                                           target='total_redeem_amt')
total_AE(result_purchase_rate * result_purchase_cycle, result_redeem_rate * result_redeem_cycle, real_purchase, real_redeem)
0.0
trainset, testset = split_data_underline(data)
visual(result_purchase_rate * result_purchase_cycle, result_redeem_rate * result_redeem_cycle, testset)
(3) 生成线上结果
# (3) Online residual submission: predict September rates, scale the cycle baseline.
#data_purchase1 = data_purchase[(data_purchase['date']<datetime.datetime(2014,9,1))]
data_purchase = data_purchase.fillna(0)
data_redeem = data_redeem.fillna(0)
trainset_purchase, testset_purchase = split_data_online(data_purchase)
result_purchase_rate = generate_online_result(data_purchase, [x for x in data_purchase.columns
                                              if x not in ['total_purchase_amt','total_redeem_amt','date']],
                                              target='total_purchase_amt')
trainset_redeem, testset_redeem = split_data_online(data_redeem)
result_redeem_rate = generate_online_result(data_redeem, [x for x in data_redeem.columns
                                            if x not in ['total_purchase_amt','total_redeem_amt','date']],
                                            target='total_redeem_amt')
# Try correcting the predictions: normalize each rate series to mean 1.
result_purchase_rate = result_purchase_rate / np.mean(result_purchase_rate)
result_redeem_rate = result_redeem_rate / np.mean(result_redeem_rate)
result_purchase_cycle = np.array(base[(base['date'] >= datetime.datetime(2014,9,1))&(base['date'] < datetime.datetime(2014,10,1))]['total_purchase_predicted_by_cycle'])
result_redeem_cycle = np.array(base[(base['date'] >= datetime.datetime(2014,9,1))&(base['date'] < datetime.datetime(2014,10,1))]['total_redeem_predicted_by_cycle'])
result_purchase_residual = result_purchase_rate * result_purchase_cycle
result_redeem_residual = result_redeem_rate * result_redeem_cycle
月份周期因子线上结果(135)
draw_result(result_purchase_cycle, result_redeem_cycle, testset_redeem)
残差处理后结果
draw_result(result_purchase_residual, result_redeem_residual, testset_redeem)
normalize_upload_file(result_purchase_residual, result_redeem_residual, testset_redeem).to_csv('20190622_residual_liner.csv',index=False,header=None)