Model Selection with Cross-Validation in Machine Learning

Cross-validation strategies for non-time-series data (KFold) and for time-series data (TimeSeriesSplit).

# Set the number of cross-validation folds.
# Note: KFold only accepts random_state when shuffle=True; passing
# random_state with shuffle=False raises a ValueError in scikit-learn.
import numpy as np
from sklearn.model_selection import cross_val_score, KFold
kf = KFold(n_splits=10, shuffle=True, random_state=42)
# kf = KFold(n_splits=5, shuffle=True, random_state=42)
# Time-series split: each fold trains on past samples and validates on the
# following block, so the model never sees future data during validation.
from sklearn.model_selection import TimeSeriesSplit
tscv = TimeSeriesSplit(max_train_size=None, n_splits=17)
def cv_rmse(model, train_X, train_y):
    # RMSE per fold on the shuffled KFold splits.
    rmse = np.sqrt(-cross_val_score(model, train_X, train_y,
                                    scoring="neg_mean_squared_error", cv=kf))
    return rmse
def cv_mae(model, train_X, train_y):
    # Mean MAE across the time-series folds.
    cv_mae = np.mean(-cross_val_score(model, train_X, train_y,
                                      scoring="neg_mean_absolute_error", cv=tscv))
    return cv_mae
def cv_mae_(model, train_X, train_y):
    # Same as cv_mae, but also prints and plots the per-fold validation loss.
    import matplotlib.pyplot as plt
    val_loss = -cross_val_score(model, train_X, train_y,
                                scoring="neg_mean_absolute_error", cv=tscv)
    print('val loss is: {0}'.format(val_loss))
    plt.plot(val_loss, marker='o')
    plt.show()
    # Reuse the scores already computed instead of running CV a second time.
    return np.mean(val_loss)
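
To see the difference between the two splitters concretely, the following sketch (a toy array of 20 samples; the sizes are chosen only for illustration, not taken from the original data) prints the train/validation indices each one generates:

# Minimal sketch: contrast KFold and TimeSeriesSplit on 20 dummy samples.
X_demo = np.arange(20).reshape(-1, 1)
for name, splitter in [('KFold', KFold(n_splits=4, shuffle=True, random_state=42)),
                       ('TimeSeriesSplit', TimeSeriesSplit(n_splits=4))]:
    print(name)
    for train_idx, val_idx in splitter.split(X_demo):
        # TimeSeriesSplit always validates on indices after the training block.
        print('  train: {0}  val: {1}'.format(train_idx, val_idx))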


# Model selection
from sklearn.linear_model import MultiTaskLassoCV
from sklearn.linear_model import ElasticNetCV
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import AdaBoostRegressor
from sklearn.ensemble import GradientBoostingRegressor
from xgboost import XGBRegressor
from sklearn.multioutput import MultiOutputRegressor
def build_model(train_X, train_y):

    mult_lasso = MultiTaskLassoCV()
    score_lasso_mean = cv_mae(mult_lasso, train_X, train_y)
    mult_lasso = mult_lasso.fit(train_X, train_y)

    # ElasticNetCV is single-output, so wrap it in MultiOutputRegressor
    # (MultiTaskLassoCV above handles multiple targets natively).
    elncv = ElasticNetCV()
    mult_elncv = MultiOutputRegressor(elncv)
    score_elncv_mean = cv_mae(mult_elncv, train_X, train_y)
    mult_elncv = mult_elncv.fit(train_X, train_y)

    # from sklearn.cross_decomposition import PLSRegression
    # plsr = PLSRegression()
    # mult_plsr = MultiOutputRegressor(plsr)
    # # score_plsr = cross_val_score(mult_plsr, train_X, train_y, scoring='neg_mean_absolute_error', cv=kf)
    # # score_plsr_mean = -score_plsr.mean()
    # mult_plsr = mult_plsr.fit(train_X, train_y)

    # from sklearn.linear_model import BayesianRidge
    # b_ridge = BayesianRidge()
    # mult_b_ridge = MultiOutputRegressor(b_ridge)
    # score_b_ridge = cross_val_score(mult_b_ridge, train_X, train_y, scoring='neg_mean_absolute_error', cv=kf)
    # score_b_ridge_mean = -score_b_ridge.mean()
    # mult_b_ridge = mult_b_ridge.fit(train_X, train_y)

    # import lightgbm as lgb
    # lgb = lgb.LGBMRegressor()
    # mult_lgb = MultiOutputRegressor(lgb)
    # score_lgb = cross_val_score(mult_lgb, train_X, train_y, scoring='neg_mean_absolute_error', cv=kf)
    # score_lgb_mean = -score_lgb.mean()
    # mult_lgb = mult_lgb.fit(train_X, train_y)

    # import catboost as cab
    # cab = cab.CatBoostRegressor()
    # mult_cab = MultiOutputRegressor(cab).fit(train_X, train_y)

    # from sklearn.neighbors import KNeighborsRegressor
    # knn = KNeighborsRegressor()
    # mult_knn = MultiOutputRegressor(knn)
    # score_knn = cross_val_score(mult_knn, train_X, train_y, scoring='neg_mean_absolute_error', cv=kf)
    # score_knn_mean = -score_knn.mean()
    # mult_knn = mult_knn.fit(train_X, train_y)

    # from sklearn.svm import SVR
    # #svr = SVR(kernel='rbf')
    # svr = SVR()
    # mult_svr = MultiOutputRegressor(svr)
    # score_svr = cross_val_score(mult_svr, train_X, train_y, scoring='neg_mean_absolute_error', cv=kf)
    # score_svr_mean = -score_svr.mean()
    # mult_svr = mult_svr.fit(train_X, train_y)

    # Decision tree
    dtr = DecisionTreeRegressor()
    mult_dtr = MultiOutputRegressor(dtr)
    # cv_mae_dtr = cv_mae_(mult_dtr, train_X, train_y)
    cv_mae_dtr = cv_mae(mult_dtr, train_X, train_y)
    mult_dtr = mult_dtr.fit(train_X, train_y)

    # Random forest
    rf = RandomForestRegressor()
    mult_rf = MultiOutputRegressor(rf)
    # cv_mae_rf = cv_mae_(mult_rf, train_X, train_y)
    cv_mae_rf = cv_mae(mult_rf, train_X, train_y)
    mult_rf = mult_rf.fit(train_X, train_y)

    # adaboost
    adbt = AdaBoostRegressor(random_state=42)
    mult_adbt = MultiOutputRegressor(adbt)
    #cv_mae_adbt = cv_mae_(mult_adbt, train_X, train_y)
    cv_mae_adbt = cv_mae(mult_adbt, train_X, train_y)
    mult_adbt = mult_adbt.fit(train_X, train_y)
    # GBDT
    gbdt = GradientBoostingRegressor()
    mult_gbdt = MultiOutputRegressor(gbdt)
    # cv_mae_gbdt = cv_mae_(mult_gbdt, train_X, train_y)
    cv_mae_gbdt = cv_mae(mult_gbdt, train_X, train_y)
    mult_gbdt = mult_gbdt.fit(train_X, train_y)

    # xgb
    xgb = XGBRegressor()
    mult_xgb = MultiOutputRegressor(xgb)
    #cv_mae_xgb = cv_mae_(mult_xgb, train_X, train_y)
    cv_mae_xgb = cv_mae(mult_xgb, train_X, train_y)
    mult_xgb = mult_xgb.fit(train_X, train_y)

    # Surface the CV scores so the candidates can be compared at a glance.
    print('CV MAE -- lasso: {0}, elastic net: {1}, dtr: {2}, rf: {3}, '
          'adbt: {4}, gbdt: {5}, xgb: {6}'.format(
              score_lasso_mean, score_elncv_mean, cv_mae_dtr, cv_mae_rf,
              cv_mae_adbt, cv_mae_gbdt, cv_mae_xgb))
    return mult_adbt
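
Note that build_model hard-codes AdaBoost as the winner. A more general pattern, sketched below under the assumption that every candidate supports fit/predict, is to keep (name, model) pairs and return the model with the lowest CV MAE. Here candidates is a hypothetical dict mapping a name to an unfitted estimator:

def select_best_model(candidates, train_X, train_y):
    # candidates: hypothetical dict, e.g.
    # {"rf": MultiOutputRegressor(RandomForestRegressor())}.
    scores = {name: cv_mae(est, train_X, train_y) for name, est in candidates.items()}
    best_name = min(scores, key=scores.get)
    print('CV MAE per model: {0}; best: {1}'.format(scores, best_name))
    # Refit the winner on the full training set before returning it.
    return candidates[best_name].fit(train_X, train_y)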


# Save the model.
# Note: sklearn.externals.joblib was removed in scikit-learn 0.23+;
# import joblib directly instead.
import joblib
def save_model(mult_model, model_save_path):
    """
    :param mult_model: 待保存的模型对象
    :param model_save_path: 保存路径,例如 "./model.pkl"
    :return: 没有返回值
    """
    joblib.dump(mult_model, model_save_path)
# Load the model.
def load_model(model_path):
    """
    :param model_path: 模型路径
    :return: 返回加载后的模型对象
    """
    return joblib.load(model_path)
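
A minimal round trip, assuming a fitted mult_model, a test matrix test_X, and a writable working directory (all names here are illustrative, not from the original pipeline):

# Hypothetical usage: persist the fitted model, reload it, and verify the
# reloaded copy predicts identically.
save_model(mult_model, './model.pkl')
reloaded = load_model('./model.pkl')
assert np.allclose(reloaded.predict(test_X), mult_model.predict(test_X))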


# Train the final model with the tuned hyperparameters.
def train_model(train_X, train_y):
    adbt_best_params = {'n_estimators': 170, 'learning_rate': 0.21}
    dtr_best_params = {'splitter': 'best', 'max_depth': 8, 'min_samples_split': 0.11, 'min_samples_leaf': 0.03, 'random_state': 42}
    # Note: scikit-learn >= 1.2 renamed base_estimator to estimator
    # (base_estimator was removed in 1.4); use base_estimator on older versions.
    model = AdaBoostRegressor(estimator=DecisionTreeRegressor(**dtr_best_params), **adbt_best_params)
    # MultiOutputRegressor fits one AdaBoost regressor per target column.
    mult_model = MultiOutputRegressor(model)
    mult_model = mult_model.fit(train_X, train_y)
    return mult_model
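
The tuned parameters above presumably come from parameter_optimize_adbt (imported below). As a sketch, the tuned configuration can be re-scored with the same cv_mae helper before the final fit; train_X and train_y are assumed to be the training data from the main block:

# Hypothetical sanity check: score the tuned configuration with the same
# time-series CV used during model selection.
tuned = MultiOutputRegressor(AdaBoostRegressor(
    estimator=DecisionTreeRegressor(splitter='best', max_depth=8,
                                    min_samples_split=0.11,
                                    min_samples_leaf=0.03, random_state=42),
    n_estimators=170, learning_rate=0.21))
print('tuned CV MAE: {0}'.format(cv_mae(tuned, train_X, train_y)))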



import os
import pandas as pd
from parameter_optimize import parameter_optimize_adbt
# data_preprocess (and seconds_to_hms_date_list, used in the commented-out
# block below) are assumed to come from the project's own preprocessing
# module; their import is not shown in the original.
if __name__ == '__main__':
    # Preprocess the data (y included).
    file_name = 'HLK-21C02(4℃).csv'  # model the phase-1 data
    file_name_ = os.path.splitext(file_name)[0]
    n_cnt = 4
    df_file = data_preprocess(file_name_, n_cnt)
    df_y = df_file

    # Split into training and test sets: the last 20% of rows, kept in
    # time order, form the test set.
    goals_list = [str(x) for x in range(0, n_cnt * 2)]  # y labels
    features_list = [ele for ele in list(df_file.columns) if ele not in goals_list]  # x labels
    num_test = int(len(df_file) * 0.2)  # number of test rows
    test_y = df_y.loc[len(df_file) - num_test:][goals_list]
    test_y = test_y.reset_index(drop=True)
    df_train = df_file.loc[:len(df_file) - num_test - 1]
    df_train = df_train.reset_index(drop=True)
    train_X = df_train[features_list]
    train_y = df_train[goals_list]
    df_test = df_file.loc[len(df_file) - num_test:]
    df_test = df_test.reset_index(drop=True)
    test_X = df_test[features_list]
    # Build models (model selection):
    # mult_model = build_model(train_X, train_y)
    # Hyperparameter optimization: pick the best AdaBoost parameters.
    # parameter_optimize_adbt(train_X, train_y)
    # Train the final model:
    mult_model = train_model(train_X, train_y)
    # Predict on the test set.
    res_pre = mult_model.predict(test_X)
    res_pre = np.round(res_pre)
    df_res = pd.DataFrame(res_pre)
    # Correct the predictions: values above 86400 seconds (one day) have
    # wrapped past midnight, so subtract 86400 from them.
    df_res[0] = df_res[0].apply(lambda x: x - 86400 if x > 86400 else x)
    df_res[len(goals_list) - 1] = df_res[len(goals_list) - 1].apply(lambda x: x - 86400 if x > 86400 else x)
    # Difference between predictions and ground truth.
    diff_y_yhat = df_res.values - test_y
    eval_model = np.mean(np.mean(abs(df_res.values - test_y.values)))
    eval_model_3 = np.mean(abs(df_res.values - test_y.values), axis=1)
    # Evaluate the model on the test set.
    from sklearn.metrics import mean_absolute_error
    eval_model_1 = mean_absolute_error(test_y.values, df_res, multioutput='raw_values')
    eval_model_2 = mean_absolute_error(test_y.values, df_res)
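
    # Toy illustration (numbers invented, not from this data set) of the two
    # mean_absolute_error variants used above:
    #   y_true = [[0, 0], [1, 1]], y_pred = [[0, 1], [1, 3]]
    #   multioutput='raw_values' -> array([0. , 1.5])  # one MAE per target
    #   default 'uniform_average' -> 0.75              # mean over targets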

    # Convert the results to h:m:s strings and take differences.
    # Convert the test-set seconds to hour-minute-second format:
    # for goal in goals_list:
    #     goal = str(goal)
    #     test_y[goal] = seconds_to_hms_date_list(test_y[goal])
    #     # Convert the predictions to hour-minute-second format:
    #     goal = int(goal)
    #     df_res[goal] = seconds_to_hms_date_list(df_res[goal])

    print('test MAE (overall): {0}, per target: {1}'.format(eval_model_2, eval_model_1))