sklearn 回归分析汇总

data:

train.csv

f1,f2,f3,f4,f5
25290,806.6666667,1405,39810,2211.666667
25032,772.6111111,1390.666667,38939,2163.277778
24751,763.9444444,1375.055556,38502,2139
24550,757.7222222,1363.888889,38189,2121.611111
24294,749.8333333,1349.666667,37791,2099.5
24008,741,1333.777778,37346,2074.777778
23866,736.6111111,1325.888889,37125,2062.5
23715,731.9444444,1317.5,36890,2049.444444
23524,726.0555556,1306.888889,36593,2032.944444
23403,722.3333333,1300.166667,36405,2022.5
23232,717.0555556,1290.666667,36139,2007.722222
23065,711.8888889,1281.388889,35879,1993.277778
23041,711.1666667,1280.055556,35842,1991.222222
21607,666.8888889,1200.388889,33611,1867.277778

future.csv

f0,f1,f2,f3,f4,f5
14520,25290,806.6666667,1405,39810,2211.666667
13907,25032,772.6111111,1390.666667,38939,2163.277778
13751,24751,763.9444444,1375.055556,38502,2139
13639,24550,757.7222222,1363.888889,38189,2121.611111
13497,24294,749.8333333,1349.666667,37791,2099.5
13338,24008,741,1333.777778,37346,2074.777778
13259,23866,736.6111111,1325.888889,37125,2062.5
13175,23715,731.9444444,1317.5,36890,2049.444444

配置文件:configbak.ini

[train data path]
trainDataPath = ./data/train_data/train.csv
[model path]
modelPath = ./model/uw_site/regression/
modelName = 20200226170610_uw_site_rf_model.model
[prediction result path]
predictionResultPath = ./prediction_result/
predictionResultName = predict_result_001.csv
[result filter threshold]
threshold = 0.5
# pandas 参数设置
[pandas option set]
max_rows = 100
max_columns = 500
[data split params]
test_size = 0.2
random_state = 0
[model params]
n_estimators = 60
max_depth = 10
min_samples_split = 20
min_samples_leaf = 20
oob_score = True
[elasticsearch]
es_data_path = data/ESData/
es_ip = 10.10.167.11
index_name = conductor
index_type = task
es_data_path_capacity = 20
es_data_path_buffer = 10
scheduler_freq = 10

程序

# coding=utf-8

from sklearn.model_selection import *
from sklearn.ensemble import RandomForestRegressor
from sklearn.externals import joblib
import pandas as pd
import configparser
import numpy as np
import matplotlib.pyplot as plt
from sklearn import *
from sklearn.ensemble import BaggingRegressor
from sklearn.tree import ExtraTreeRegressor

# ---- Load configuration from configbak.ini ----
cf = configparser.ConfigParser()
cf.read("./configbak.ini")
trainDataPath = cf.get("train data path", "trainDataPath")  # training-set CSV path
modelPath = cf.get("model path", "modelPath")  # directory where trained models are saved
test_size = cf.getfloat("data split params", "test_size")  # held-out test fraction
random_state = cf.getint("data split params", "random_state")  # split seed for reproducibility
n_estimators = cf.getint("model params", "n_estimators")  # number of trees in the forest
max_depth = cf.getint("model params", "max_depth")  # maximum depth of each tree
min_samples_split = cf.getint("model params", "min_samples_split")  # min samples to split a node
min_samples_leaf = cf.getint("model params", "min_samples_leaf")  # min samples at a leaf
# getboolean() parses "True"/"False" correctly; cf.get() would return the raw
# string, and a non-empty string like "False" is truthy when used as a flag.
oob_score = cf.getboolean("model params", "oob_score")  # use out-of-bag samples to score

# ---- Load data ----
# drop_duplicates() guards against repeated rows in the source CSVs.
df_original_data = pd.read_csv(trainDataPath, encoding='gbk').drop_duplicates()
future_data = pd.read_csv("./data/train_data/future.csv", encoding="gbk").drop_duplicates()
# .loc replaces DataFrame.ix, which was removed in pandas 1.0.
# NOTE(review): label slicing 'f1':'f4' is inclusive of both endpoints.
futrue_x = future_data.loc[:, 'f1':'f4']
futrue_y = future_data.loc[:, 'f5']

# Feature subset previously selected via a chi-square test.
c_features_select = ['f1', 'f2', 'f3', 'f4']
trainData = df_original_data[c_features_select]
target = df_original_data['f5']

# ---- Split the data into train and test sets ----
# The split parameters come from the config file as strings, so cast here.
split_options = {
    "test_size": float(test_size),
    "random_state": int(random_state),
}
X_train, X_test, y_train, y_test = train_test_split(trainData, target, **split_options)

# ---- Candidate regression models ----
# Decision-tree regression
model_DecisionTreeRegressor = tree.DecisionTreeRegressor()
# Linear regression
model_LinearRegression = linear_model.LinearRegression()
# Support-vector regression
model_SVR = svm.SVR()
# K-nearest-neighbors regression
model_KNeighborsRegressor = neighbors.KNeighborsRegressor()
# Random-forest regression (20 trees)
model_RandomForestRegressor = ensemble.RandomForestRegressor(n_estimators=20)
# AdaBoost regression (50 estimators)
model_AdaBoostRegressor = ensemble.AdaBoostRegressor(n_estimators=50)
# Gradient-boosted regression trees (100 estimators)
model_GradientBoostingRegressor = ensemble.GradientBoostingRegressor(n_estimators=100)
# Bagging regression
model_BaggingRegressor = BaggingRegressor()
# Extremely-randomized tree regression
model_ExtraTreeRegressor = ExtraTreeRegressor()
# ARD (automatic relevance determination) Bayesian regression
model_ARDRegression = linear_model.ARDRegression()
# Bayesian ridge regression
model_BayesianRidge = linear_model.BayesianRidge()
# Theil-Sen estimator
model_TheilSenRegressor = linear_model.TheilSenRegressor()
# RANSAC (random sample consensus) regression
model_RANSACRegressor = linear_model.RANSACRegressor()
# Random-forest model configured from configbak.ini.
RF_model = RandomForestRegressor(n_estimators=int(n_estimators),
                                 max_depth=int(max_depth),
                                 min_samples_split=int(min_samples_split),
                                 min_samples_leaf=int(min_samples_leaf),
                                 # The config value may arrive as the string
                                 # "True"/"False"; any non-empty string is
                                 # truthy, so coerce to a real bool explicitly.
                                 oob_score=str(oob_score).strip().lower() in ("1", "true", "yes", "on"))


def featureImportant(X_train, model):
    """Print each training feature's importance, most important first.

    :param X_train: DataFrame of training features; its column names are
        used as the feature labels.
    :param model: a fitted estimator exposing ``feature_importances_``
        (e.g. a random forest).
    :return: None — the ranking is printed to stdout.
    """
    feat_labels = X_train.columns
    importances = model.feature_importances_
    # Feature indices sorted by importance, descending.
    indices = np.argsort(importances)[::-1]
    for rank, idx in enumerate(indices, start=1):
        print("%2d) %-*s %f" % (rank, 30, feat_labels[idx], importances[idx]))


def try_different_method(model_name, model):
    """Fit *model*, score it, persist it, and plot test + future predictions.

    :param model_name: label used in the saved-model filename and plot title.
    :param model: an unfitted sklearn-style regressor; fitted in place here.
    :return: None — the model is dumped to disk and a plot is shown.
    """
    model.fit(X_train, y_train)
    score = model.score(X_test, y_test)  # R^2 on the held-out test split
    result = model.predict(X_test)
    future = model.predict(futrue_x)
    # Persist the model that was just trained. (The original code dumped the
    # global RF_model here regardless of which model was passed in.)
    joblib.dump(value=model, filename=modelPath + model_name + "_uw_site_rf_model.model")
    # 模型评估 召回率,准确率,oob袋外准确率
    # lddutils.evaluation(RF_model, y_test, y_pred_test)
    # Feature-importance report — only tree-based ensembles expose the
    # attribute, so guard it; linear/SVR/KNN models would raise otherwise.
    if hasattr(model, "feature_importances_"):
        featureImportant(X_train, model)
    plt.figure(figsize=(15, 5))
    plt.plot(np.arange(len(result)), y_test, 'go-', label='true value')
    plt.plot(np.arange(len(result)), result, 'ro-', label='predict value')
    plt.plot(np.arange(len(result), len(result) + len(future)), futrue_y, 'yo-', label='future true value')
    plt.plot(np.arange(len(result), len(result) + len(future)), future, 'bo-', label='future predict value')
    plt.title(model_name + ', score: %f' % score)
    plt.legend()
    plt.show()


# Fit, evaluate, save, and plot every candidate regressor in turn.
_candidates = [
    ("随机森林模型", RF_model),
    ("决策树模型", model_DecisionTreeRegressor),
    ("线性回归结果", model_LinearRegression),
    ("SVM回归结果", model_SVR),
    ("KNN回归结果", model_KNeighborsRegressor),
    ("Adaboost回归结果", model_AdaBoostRegressor),
    ("GBRT回归结果", model_GradientBoostingRegressor),
    ("Bagging回归结果", model_BaggingRegressor),
    ("极端随机树回归结果", model_ExtraTreeRegressor),
    ("贝叶斯ARD回归结果", model_ARDRegression),
    ("贝叶斯岭回归结果", model_BayesianRidge),
    ("泰尔森估算回归结果", model_TheilSenRegressor),
    ("随机抽样一致性算法", model_RANSACRegressor),
]
for _label, _model in _candidates:
    try_different_method(_label, _model)

模型保存: [image placeholder — screenshot omitted in extraction]
模型性能评估及可视化: [image placeholder — screenshot omitted in extraction]

  • 1
    点赞
  • 10
    收藏
    觉得还不错? 一键收藏
  • 1
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论 1
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值