Random Forest Implementation in Python

This code implements a basic classifier class, `Classifier`, responsible for data preprocessing, model training, and evaluation; it covers data loading, missing-value handling, and data transformation. On top of it, a `TWRandomForest` class is defined as a subclass of `Classifier` dedicated to the random forest algorithm, covering model initialization, training, and hyperparameter tuning. The code also evaluates model performance and stores the results.
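For orientation, the `param` dictionary that drives both classes can be sketched as below. This is only a readable reconstruction of the example JSON embedded in `random_forest.py`'s `__main__` block near the end of this post; all values are illustrative.

```python
# Sketch of the param dict consumed by Classifier / TWRandomForest
# (reconstructed from the example JSON in random_forest.py's __main__; values illustrative).
param = {
    "id": "1679637851329",
    "appName": "rf_demo",
    "dataSourceId": "9",
    "tableName": "titanic",
    "featureCols": "Pclass,Sex,Age,SibSp,Parch,Fare,Embarked",
    "classCols": "Survived",
    "trainDataRatio": "0.7",
    "preProcessMethodList": [
        {"preProcessFeature": "Age", "preProcessMethod": "fillna", "preProcessMethodValue": "mean"},
        {"preProcessFeature": "Sex", "preProcessMethod": "transClassFeature"},
        {"preProcessFeature": "Embarked", "preProcessMethod": "transClassFeature"},
    ],
    # optional keys read by Classifier.getModelData and TWRandomForest.initModel
    "standardization": "StandardScaler",  # or "MinMaxScaler"
    "algoParam": {"nEstimators": "100", "criterion": "gini", "randomState": "",
                  "maxDepth": "", "minSamplesSplit": "", "minSamplesLeaf": ""},
}
```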

Base Class Implementation

```python
# _*_ coding: utf-8 _*_
# @Date : 2023/3/14 18:47
# @Author : Paul
# @File : classifiers.py
# @Description :
import pandas as pd
import io
import matplotlib.pyplot as plt
from core.utils.string_utils import StringUtils
from sklearn.model_selection import train_test_split
from sklearn.impute import SimpleImputer

from core.algo.base_algo import BaseAlgo
from core.data_source.meta_data_source.meta_data_source import MetaDataSource
from core.utils.data_souce_init_utils import DataSourceInitUtil
from core.utils.date_util import DateUtil


class Classifier(BaseAlgo):

    def __init__(self,
                 app_name="clusters",
                 data_source_id=None,
                 table_name=None,
                 feature_cols=None,
                 class_col=None,
                 train_size=None,
                 param=None
                 ):
        """
        Initialize the classifier.
        :param app_name: application name
        :param data_source_id: id of the data source that holds the training table
        :param table_name: name of the training table
        :param feature_cols: list of feature column names
        :param class_col: list containing the label column name
        :param train_size: training-set ratio
        :param param: full parameter dict passed in by the caller
        """
        super(Classifier, self).__init__(app_name=app_name)
        self.param = param if param is not None else {}
        # start time
        self.start_time = DateUtil.getCurrentDate()
        self.table_name = table_name
        self.feature_cols = feature_cols
        self.class_col = class_col
        self.train_size = train_size
        self.all_col = self.feature_cols + self.class_col
        self.clf = None
        # concise summary of the data (DataFrame.info())
        self.info = None
        # descriptive statistics (DataFrame.describe())
        self.describe = None
        # path of the two-dimensional distribution plot
        self.two_dim_dis_image = self.image_path + "two_dim_dis_" + app_name + "_" + DateUtil.getCurrentDateSimple() + ".png"
        # path of the prediction-result plot
        self.classifier_pred_image = self.image_path + "cluster_pred_" + app_name + "_" + DateUtil.getCurrentDateSimple() + ".png"
        # metadata database
        self.meta_data_source = MetaDataSource()
        # data source that holds the training data
        self.data_source = DataSourceInitUtil.getDataBase(self.meta_data_source,
                                                          data_source_id)
        self.labels = None
        # training-set ratio: prefer the value from param, fall back to train_size
        self.train_data_ratio = float(self.param.get("trainDataRatio", train_size or 0.7))

    def getModelData(self):
        """
        获取建模数据:输出训练集、测试集
        :return:
        """
        data_query_sql = "select {} from {}".format(",".join(self.all_col),
                         self.table_name)
        data = self.data_source.queryAll(data_query_sql)
        data = pd.DataFrame(data=data,
                                       columns = self.all_col)

        # concise summary of the data
        buf = io.StringIO()  # in-memory buffer so DataFrame.info() can be captured as a string
        data.info(buf=buf)  # write
        self.info = buf.getvalue()  # read back

        # descriptive statistics
        self.describe = data.describe()

        # collect the preprocessing strategies, skipping empty or invalid entries
        process_method_list_after_process = []
        process_method_list = self.param.get("preProcessMethodList") or []
        if len(process_method_list) > 0:
            for process_method in process_method_list:
                if process_method is None or process_method == "null":
                    continue
                pre_process_feature = process_method.get("preProcessFeature")
                if StringUtils.isBlack(pre_process_feature):
                    continue
                else:
                    process_method_list_after_process.append(process_method)
        self.param["preProcessMethodList"] = process_method_list_after_process
        if len(process_method_list_after_process) > 0:
            for process_method in process_method_list_after_process:
                pre_process_feature = process_method.get("preProcessFeature")
                preProcessMethod = process_method.get("preProcessMethod")
                preProcessMethodValue = process_method.get("preProcessMethodValue")

                # 1. deletena: drop the column entirely
                if preProcessMethod == "deletena":
                    data.drop(pre_process_feature, inplace=True, axis=1)
                # 2. fillna: impute missing values
                elif preProcessMethod == "fillna":
                    if preProcessMethodValue == "mean":
                        imp_mean = SimpleImputer()
                        data[pre_process_feature] = imp_mean.fit_transform(data[pre_process_feature].values.reshape(-1,1))
                    elif preProcessMethodValue == "median":
                        imp_median = SimpleImputer(strategy="median")
                        data[pre_process_feature] = imp_median.fit_transform(data[pre_process_feature].values.reshape(-1,1))
                    elif preProcessMethodValue == "most_frequent":
                        imp_mode = SimpleImputer(strategy="most_frequent")
                        data[pre_process_feature] = imp_mode.fit_transform(data[pre_process_feature].values.reshape(-1,1))
                    elif preProcessMethodValue == "constant_0":
                        imp_0 = SimpleImputer(strategy="constant", fill_value=0)
                        data[pre_process_feature] = imp_0.fit_transform(data[pre_process_feature].values.reshape(-1,1))
                    elif preProcessMethodValue == "constant_1":
                        imp_1 = SimpleImputer(strategy="constant", fill_value=1)
                        data[pre_process_feature] = imp_1.fit_transform(data[pre_process_feature].values.reshape(-1,1))
                # 3. transClassFeature: convert a categorical column to integer codes
                elif preProcessMethod == "transClassFeature":
                    unique_value = data[pre_process_feature].unique().tolist()
                    data[pre_process_feature] = data[pre_process_feature].apply(lambda x: unique_value.index(x))
                # 4. transType: cast the column to the requested dtype
                elif preProcessMethod == "transType":
                    if preProcessMethodValue == "int":
                        data[pre_process_feature] = data[pre_process_feature].astype("int")
                    elif preProcessMethodValue == "float":
                        data[pre_process_feature] = data[pre_process_feature].astype("float")

        # # Handle missing values: fill columns with many missing values; for features
        # # that are only missing one or two values, the affected rows can simply be dropped.
        # data["Age"] = data["Age"].fillna(data["Age"].mean())
        # # Convert categorical variables to numeric variables.
        # # Binary variables: astype converts a pandas object to a given type; unlike
        # # apply(int(x)) it can also turn text into numbers, so it is a handy way to
        # # map a binary feature to 0/1.
        # data["Sex"] = (data["Sex"] == "male").astype("int")
        # # Three-class variable:
        # self.labels = data["Embarked"].unique().tolist()
        # data["Embarked"] = data["Embarked"].apply(lambda x: self.labels.index(x))

        X = data.iloc[:, data.columns != self.class_col[0]]
        Y = data.iloc[:, data.columns == self.class_col[0]]
        # feature scaling strategy (optional)
        standardization = self.param.get("standardization")
        if standardization == "MinMaxScaler":
            from sklearn.preprocessing import MinMaxScaler
            scaler = MinMaxScaler()
            X = scaler.fit_transform(X)
        elif standardization == "StandardScaler":
            from sklearn.preprocessing import StandardScaler
            scaler = StandardScaler()
            X = scaler.fit_transform(X)

        Xtrain, Xtest, Ytrain, Ytest = train_test_split(X, Y, train_size=self.train_data_ratio)
        return [Xtrain, Ytrain], [Xtest, Ytest]


if __name__ == '__main__':
    cluster = Classifier(app_name="cluster_demo",
                         data_source_id=9,
                         table_name="titanic",
                         feature_cols=["Survived", "Pclass", "Sex", "Age", "Cabin"],
                         class_col=["Embarked"],
                         train_size=0.7)
    cluster.getModelData()
```
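The preprocessing branch of `getModelData` is easiest to understand on a small example. The following standalone sketch (plain pandas/scikit-learn, not part of the platform code) applies the same two strategies shown above, `fillna` with `mean` via `SimpleImputer` and `transClassFeature` via integer codes, to a toy DataFrame:

```python
import pandas as pd
from sklearn.impute import SimpleImputer

# toy data: one numeric column with a missing value, one categorical column
data = pd.DataFrame({
    "Age": [22.0, None, 35.0, 28.0],
    "Sex": ["male", "female", "female", "male"],
})

# "fillna" with "mean": impute missing values with the column mean
imp_mean = SimpleImputer()  # strategy="mean" is the default
data["Age"] = imp_mean.fit_transform(data[["Age"]]).ravel()

# "transClassFeature": replace each category by the index of its first occurrence
unique_value = data["Sex"].unique().tolist()
data["Sex"] = data["Sex"].apply(lambda x: unique_value.index(x))

print(data)
# the missing Age becomes the mean of the other values (~28.33); Sex becomes 0/1
```

The only difference from the class above is the explicit `.ravel()`, which avoids assigning a 2-D array to a single DataFrame column.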

Random Forest Implementation

```python
# _*_ coding: utf-8 _*_
# @Date : 2023/3/24 13:42
# @Author : Paul
# @File : random_forest.py
# @Description :
from core.beans.param_train_result import ParamTrainResult
import matplotlib.pyplot as plt
from classifiers.classifier import Classifier
from core.utils.log_util import LogUtil
from core.beans.classifier_result import ClassifierResult
from sklearn.ensemble import RandomForestClassifier
from core.utils.date_util import DateUtil
from core.utils.string_utils import StringUtils
import json
import sys

class TWRandomForest(Classifier):

    def __init__(self,
                 app_name="random_forest",
                 data_source_id=None,
                 table_name=None,
                 feature_cols=None,
                 class_col=None,
                 train_size=None,
                 param=None
                 ):
        """
        Initialize the random forest classifier.
        :param app_name:
        :param data_source_id:
        :param table_name:
        :param feature_cols:
        :param class_col:
        :param train_size:
        """
        super(TWRandomForest, self).__init__(app_name=app_name,
                                             data_source_id=data_source_id,
                                             table_name=table_name,
                                             feature_cols=feature_cols,
                                             class_col=class_col,
                                             train_size=train_size,
                                             param=param)
        self.IS_MODEL_EVAL = True  # default: evaluate the model after training
        # path of the prediction-result plot
        self.tree_pred_image = "descion_tree_pred_" + app_name + "_" + DateUtil.getCurrentDateSimple()

    def initModel(self):
        """
        Initialize the model from the algorithm parameters in self.param.
        """
        algoParam = self.param["algoParam"]
        n_estimators = 100 if StringUtils.isBlack(algoParam["nEstimators"]) else int(algoParam["nEstimators"])
        criterion = "gini" if StringUtils.isBlack(algoParam["criterion"]) else algoParam["criterion"]
        randomState = None if StringUtils.isBlack(algoParam["randomState"]) else int(algoParam["randomState"])
        maxDepth = None if StringUtils.isBlack(algoParam["maxDepth"]) else int(algoParam["maxDepth"])
        minSamplesSplit = 2 if StringUtils.isBlack(algoParam["minSamplesSplit"]) else int(algoParam["minSamplesSplit"])
        minSamplesLeaf = 1 if StringUtils.isBlack(algoParam["minSamplesLeaf"]) else int(algoParam["minSamplesLeaf"])

        self.clf = RandomForestClassifier(n_estimators=n_estimators,
                                          criterion=criterion,
                                          random_state=randomState,
                                          max_depth=maxDepth,
                                          min_samples_split=minSamplesSplit,
                                          min_samples_leaf=minSamplesLeaf)

    def buildModel(self, train_data):
        """
        Train the model on the training data.
        """
        Xtrain = train_data[0]
        Ytrain = train_data[1]
        self.clf = self.clf.fit(Xtrain, Ytrain)

    def evalModel(self, train_data, test_data):
        """
        Evaluate the model on the test set and persist the result.
        """
        Xtest = test_data[0]
        Ytest = test_data[1]
        score_ = self.clf.score(Xtest, Ytest)
        var_importance = [*zip(self.feature_cols, self.clf.feature_importances_)]

        # end time
        end_time = DateUtil.getCurrentDate()
        cost_second = DateUtil.diffMin(self.start_time, end_time)

        # store the model result in MySQL
        algo_result = ClassifierResult(self.param["id"],
                                       "random_forest",
                                       self.param,
                                       self.app_name,
                                       self.info,
                                       self.describe,
                                       None,
                                       var_importance,
                                       score_,
                                       "sucess",
                                       self.start_time,
                                       end_time,
                                       cost_second)
        LogUtil.saveClassifierResult(self.meta_data_source, algo_result)

    def paramTrain(self):
        """
        Hyperparameter training: sweep one hyperparameter and plot a learning curve.
        :return:
        """
        # read the hyperparameter-training settings
        param_train = self.param["paramTrain"]
        param_name = None if StringUtils.isBlack(param_train["paramName"]) else str(param_train["paramName"])
        param_start_value = None if StringUtils.isBlack(param_train["paramStartValue"]) else int(param_train["paramStartValue"])
        param_end_value = None if StringUtils.isBlack(param_train["paramEndValue"]) else int(param_train["paramEndValue"])
        param_range_value = None if StringUtils.isBlack(param_train["paramRangeValue"]) else int(param_train["paramRangeValue"])

        # build the training / test data
        train_data, test_data = self.getModelData()
        Xtrain = train_data[0]
        Ytrain = train_data[1]
        Xtest = test_data[0]
        Ytest = test_data[1]
        eval_value_list = []
        if param_name is None or param_start_value is None or param_end_value is None or param_range_value is None or param_start_value == 1:
            error_info = "Parameters must be integers and the start value must not be 1."

            # end time
            end_time = DateUtil.getCurrentDate()
            cost_second = DateUtil.diffMin(self.start_time, end_time)
            # store the result in MySQL
            param_train_result = ParamTrainResult(self.param["id"],
                                                  "random_forest",
                                                  self.param,
                                                  self.app_name,
                                                  error_info,
                                                  "failed",
                                                  self.start_time,
                                                  end_time,
                                                  cost_second)
            LogUtil.saveParamTrainResult(self.meta_data_source, param_train_result)
        else:
            for param_value in range(param_start_value, param_end_value, param_range_value):
                if param_name == "max_depth":
                    self.clf = RandomForestClassifier(max_depth=param_value)
                elif param_name == "n_estimators":
                    self.clf = RandomForestClassifier(n_estimators=param_value)
                self.clf = self.clf.fit(Xtrain, Ytrain)
                score_ = self.clf.score(Xtest, Ytest)
                eval_value_list.append(score_)
            # save the learning curve
            param_train_image = self.image_path + "descion_tree_param_train_" + DateUtil.getCurrentDateSimple() + ".png"
            fig, ax = plt.subplots(1, 1)
            plot_title = ""
            if param_name == "max_depth":
                plot_title = "Maximum tree depth -- hyperparameter learning curve"
            elif param_name == "n_estimators":
                plot_title = "Number of base estimators -- hyperparameter learning curve"
            ax.set_title(plot_title)
            ax.plot([i for i in range(param_start_value, param_end_value, param_range_value)], eval_value_list)
            plt.savefig(param_train_image, dpi=300)
            # plt.show()

            # end time
            end_time = DateUtil.getCurrentDate()
            cost_second = DateUtil.diffMin(self.start_time, end_time)
            # store the result in MySQL
            param_train_result = ParamTrainResult(self.param["id"],
                                                  "random_forest",
                                                  self.param,
                                                  self.app_name,
                                                  param_train_image,
                                                  "success",
                                                  self.start_time,
                                                  end_time,
                                                  cost_second)
            LogUtil.saveParamTrainResult(self.meta_data_source, param_train_result)

if __name__ == '__main__':
    argv = sys.argv[1]
    # argv = "{\"algoParam\":{\"criterion\":\"gini\",\"maxDepth\":\"\",\"minSamplesLeaf\":\"\",\"minSamplesSplit\":\"\",\"nEstimators\":\"100\",\"randomState\":\"\"},\"appName\":\"lr\",\"classCols\":\"Survived\",\"dataSourceId\":\"9\",\"featureCols\":\"Pclass,Sex,Age,SibSp,Parch,Fare,Embarked\",\"id\":\"1679637851329\",\"preProcessMethodList\":[{\"preProcessFeature\":\"Age\",\"preProcessMethod\":\"fillna\",\"preProcessMethodValue\":\"mean\"},{},{\"preProcessFeature\":\"Sex\",\"preProcessMethod\":\"transClassFeature\"},{\"preProcessFeature\":\"Embarked\",\"preProcessMethod\":\"transClassFeature\"}],\"tableName\":\"titanic\",\"trainDataRatio\":\"0.7\"}"
    param = json.loads(argv)
    app_name = param["appName"]
    data_source_id = param["dataSourceId"]
    table_name = param["tableName"]
    feature_cols = param["featureCols"]
    # featureCols may arrive as a comma-separated string; Classifier expects a list
    if not isinstance(feature_cols, list):
        feature_cols = [col for col in feature_cols.split(",") if col]
    class_cols = param["classCols"]
    train_size = float(param["trainDataRatio"])
    class_cols_list = []
    if isinstance(class_cols, list):
        class_cols_list = class_cols
    else:
        class_cols_list.append(class_cols)
    classifier = TWRandomForest(app_name=app_name,
                                data_source_id=data_source_id,
                                table_name=table_name,
                                feature_cols=feature_cols,
                                class_col=class_cols_list,
                                train_size=train_size,
                                param=param)
    if "paramTrain" not in param.keys():
        classifier.execute()
    else:
        classifier.paramTrain()
```
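Note that the example JSON in `__main__` has no `paramTrain` section, so that run goes through `execute()`. Judging from the keys that `paramTrain()` reads, a hyperparameter sweep would presumably be triggered by a block like the one sketched below. The file name `rf_config.json` and all field values are made up for illustration; the script receives the whole configuration as a single JSON string in `sys.argv[1]`.

```python
import json
import subprocess

# Load an existing configuration, e.g. the example JSON shown in __main__ above
# (rf_config.json is a hypothetical file holding that JSON).
with open("rf_config.json", "r", encoding="utf-8") as f:
    param = json.load(f)

# Sweep block read by TWRandomForest.paramTrain(); only "max_depth" and
# "n_estimators" are handled in the sweep loop, and the start value must not be 1.
param["paramTrain"] = {
    "paramName": "n_estimators",
    "paramStartValue": "10",
    "paramEndValue": "100",
    "paramRangeValue": "10",
}

# random_forest.py reads the full configuration from sys.argv[1]
subprocess.run(["python", "random_forest.py", json.dumps(param)], check=True)
```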

The full source code is available on Gitee:

twinkle_algo_plat: the algorithm-side code of the Xiaoshuo algorithm platform

Quick scikit-learn example

A random forest is also very easy to use directly through scikit-learn's RandomForestClassifier class. The following example classifies the built-in handwritten-digits dataset and prints a classification report:

```python
from sklearn.ensemble import RandomForestClassifier
from sklearn.datasets import load_digits
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report

# load the handwritten-digits dataset
digits = load_digits()
# split into training and test sets
x_train, x_test, y_train, y_test = train_test_split(digits.data, digits.target, test_size=0.25, random_state=33)
# create the random forest classifier
rfc = RandomForestClassifier(n_jobs=-1)
# train the classifier
rfc.fit(x_train, y_train)
# predict on the test set
pred = rfc.predict(x_test)
# generate and print the classification report
report = classification_report(y_test, pred)
print(report)
```

The example imports RandomForestClassifier and load_digits, splits the data with train_test_split, fits the model with fit, predicts the test set with predict, and finally evaluates the predictions with classification_report. Replace the digits dataset with your own data and adjust the parameters as needed.