Implementing Random Forest with NumPy

from __future__ import division, print_function
import numpy as np
import math
import progressbar

# Import helper functions (only the ones this module actually uses)
from mlfromscratch.utils import train_test_split, get_random_subsets, accuracy_score
from mlfromscratch.supervised_learning import ClassificationTree
from mlfromscratch.utils.misc import bar_widgets


class RandomForest():
    """Random Forest classifier. Uses a collection of classification trees that
    trains on random subsets of the data using a random subsets of the features.

    Parameters:
    -----------
    n_estimators: int
        The number of classification trees that are used.
    max_features: int
        The maximum number of features that the classification trees are allowed to
        use.
    min_samples_split: int
        The minimum number of samples needed to make a split when building a tree.
    min_gain: float
        The minimum information gain required to split the tree further.
    max_depth: int
        The maximum depth of a tree.
    """
    def __init__(self, n_estimators=100, max_features=None, min_samples_split=2,
                 min_gain=0, max_depth=float("inf")):
        self.n_estimators = n_estimators    # Number of trees
        self.max_features = max_features    # Maximum number of features per tree
        self.min_samples_split = min_samples_split
        self.min_gain = min_gain            # Minimum information gain req. to continue
        self.max_depth = max_depth          # Maximum depth for tree
        self.progressbar = progressbar.ProgressBar(widgets=bar_widgets)

        # Initialize decision trees
        self.trees = []
        for _ in range(n_estimators):
            self.trees.append(
                ClassificationTree(
                    min_samples_split=self.min_samples_split,
                    min_impurity=min_gain,
                    max_depth=self.max_depth))

    def fit(self, X, y):
        n_features = np.shape(X)[1]
        # If max_features has not been defined => select it as
        # sqrt(n_features)
        if not self.max_features:
            self.max_features = int(math.sqrt(n_features))

        # Choose one random subset of the data for each tree
        subsets = get_random_subsets(X, y, self.n_estimators)

        for i in self.progressbar(range(self.n_estimators)):
            X_subset, y_subset = subsets[i]
            # Feature bagging: select a random subset of the features
            # (without replacement, so a tree never sees duplicate columns)
            idx = np.random.choice(range(n_features), size=self.max_features, replace=False)
            # Save the indices of the features for prediction
            self.trees[i].feature_indices = idx
            # Choose the features corresponding to the indices
            X_subset = X_subset[:, idx]
            # Fit the tree to the data
            self.trees[i].fit(X_subset, y_subset)

    def predict(self, X):
        y_preds = np.empty((X.shape[0], len(self.trees)))
        # Let each tree make a prediction on the data
        for i, tree in enumerate(self.trees):
            # Indices of the features that the tree has trained on
            idx = tree.feature_indices
            # Make a prediction based on those features
            prediction = tree.predict(X[:, idx])
            y_preds[:, i] = prediction
            
        y_pred = []
        # For each sample, take a majority vote over the trees' predictions
        for sample_predictions in y_preds:
            y_pred.append(np.bincount(sample_predictions.astype('int')).argmax())
        return np.array(y_pred)
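
The `get_random_subsets` helper used in `fit` is imported from `mlfromscratch.utils` and is not shown above. For reference, here is a minimal sketch of the bootstrap sampling it performs; the actual mlfromscratch implementation may differ in its details (e.g., subsample size, or optionally sampling without replacement):

```python
import numpy as np

def get_random_subsets_sketch(X, y, n_subsets):
    """Draw one bootstrap sample of (X, y) per tree (sampling with replacement)."""
    n_samples = X.shape[0]
    subsets = []
    for _ in range(n_subsets):
        idx = np.random.choice(n_samples, size=n_samples, replace=True)
        subsets.append((X[idx], y[idx]))
    return subsets
```

A minimal usage sketch of the classifier above. It assumes scikit-learn is available for the demo dataset and that mlfromscratch's `train_test_split` and `accuracy_score` (imported at the top) behave like their scikit-learn counterparts:

```python
from sklearn import datasets

# Load a small multi-class dataset for the demo
data = datasets.load_digits()
X, y = data.data, data.target

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4)

clf = RandomForest(n_estimators=100)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)

print("Accuracy:", accuracy_score(y_test, y_pred))
```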

Below is example Python code that uses Extra Trees and Random Forest for feature selection. Assume we have a dataset containing features and a target variable, and we want to select the most important features.

```python
import numpy as np
import pandas as pd
from sklearn.ensemble import ExtraTreesClassifier, RandomForestClassifier

# Load the dataset
data = pd.read_csv('dataset.csv')
X = data.drop(columns=['target'])
y = data['target']

# Create an Extra Trees classifier and fit it to the data
et_clf = ExtraTreesClassifier()
et_clf.fit(X, y)

# Create a Random Forest classifier and fit it to the data
rf_clf = RandomForestClassifier()
rf_clf.fit(X, y)

# Print the importance score of each feature
print('Extra Trees feature importance scores:')
print(et_clf.feature_importances_)
print('Random Forest feature importance scores:')
print(rf_clf.feature_importances_)

# Select the most important features
et_indices = np.argsort(et_clf.feature_importances_)[::-1]
rf_indices = np.argsort(rf_clf.feature_importances_)[::-1]
num_features = 10  # Select the top 10 features
et_selected_indices = et_indices[:num_features]
rf_selected_indices = rf_indices[:num_features]
et_selected_features = X.columns[et_selected_indices]
rf_selected_features = X.columns[rf_selected_indices]
print('Selected features using Extra Trees:', et_selected_features)
print('Selected features using Random Forest:', rf_selected_features)
```

The code above loads the dataset into a Pandas DataFrame, fits `ExtraTreesClassifier` and `RandomForestClassifier` to the data, and prints the importance score of each feature. It then selects the top 10 most important features and prints their names.
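scikit-learn also offers `SelectFromModel`, which automates this importance-based selection. A brief sketch under the same assumptions as above (a `dataset.csv` file with a `target` column, and the fitted `rf_clf` from the previous snippet):

```python
from sklearn.feature_selection import SelectFromModel

# Keep only features whose importance exceeds the median importance.
# prefit=True reuses the already-fitted rf_clf instead of refitting it.
selector = SelectFromModel(rf_clf, threshold='median', prefit=True)
X_selected = selector.transform(X)
print('Reduced feature matrix shape:', X_selected.shape)
```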