Decision Tree and Random Forest from Scratch -- a Python Implementation

The Decision Tree Algorithm
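
A decision tree classifies a sample by recursively partitioning the training data: at each node it scans candidate (feature, threshold) pairs, keeps the pair with the highest information gain (the drop in label entropy H(y) = -Σ p_i·log(p_i) from parent to children), and stops growing when a node is pure, has too few samples, or the maximum depth is reached. Each leaf then predicts the most common label among the samples that reach it. The implementation below follows exactly this recipe.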



import numpy as np
from collections import Counter

class Node:
    def __init__(self, feature=None, threshold=None, left=None, right=None, *, value=None):
        self.feature = feature        # index of the feature this node splits on
        self.threshold = threshold    # threshold used for the split
        self.left = left              # left child (samples with feature value <= threshold)
        self.right = right            # right child (samples with feature value > threshold)
        self.value = value            # class label; set only for leaf nodes

    def is_leaf_node(self):
        return self.value is not None
        

        
class DecisionTree:
    def __init__(self, min_samples_split=2, max_depth=20, n_features=None):
        self.min_samples_split = min_samples_split
        self.max_depth = max_depth
        self.n_features = n_features   # number of features considered at each split
        self.root = None

    def fit(self, X, y):
        # consider all features unless a smaller subset size was requested
        self.n_features = X.shape[1] if not self.n_features else min(X.shape[1], self.n_features)
        self.root = self._grow_tree(X, y)
        
    def _grow_tree(self, X, y, depth=0):    
        n_samples, n_feats = X.shape
        n_labels = len(np.unique(y))
        
        # check the stopping criteria
        if depth >= self.max_depth or n_labels == 1 or n_samples < self.min_samples_split:
            leaf_value = self._most_common_label(y)
            return Node(value=leaf_value)
        
        # find the best split among a random subset of features
        feat_idxs = np.random.choice(n_feats, self.n_features, replace=False)
        best_feature, best_threshold = self._best_split(X, y, feat_idxs)

        # split on the best feature and grow the children recursively
        left_idx, right_idx = self._split(X[:, best_feature], best_threshold)
        left_node = self._grow_tree(X[left_idx, :], y[left_idx], depth + 1)
        right_node = self._grow_tree(X[right_idx, :], y[right_idx], depth + 1)
        return Node(best_feature, best_threshold, left_node, right_node)
        
        
    def _best_split(self, X, y, feat_idxs):
        best_gain = -1
        split_idx, split_threshold = None, None

        for feat_idx in feat_idxs:
            X_column = X[:, feat_idx]
            thresholds = np.unique(X_column)

            for thres in thresholds:
                # calculate the information gain for this candidate split
                gain = self._information_gain(y, X_column, thres)

                if gain > best_gain:
                    best_gain = gain
                    split_idx = feat_idx
                    split_threshold = thres

        return split_idx, split_threshold
    
    def _split(self, X_column, split_threshold):
        left_idx = np.argwhere(X_column <= split_threshold).flatten()
        right_idx = np.argwhere(X_column > split_threshold).flatten()
        return left_idx, right_idx
    
    def _information_gain(self, y, X_column, split_threshold):
        # parent entropy
        parent_entropy = self._entropy(y)

        # create the children
        left_idx, right_idx = self._split(X_column, split_threshold)

        # a split that leaves one side empty yields no information gain
        if len(left_idx) == 0 or len(right_idx) == 0:
            return 0

        # calculate the weighted avg. entropy of the children
        n_l, n_r = len(left_idx), len(right_idx)
        left_entropy = self._entropy(y[left_idx])
        right_entropy = self._entropy(y[right_idx])
        weighted_entropy_sum = (n_l / len(y)) * left_entropy + (n_r / len(y)) * right_entropy

        # information gain = parent entropy minus weighted child entropy
        return parent_entropy - weighted_entropy_sum
    
        
        
    def _entropy(self, y):
        # natural-log entropy of the label distribution; the log base only
        # rescales gains, so it does not change which split is chosen
        hist = np.bincount(y)
        ps = hist / len(y)
        return -np.sum([p * np.log(p) for p in ps if p > 0])

    def _most_common_label(self,y):
        counter = Counter(y)
        value = counter.most_common(1)[0][0]
        return value


        
    def predict(self, X):
        return np.array([self._traverse_tree(x, self.root) for x in X])
            
        
    def _traverse_tree(self, x, node):
        if node.is_leaf_node():
            return node.value
        
        if x[node.feature] <= node.threshold:
            return self._traverse_tree(x, node.left)
        return self._traverse_tree(x, node.right)
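
To see the entropy and information-gain helpers at work, here is a minimal sanity check (a sketch that is not part of the original listing) on a tiny, hand-checkable label array:

# Sanity check for _entropy and _information_gain (illustrative only)
import numpy as np

tree = DecisionTree()
y = np.array([0, 0, 1, 1])
# two equally likely classes: entropy = -2 * 0.5 * ln(0.5) = ln(2) ≈ 0.693
print(tree._entropy(y))
# this column separates the classes perfectly at threshold 2.0, so both
# children are pure and the gain equals the full parent entropy (≈ 0.693)
X_column = np.array([1.0, 2.0, 5.0, 6.0])
print(tree._information_gain(y, X_column, split_threshold=2.0))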
   
class RandomForest:
    def __init__(self, n_trees=50, max_depth=20, min_samples_split=2, n_features=None):
        self.n_trees = n_trees
        self.max_depth = max_depth
        self.min_samples_split = min_samples_split
        self.n_features = n_features   # features considered per split in each tree
        self.trees = []

    def fit(self, X, y):
        self.trees = []
        for _ in range(self.n_trees):
            tree = DecisionTree(max_depth=self.max_depth,
                                min_samples_split=self.min_samples_split,
                                n_features=self.n_features)
            # each tree is trained on its own bootstrap sample of the data
            X_sample, y_sample = self._bootstrap(X, y)
            tree.fit(X_sample, y_sample)
            self.trees.append(tree)

        
        
    def _bootstrap(self, X, y):
        # draw n rows with replacement
        n = X.shape[0]
        idx = np.random.choice(n, n, replace=True)
        return X[idx], y[idx]
        
    def predict(self, X):
        preds = np.array([tree.predict(X) for tree in self.trees])   # shape (n_trees, n_samples)
        tree_predictions = np.swapaxes(preds, 0, 1)                  # shape (n_samples, n_trees)
        return np.array([self._most_common_label(pred) for pred in tree_predictions])
        
    def _most_common_label(self, y):
        counter = Counter(y)
        most_common = counter.most_common(1)[0][0]
        
        return most_common
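
The majority vote in predict works by stacking the per-tree predictions into an (n_trees, n_samples) array and using np.swapaxes so that each row collects every tree's vote for one sample. A small standalone illustration (the numbers are made up for demonstration):

# How the majority vote in RandomForest.predict works (illustrative only)
import numpy as np
from collections import Counter

preds = np.array([[0, 1, 1, 0],    # tree 0's predictions for 4 samples
                  [0, 1, 0, 0],    # tree 1
                  [1, 1, 1, 0]])   # tree 2
per_sample = np.swapaxes(preds, 0, 1)   # shape (4, 3): one row of votes per sample
votes = [Counter(row).most_common(1)[0][0] for row in per_sample]
print(votes)   # majority label per sample: 0, 1, 1, 0
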
Testing the Code
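
The test loads scikit-learn's breast-cancer dataset, holds out 20% of the samples with train_test_split, and reports the accuracy of both classifiers on the held-out split.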

# test
if __name__ == '__main__':

    from sklearn import datasets
    from sklearn.model_selection import train_test_split
    
    data = datasets.load_breast_cancer()
    X, y = data.data, data.target
    
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.2, random_state=1234
    )
    
    def accuracy(y_test, y_pred):
        return np.sum(y_test == y_pred) / len(y_test)
    
    clfdt = DecisionTree(max_depth=10)
    clfdt.fit(X_train, y_train)
    predictions_dt = clfdt.predict(X_test)
    
    
    accdt = accuracy(y_test, predictions_dt)
    print('Decision tree accuracy:', accdt)
    
    
    clfrf = RandomForest(max_depth=10)
    clfrf.fit(X_train, y_train)
    predictions_rf = clfrf.predict(X_test)
    
    
    accrf = accuracy(y_test, predictions_rf)
    print('Random forest accuracy:', accrf)

Code link
