1. 创建数据集
# Example data for problem 5.1 in the book (loan-application table)
def create_data():
    """Build the textbook's toy loan dataset.

    Returns:
        (rows, names): ``rows`` is a list of 15 samples, each of the form
        [age, has_job, owns_house, credit_rating, class]; ``names`` holds
        the matching column labels, the last one being the class column.
    """
    rows = [
        ['青年', '否', '否', '一般', '否'],
        ['青年', '否', '否', '好', '否'],
        ['青年', '是', '否', '好', '是'],
        ['青年', '是', '是', '一般', '是'],
        ['青年', '否', '否', '一般', '否'],
        ['中年', '否', '否', '一般', '否'],
        ['中年', '否', '否', '好', '否'],
        ['中年', '是', '是', '好', '是'],
        ['中年', '否', '是', '非常好', '是'],
        ['中年', '否', '是', '非常好', '是'],
        ['老年', '否', '是', '非常好', '是'],
        ['老年', '否', '是', '好', '是'],
        ['老年', '是', '否', '好', '是'],
        ['老年', '是', '否', '非常好', '是'],
        ['老年', '否', '否', '一般', '否'],
    ]
    names = ['年龄', '有工作', '有自己的房子', '信贷情况', '类别']
    return rows, names
# Materialize the toy dataset as a DataFrame for inspection.
# NOTE(review): in this flattened script `pd` is imported only on the lines
# BELOW, so running the file top-to-bottom would raise NameError here.  This
# looks like a notebook export with cells out of order — confirm, and move
# the import block above this point.
datasets, labels = create_data()
train_data = pd.DataFrame(datasets, columns=labels)
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import math
2. 定义节点类 多叉树(每个结点按特征取值分叉,子结点数不限于两个)
class Node:
    """One node of a multi-way decision tree.

    A leaf is built with ``root=True`` and carries a class ``label``.
    An internal node is built with ``root=False`` and records the splitting
    feature's column index (``feature``) and display name (``feature_name``);
    its children live in ``self.tree``, keyed by feature value.
    """

    def __init__(self, root=True, label=None, feature_name=None, feature=None):
        self.root = root                  # True -> leaf node
        self.label = label                # class label (meaningful on leaves)
        self.feature_name = feature_name  # name of the splitting feature
        self.feature = feature            # column index of the splitting feature
        self.tree = {}                    # feature value -> child Node
        # Summary dict used by __repr__.  It shares the ``tree`` dict, so
        # children attached later automatically show up when printing.
        # Bug fix: the 'label' key used to be the typo 'label:'.
        self.result = {
            'label': self.label,
            'feature': self.feature,
            'tree': self.tree,
        }

    def __repr__(self):
        return '{}'.format(self.result)

    def add_node(self, val, node):
        """Attach child ``node`` under feature value ``val``."""
        self.tree[val] = node

    def predict(self, features):
        """Classify one sample (a list of feature values) by walking the tree.

        Descends into the child whose key matches this sample's value of the
        node's splitting feature, recursing until a leaf is reached.
        Raises KeyError for a feature value never seen during training.
        """
        if self.root is True:
            return self.label
        return self.tree[features[self.feature]].predict(features)
3. 构建ID3/C4.5决策树
class DTree:
    """ID3 decision-tree learner (C4.5 gain ratio computed alongside).

    Expects training data whose LAST column is the class label and whose
    other columns are categorical features.
    """

    def __init__(self, epsilon=0.1):
        self.epsilon = epsilon  # info-gain threshold for pre-pruning (step 4)
        self._tree = {}         # root Node after fit()

    # 计算经验熵 H(D)
    @staticmethod
    def calc_ent(datasets):
        """Empirical entropy H(D) of the class label (last column of each row)."""
        data_length = len(datasets)
        label_count = {}
        for row in datasets:
            label = row[-1]
            label_count[label] = label_count.get(label, 0) + 1
        return -sum((n / data_length) * math.log(n / data_length, 2)
                    for n in label_count.values())

    # 计算经验条件熵 H(D|A)
    def cond_ent(self, datasets, axis=0):
        """Empirical conditional entropy H(D|A) for feature column ``axis``."""
        data_length = len(datasets)
        feature_sets = {}
        for row in datasets:
            feature_sets.setdefault(row[axis], []).append(row)
        # Weighted average of the per-subset label entropies.
        return sum((len(sub) / data_length) * self.calc_ent(sub)
                   for sub in feature_sets.values())

    # ID3: g(D, A) = H(D) - H(D|A)
    @staticmethod
    def info_gain(ent, cond_ent):
        """Information gain (the ID3 split criterion)."""
        return ent - cond_ent

    def info_gain_train(self, datasets):
        """Choose the best splitting feature for ``datasets`` (list/2-D array).

        Returns ``(feature_index, info_gain)`` selected by the ID3 criterion;
        the C4.5 gain ratio is computed in parallel for reference.
        """
        count = len(datasets[0]) - 1       # number of feature columns
        ent = self.calc_ent(datasets)      # H(D) of the whole set
        best_feature_ID3 = []
        best_feature_C45 = []
        for c in range(count):
            c_info_gain = self.info_gain(ent, self.cond_ent(datasets, axis=c))
            # Split information H_A(D): entropy of the feature's own value
            # distribution (the 1-column slice makes the value the "label").
            # Bug fix: ``calc_ent`` was called as a bare name (NameError) —
            # it is a method and must be reached through ``self``.
            split_info = self.calc_ent(np.array(datasets)[:, c:c + 1].tolist())
            # Guard: a single-valued feature has H_A(D) == 0.
            c_gain_ratio = c_info_gain / split_info if split_info else 0.0
            best_feature_ID3.append((c, c_info_gain))
            best_feature_C45.append((c, c_gain_ratio))
        # Pick the feature with the largest criterion value.
        best_ID3 = max(best_feature_ID3, key=lambda x: x[-1])
        best_C45 = max(best_feature_C45, key=lambda x: x[-1])
        return best_ID3
        # return best_C45  # swap to select by the C4.5 gain ratio instead

    def train(self, train_data):
        """Recursively build the tree (algorithm 5.2/5.3 in the book).

        input:  DataFrame D whose last column is the class label
        output: root Node of the learned (sub)tree
        """
        _, y_train, features = (train_data.iloc[:, :-1],
                                train_data.iloc[:, -1],
                                train_data.columns[:-1])
        # 1. All samples share one class Ck -> single-node tree labelled Ck.
        if len(y_train.value_counts()) == 1:
            return Node(root=True, label=y_train.iloc[0])
        # 2. Feature set A is empty -> leaf labelled with the majority class.
        if len(features) == 0:
            return Node(
                root=True,
                label=y_train.value_counts().sort_values(
                    ascending=False).index[0])
        # 3. Pick Ag, the feature with maximal information gain.
        max_feature, max_info_gain = self.info_gain_train(np.array(train_data))
        max_feature_name = features[max_feature]
        # 4. Gain below epsilon -> stop splitting, emit a majority-class leaf.
        if max_info_gain < self.epsilon:
            return Node(
                root=True,
                label=y_train.value_counts().sort_values(
                    ascending=False).index[0])
        # 5. Internal node splitting on Ag, one child per observed value.
        # Bug fix: ``max_feature`` indexes the column-REDUCED sub-frame, but
        # Node.predict indexes the full, original sample — translate the
        # name back to its original position (recorded by fit) when known.
        feature_pos = getattr(self, '_feature_pos', {}).get(
            max_feature_name, max_feature)
        node_tree = Node(
            root=False, feature_name=max_feature_name, feature=feature_pos)
        feature_list = train_data[max_feature_name].value_counts().index
        for f in feature_list:
            sub_train_df = train_data.loc[
                train_data[max_feature_name] == f].drop(
                    [max_feature_name], axis=1)
            # 6. Recurse on the subset with Ag removed.
            sub_tree = self.train(sub_train_df)
            node_tree.add_node(f, sub_tree)
        return node_tree

    def fit(self, train_data):
        """Train on ``train_data`` and return (and store) the root Node."""
        # Remember each feature's position in the original sample layout so
        # train() can emit indices that Node.predict can use on full samples.
        self._feature_pos = {
            name: i for i, name in enumerate(train_data.columns[:-1])}
        self._tree = self.train(train_data)
        return self._tree

    def predict(self, X_test):
        """Classify one sample (list of feature values, label excluded)."""
        return self._tree.predict(X_test)
# Rebuild the book's loan dataset and fit an ID3 tree on it.
datasets, labels = create_data()
data_df = pd.DataFrame(datasets, columns=labels)
dt = DTree()
tree = dt.fit(data_df)
# Bare expression: in a notebook cell this displays the tree's repr;
# as a plain script it has no effect.
tree
{'label:': None, 'feature': 2, 'tree': {'否': {'label:': None, 'feature': 1, 'tree': {'否': {'label:': '否', 'feature': None, 'tree': {}}, '是': {'label:': '是', 'feature': None, 'tree': {}}}}, '是': {'label:': '是', 'feature': None, 'tree': {}}}}
ID3算法的决策树基本实现完成,但是以字典方式表示的决策树不太直观,第6部分对决策树进行可视化。
dt.predict(['老年', '否', '否', '一般'])
'否'
4. ID3/C4.5决策树剪枝
决策树剪枝实现后期再补充…