Decision Tree Code Implementation

# -*- coding: UTF-8 -*-
import matplotlib
from matplotlib.font_manager import FontProperties
import matplotlib.pyplot as plt
from math import log
import operator


def createDataSet():
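    # 15 toy samples; the first four columns are the integer-coded features
    # F1-AGE, F2-WORK, F3-HOME, F4-LOAN, and the last column is the class label ('yes'/'no')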
    dataSet = [[0, 0, 0, 0, 'no'],
               [0, 0, 0, 1, 'no'],
               [0, 1, 0, 1, 'yes'],
               [0, 1, 1, 0, 'yes'],
               [0, 0, 0, 0, 'no'],
               [1, 0, 0, 0, 'no'],
               [1, 0, 0, 1, 'no'],
               [1, 1, 1, 1, 'yes'],
               [1, 0, 1, 2, 'yes'],
               [1, 0, 1, 2, 'yes'],
               [2, 0, 1, 2, 'yes'],
               [2, 0, 1, 1, 'yes'],
               [2, 1, 0, 1, 'yes'],
               [2, 1, 0, 2, 'yes'],
               [2, 0, 0, 0, 'no']]
    labels = ['F1-AGE', 'F2-WORK', 'F3-HOME', 'F4-LOAN']
    return dataSet, labels


# Build the tree recursively from the dataset, the remaining feature labels, and featLabels (features in the order they are chosen)
def createTree(dataset, labels, featLabels):
    # Collect the class labels of all samples in the current node
    classList = [example[-1] for example in dataset]
    # If every sample has the same class (entropy is 0), stop splitting
    if classList.count(classList[0]) == len(classList):
        # and return that class
        return classList[0]
    # If no features are left to split on (each example contains only the class label)
    if len(dataset[0]) == 1:
        # return the majority class
        return majorityCnt(classList)
    # Pick the best feature to split on (largest information gain)
    bestFeat = chooseBestFeatureToSplit(dataset)
    bestFeatLabel = labels[bestFeat]
    # Record the chosen feature in featLabels (order of use)
    featLabels.append(bestFeatLabel)
    myTree = {bestFeatLabel: {}}
    # Remove the chosen feature from the remaining labels
    del labels[bestFeat]
    # Collect every value of the chosen feature
    featValue = [example[bestFeat] for example in dataset]
    # Unique values taken by the chosen feature
    uniqueVals = set(featValue)
    for value in uniqueVals:
        # Copy of the remaining labels for the recursive call
        sublabels = labels[:]
        # Recursively build the subtree on the split data, passing the remaining labels and featLabels
        myTree[bestFeatLabel][value] = createTree(splitDataSet(dataset, bestFeat, value), sublabels, featLabels)
    return myTree


# Return the class that occurs most often in classList
def majorityCnt(classList):
    classCount = {}
    for vote in classList:
        # Initialize the count on first sight, then increment it
        if vote not in classCount.keys():
            classCount[vote] = 0
        classCount[vote] += 1
    # Sort classes by count, descending
    sortedclassCount = sorted(classCount.items(), key=operator.itemgetter(1), reverse=True)
    # Return the most frequent class
    return sortedclassCount[0][0]


# Choose the best feature to split on (ID3: largest information gain)
def chooseBestFeatureToSplit(dataset):
    # Number of features: total columns minus the class column ('yes'/'no')
    numFeatures = len(dataset[0]) - 1
    # Entropy of the current dataset
    baseEntropy = calcShannonEnt(dataset)
    # Best information gain found so far
    bestInfoGain = 0
    # Index of the best feature
    bestFeature = -1
    # Try each feature in turn
    for i in range(numFeatures):
        # Values of this feature for every sample (one column)
        featList = [example[i] for example in dataset]
        uniqueVals = set(featList)
        newEntropy = 0
        # Weighted entropy after splitting on this feature: iterate over its values
        for val in uniqueVals:
            # Subset of samples that take this value
            subDataSet = splitDataSet(dataset, i, val)
            prob = len(subDataSet) / float(len(dataset))
            # Accumulate the weighted entropy
            newEntropy += prob * calcShannonEnt(subDataSet)
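        # Information gain of this feature: Gain(D, A) = H(D) - sum over values v of |D_v|/|D| * H(D_v)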
        infoGain = baseEntropy - newEntropy
        if (infoGain > bestInfoGain):
            bestInfoGain = infoGain
            bestFeature = i
    return bestFeature


# Split the dataset: keep the samples whose feature idx equals val, dropping that column
def splitDataSet(dataset, idx, val):
    retDataSet = []
    for featVec in dataset:
        if featVec[idx] == val:
            # Remove the current column
            reducedFeatVec = featVec[:idx]
            reducedFeatVec.extend(featVec[idx + 1:])
            retDataSet.append(reducedFeatVec)
    return retDataSet


def calcShannonEnt(dataset):
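    # Shannon entropy of the class distribution: H(D) = -sum_k p_k * log2(p_k)
    # Sanity check (this dataset has 9 'yes' and 6 'no'): H = -(9/15)*log2(9/15) - (6/15)*log2(6/15) ≈ 0.971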
    # Total number of samples
    numexamples = len(dataset)
    labelCounts = {}
    # Count occurrences of each class label ('yes' / 'no')
    for featVec in dataset:
        currentlabel = featVec[-1]
        if currentlabel not in labelCounts.keys():
            labelCounts[currentlabel] = 0
        labelCounts[currentlabel] += 1
    # Compute the entropy
    shannonEnt = 0
    for key in labelCounts:
        # Probability of this class
        prop = float(labelCounts[key]) / numexamples
        # Accumulate -p * log2(p)
        shannonEnt -= prop * log(prop, 2)
    return shannonEnt


def getNumLeafs(myTree):
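    # Recursively count the leaf nodes; createPlot uses this to set the horizontal spread of the plot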
    numLeafs = 0
    firstStr = next(iter(myTree))
    secondDict = myTree[firstStr]
    for key in secondDict.keys():
        if type(secondDict[key]).__name__ == 'dict':
            numLeafs += getNumLeafs(secondDict[key])
        else:
            numLeafs += 1
    return numLeafs


def getTreeDepth(myTree):
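    # Recursively compute the tree depth; createPlot uses this to set the vertical spacing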
    maxDepth = 0
    firstStr = next(iter(myTree))
    secondDict = myTree[firstStr]
    for key in secondDict.keys():
        if type(secondDict[key]).__name__ == 'dict':
            thisDepth = 1 + getTreeDepth(secondDict[key])
        else:
            thisDepth = 1
        if thisDepth > maxDepth: maxDepth = thisDepth
    return maxDepth


# Draw a single node (text box) with an arrow pointing to it from its parent
def plotNode(nodeTxt, centerPt, parentPt, nodeType):
    arrow_args = dict(arrowstyle="<-")
    # Font for the node text; the path assumes Windows and is only needed for non-ASCII labels
    font = FontProperties(fname=r"c:\windows\fonts\simsunb.ttf", size=14)
    # Note: the keyword must be lowercase 'fontproperties'; 'FontProperties' is not a valid Text property
    createPlot.ax1.annotate(nodeTxt, xy=parentPt, xycoords='axes fraction',
                            xytext=centerPt, textcoords='axes fraction',
                            va="center", ha="center", bbox=nodeType, arrowprops=arrow_args, fontproperties=font)


def plotMidText(cntrPt, parentPt, txtString):
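    # Write the split value (edge label) at the midpoint between a parent node and its child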
    xMid = (parentPt[0] - cntrPt[0]) / 2.0 + cntrPt[0]
    yMid = (parentPt[1] - cntrPt[1]) / 2.0 + cntrPt[1]
    createPlot.ax1.text(xMid, yMid, txtString, va="center", ha="center", rotation=30)


def plotTree(myTree, parentPt, nodeTxt):
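    # Recursively lay out and draw the tree: each decision node is centered above its leaves,
    # and leaf nodes are spaced evenly along the x axis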
    decisionNode = dict(boxstyle="sawtooth", fc="0.8")
    leafNode = dict(boxstyle="round4", fc="0.8")
    numLeafs = getNumLeafs(myTree)
    depth = getTreeDepth(myTree)
    firstStr = next(iter(myTree))
    cntrPt = (plotTree.xOff + (1.0 + float(numLeafs)) / 2.0 / plotTree.totalW, plotTree.yOff)
    plotMidText(cntrPt, parentPt, nodeTxt)
    plotNode(firstStr, cntrPt, parentPt, decisionNode)
    secondDict = myTree[firstStr]
    plotTree.yOff = plotTree.yOff - 1.0 / plotTree.totalD
    for key in secondDict.keys():
        if type(secondDict[key]).__name__ == 'dict':
            plotTree(secondDict[key], cntrPt, str(key))
        else:
            plotTree.xOff = plotTree.xOff + 1.0 / plotTree.totalW
            plotNode(secondDict[key], (plotTree.xOff, plotTree.yOff), cntrPt, leafNode)
            plotMidText((plotTree.xOff, plotTree.yOff), cntrPt, str(key))
    plotTree.yOff = plotTree.yOff + 1.0 / plotTree.totalD


def createPlot(inTree):
    fig = plt.figure(1, facecolor='white')  # create the figure
    fig.clf()  # clear it
    axprops = dict(xticks=[], yticks=[])
    createPlot.ax1 = plt.subplot(111, frameon=False, **axprops)  # hide the x and y axes
    plotTree.totalW = float(getNumLeafs(inTree))  # number of leaf nodes (controls width)
    plotTree.totalD = float(getTreeDepth(inTree))  # tree depth (controls height)
    plotTree.xOff = -0.5 / plotTree.totalW  # initial x offset
    plotTree.yOff = 1.0  # start drawing from the top
    plotTree(inTree, (0.5, 1.0), '')  # draw the tree
    plt.show()


if __name__ == '__main__':
    dataset, labels = createDataSet()
    featLabels = []
    myTree = createTree(dataset, labels, featLabels)
    createPlot(myTree)
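
Not part of the original listing: once the tree has been built, a small helper can classify a new sample by walking the nested dict that createTree returns. This is only a sketch; it assumes the test vector lists its feature values in the same order as featLabels (the order in which createTree picked the features). For this particular dataset the learned tree should come out as {'F3-HOME': {0: {'F2-WORK': {0: 'no', 1: 'yes'}}, 1: 'yes'}}, so featLabels would be ['F3-HOME', 'F2-WORK'].

# Sketch (assumption, not in the original code): classify one sample with the trained tree.
def classify(inputTree, featLabels, testVec):
    firstStr = next(iter(inputTree))          # feature tested at this node
    secondDict = inputTree[firstStr]
    featIndex = featLabels.index(firstStr)    # position of that feature in testVec
    classLabel = None
    for key in secondDict.keys():
        if testVec[featIndex] == key:
            if type(secondDict[key]).__name__ == 'dict':
                # Internal node: keep walking down
                classLabel = classify(secondDict[key], featLabels, testVec)
            else:
                # Leaf node: this is the predicted class
                classLabel = secondDict[key]
    return classLabel

# Example usage (values ordered to match featLabels, e.g. home=0, work=1):
# print(classify(myTree, featLabels, [0, 1]))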

Note: these are personal study notes, recorded while learning, and are kept for my own reference only.
