Machine Learning in Action: Decision Trees and the ID3 Algorithm

Decision Trees
- ID3: information gain (entropy)
- C4.5: information gain ratio
- CART: Gini index, plus post-pruning

The ID3 Algorithm
1. Compute the empirical entropy of the dataset (the higher the entropy, the more mixed the data, i.e., the more evenly the instances are spread across different classes):
- count the total number of instances in the dataset
- build a dictionary keyed by each class that appears, whose value is how many times that class occurs
- use the frequency of each class label as its probability and accumulate the entropy H(D) = -sum_k p_k * log2(p_k), i.e., for each key: p = count / total, shannonEnt -= p * log2(p)
2. A data-splitting helper used when computing a feature's conditional entropy: splitDataSet(dataSet, axis, value) returns, for every instance whose feature at index axis equals value, the remaining features with that column removed.
3. Choose the best way to split the dataset: find the column index of the feature with the largest information gain.
- iterate over every feature
- iterate over every value that appears in that feature's column
- compute the conditional empirical entropy by calling the entropy function on each subset produced by the splitting helper: H(D|A) = sum_v (|D_v| / |D|) * H(D_v), i.e., the probability of each feature value times the entropy of the subset where that value occurs
- compute the information gain Gain(D, A) = H(D) - H(D|A) and keep the maximum

4. Recursively build the decision tree; worked numbers for the sample dataset follow below.

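As a concrete check of these quantities on the 5-instance sample dataset defined in createDataSet() below (2 'yes', 3 'no'; values rounded to three decimals):

H(D) = -(2/5)*log2(2/5) - (3/5)*log2(3/5) ≈ 0.971
Feature 0 ('no surfacing'): H(D|A0) = (3/5)*0.918 + (2/5)*0.0 ≈ 0.551, so Gain ≈ 0.420
Feature 1 ('flippers'): H(D|A1) = (4/5)*1.000 + (1/5)*0.0 = 0.800, so Gain ≈ 0.171

Feature 0 has the larger gain, so ID3 puts it at the root of the tree.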

#coding:utf-8
from math import log
import operator

# Create the sample dataset (the book's toy "is this a fish?" data); labels holds the feature names
# Usage: myDat, labels = trees.createDataSet()
def createDataSet():
    dataSet = [[1, 1, 'yes'],
               [1, 1, 'yes'],
               [1, 0, 'no'],
               [0, 1, 'no'],
               [0, 1, 'no']]
    labels = ['no surfacing', 'flippers']
    # change to discrete values
    return dataSet, labels

# Compute the empirical (Shannon) entropy of the dataset
# Usage: trees.calcShannonEnt(myDat)
def calcShannonEnt(dataSet):
    numEntries = len(dataSet)
    labelCounts = {}
    for featVec in dataSet:  # iterate over each row; key = class label, value = its count
        currentLabel = featVec[-1]
        if currentLabel not in labelCounts: labelCounts[currentLabel] = 0
        labelCounts[currentLabel] += 1
    shannonEnt = 0.0
    for key in labelCounts:
        prob = float(labelCounts[key]) / numEntries  # probability of each class = count / total
        shannonEnt -= prob * log(prob, 2)  # accumulate -p * log2(p): the empirical entropy
    return shannonEnt
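
# A quick check on the sample data (interpreter sketch; assumes this file is saved
# as trees.py and imported, as in the book's usage comments):
# >>> myDat, labels = trees.createDataSet()
# >>> trees.calcShannonEnt(myDat)  # 2 'yes' vs. 3 'no' instances
# 0.9709505944546686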

# Arguments: the dataset to split, the index of the splitting feature, and the value to match
# Returns, for every instance whose feature at index axis equals value, the remaining features
# Usage: trees.splitDataSet(myDat, 0, 1)
def splitDataSet(dataSet, axis, value):
    retDataSet = []
    for featVec in dataSet:
        if featVec[axis] == value:
            reducedFeatVec = featVec[:axis]  # chop out axis used for splitting
            reducedFeatVec.extend(featVec[axis + 1:])
            retDataSet.append(reducedFeatVec)
    return retDataSet
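
# Example splits on the sample data (interpreter sketch, same trees.py assumption):
# >>> trees.splitDataSet(myDat, 0, 1)  # rows where feature 0 == 1, with column 0 removed
# [[1, 'yes'], [1, 'yes'], [0, 'no']]
# >>> trees.splitDataSet(myDat, 0, 0)
# [[1, 'no'], [1, 'no']]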

# Choose the best way to split the dataset: return the column index of the feature
# with the largest information gain
# Usage: trees.chooseBestFeatureToSplit(myDat) -> 0, i.e., feature 0 is the best split
def chooseBestFeatureToSplit(dataSet):
    numFeatures = len(dataSet[0]) - 1  # the last column holds the class labels
    baseEntropy = calcShannonEnt(dataSet)
    bestInfoGain = 0.0
    bestFeature = -1
    for i in range(numFeatures):  # iterate over all the features
        featList = [example[i] for example in dataSet]  # collect column i across all instances
        uniqueVals = set(featList)  # get a set of unique values
        newEntropy = 0.0
        for value in uniqueVals:  # iterate over every value this feature can take
            subDataSet = splitDataSet(dataSet, i, value)
            prob = len(subDataSet) / float(len(dataSet))
            newEntropy += prob * calcShannonEnt(subDataSet)  # conditional entropy: P(value) * entropy of that subset
        infoGain = baseEntropy - newEntropy  # information gain, i.e. the reduction in entropy
        if infoGain > bestInfoGain:  # keep the largest gain seen so far
            bestInfoGain = infoGain
            bestFeature = i
    return bestFeature  # returns an integer
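
# On the sample data the gains are about 0.420 for feature 0 and 0.171 for feature 1
# (see the worked numbers above), so feature 0 is selected:
# >>> trees.chooseBestFeatureToSplit(myDat)
# 0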

# Count how often each class name occurs in classList, then return the most
# frequent one (majority vote); used when the features are exhausted but a
# leaf still contains a mix of classes
def majorityCnt(classList):
    classCount = {}
    for vote in classList:
        if vote not in classCount: classCount[vote] = 0
        classCount[vote] += 1
    sortedClassCount = sorted(classCount.items(), key=operator.itemgetter(1), reverse=True)  # items(), not the Python 2 iteritems()
    return sortedClassCount[0][0]
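
# Majority vote on a toy label list (interpreter sketch):
# >>> trees.majorityCnt(['yes', 'no', 'no'])
# 'no'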

# Recursively build the decision tree as nested dicts: {feature name: {feature value: subtree or class label}}
def createTree(dataSet, labels):
    classList = [example[-1] for example in dataSet]
    if classList.count(classList[0]) == len(classList):
        return classList[0]  # stop splitting when all the classes are identical
    if len(dataSet[0]) == 1:  # only the label column is left
        return majorityCnt(classList)  # stop splitting when there are no more features
    bestFeat = chooseBestFeatureToSplit(dataSet)
    bestFeatLabel = labels[bestFeat]
    myTree = {bestFeatLabel: {}}
    subLabels = labels[:bestFeat] + labels[bestFeat + 1:]  # feature names minus the chosen one; leaves the caller's labels intact
    featValues = [example[bestFeat] for example in dataSet]
    uniqueVals = set(featValues)
    for value in uniqueVals:
        myTree[bestFeatLabel][value] = createTree(splitDataSet(dataSet, bestFeat, value), subLabels)
    return myTree
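
# Building the full tree reproduces the book's result (interpreter sketch; with the
# non-mutating version above, the labels list is left intact for later use):
# >>> myDat, labels = trees.createDataSet()
# >>> myTree = trees.createTree(myDat, labels)
# >>> myTree
# {'no surfacing': {0: 'no', 1: {'flippers': {0: 'no', 1: 'yes'}}}}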

# Classify a test vector by walking the decision tree
# Usage: trees.classify(myTree, labels, [1, 0])
def classify(inputTree, featLabels, testVec):
    firstStr = next(iter(inputTree))  # feature tested at this node (inputTree.keys()[0] works only in Python 2)
    secondDict = inputTree[firstStr]  # the subtrees/leaves under each value of that feature
    featIndex = featLabels.index(firstStr)  # position of that feature in the test vector
    key = testVec[featIndex]
    valueOfFeat = secondDict[key]  # a dict means an internal node; anything else is a class label
    if isinstance(valueOfFeat, dict):
        classLabel = classify(valueOfFeat, featLabels, testVec)
    else:
        classLabel = valueOfFeat
    return classLabel
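
# Classifying two test vectors with the tree built above (interpreter sketch):
# >>> trees.classify(myTree, labels, [1, 0])
# 'no'
# >>> trees.classify(myTree, labels, [1, 1])
# 'yes'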

# Persist the decision tree (it is just a nested dict) with pickle
# Usage: trees.storeTree(myTree, 'classifierStorage.txt')
def storeTree(inputTree, filename):
    import pickle
    with open(filename, 'wb') as fw:  # pickle requires binary mode
        pickle.dump(inputTree, fw)

# Load a stored classifier
# Usage: trees.grabTree('classifierStorage.txt')
def grabTree(filename):
    import pickle
    with open(filename, 'rb') as fr:  # binary mode to match storeTree
        return pickle.load(fr)
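
# Round trip through pickle (interpreter sketch; writes classifierStorage.txt to the
# current working directory):
# >>> trees.storeTree(myTree, 'classifierStorage.txt')
# >>> trees.grabTree('classifierStorage.txt')
# {'no surfacing': {0: 'no', 1: {'flippers': {0: 'no', 1: 'yes'}}}}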

# Usage example: build a tree from the book's contact-lens dataset (lenses.txt)
# fr=open('G:/python/pythonwork/ML/lenses.txt')
# lenses=[inst.strip().split('\t') for inst in fr.readlines()]
# lensesLabels=['age','prescript','astigmatic','tearRate']
# lensesTree=trees.createTree(lenses,lensesLabels)
# lensesTree
# treePlotter.createPlot(lensesTree)

