'''
project:决策树
date:2018.08.22
'''
from math import log
def creatDataSet():
    """Build the toy fish-classification data set.

    Returns:
        dataSet: list of samples, each [no surfacing, flippers, label],
            where the last element is the class label ('yes'/'no').
        labels: feature names for the first two columns.
    """
    dataSet = [[1, 1, 'yes'],
               [1, 1, 'yes'],
               [1, 0, 'no'],
               [0, 1, 'no'],
               [0, 1, 'no']]
    labels = ['no surfacing', 'flippers']
    # Bug fix: the original built both lists but never returned them,
    # so callers always received None.
    return dataSet, labels
def calcShannonEnt(dataSet):
    """Compute the Shannon entropy of a data set.

    The class label is taken from the last element of each sample.

    Args:
        dataSet: list of samples; sample[-1] is the class label.

    Returns:
        Entropy in bits: -sum(p * log2(p)) over the label distribution.
    """
    total = len(dataSet)
    # Tally how many samples carry each label.
    counts = {}
    for sample in dataSet:
        label = sample[-1]
        counts[label] = counts.get(label, 0) + 1
    entropy = 0.0
    for count in counts.values():
        p = count / float(total)
        entropy -= p * log(p, 2)
    return entropy
def splitDataSet(dataSet, axis, value):
    """Select the samples whose feature at `axis` equals `value`.

    Args:
        dataSet: list of samples.
        axis: index of the feature column to filter on.
        value: feature value a sample must match to be kept.

    Returns:
        New list of matching samples with the `axis` column removed;
        the input list is left untouched.
    """
    return [sample[:axis] + sample[axis + 1:]
            for sample in dataSet
            if sample[axis] == value]
def chooseBestFeatureToSplit(dataSet):
    """Choose the feature with the highest information gain (ID3).

    Args:
        dataSet: list of samples; the last column is the class label,
            all preceding columns are candidate features.

    Returns:
        Index of the best feature, or -1 if no feature yields a
        positive information gain.
    """
    numFeatures = len(dataSet[0]) - 1
    baseEntropy = calcShannonEnt(dataSet)
    bestInfoGain = 0.0
    bestFeature = -1
    for i in range(numFeatures):
        uniqueValues = {example[i] for example in dataSet}
        newEntropy = 0.0
        for value in uniqueValues:
            subDataSet = splitDataSet(dataSet, i, value)
            prob = len(subDataSet) / float(len(dataSet))
            # Bug fix: the original accumulated `prob * log(prob, 2)` here,
            # which is the (negated) split information of the partition, not
            # the conditional entropy. ID3 requires the subset entropies
            # weighted by subset probability.
            newEntropy += prob * calcShannonEnt(subDataSet)
        infoGain = baseEntropy - newEntropy
        if infoGain > bestInfoGain:
            bestInfoGain = infoGain
            bestFeature = i
    return bestFeature
if __name__ == '__main__':
    # Demo: report the index of the best splitting feature for the toy data.
    demoData = [[1, 1, 'yes'],
                [1, 1, 'yes'],
                [1, 0, 'no'],
                [0, 1, 'no'],
                [0, 1, 'no']]
    demoLabels = ['no surfacing', 'flippers']
    print("最优特征索引值:" + str(chooseBestFeatureToSplit(demoData)))
# Blog-migration residue (markdown image link), commented out so the file parses:
# ![这里写图片描述](https://i-blog.csdnimg.cn/blog_migrate/09a95df825d901131d013b7b2cc66d66.png)