'''
project: decision tree (ID3)
date: 2018.08.22
'''
from math import log
def creatDataSet():
    """Build the toy "fish" data set from Machine Learning in Action.

    Each sample is [no-surfacing, flippers, class-label]; the first two
    columns are binary features, the last is the class ('yes'/'no').

    Returns:
        tuple: (dataSet, labels) where dataSet is the list of samples and
        labels holds the human-readable names of the two feature columns.
    """
    dataSet = [[1, 1, 'yes'],
               [1, 1, 'yes'],
               [1, 0, 'no'],
               [0, 1, 'no'],
               [0, 1, 'no']]
    labels = ['no surfacing', 'flippers']
    # Bug fix: the original built both objects but never returned them,
    # making the function a no-op for every caller.
    return dataSet, labels
'''
Compute the Shannon entropy
'''
def calcShannonEnt(dataSet):
    """Return the Shannon entropy of the class labels in dataSet.

    The last element of each sample is its class label; entropy is
    -sum(p * log2(p)) over the empirical label distribution.
    """
    total = len(dataSet)
    # Tally how many times each class label occurs.
    counts = {}
    for sample in dataSet:
        label = sample[-1]
        counts[label] = counts.get(label, 0) + 1
    # Accumulate -p*log2(p) for every observed label.
    entropy = 0.0
    for freq in counts.values():
        p = float(freq) / total
        entropy -= p * log(p, 2)
    return entropy
'''
Split the data set
'''
def splitDataSet(dataSet, axis, value):
    """Select the rows whose feature at index `axis` equals `value`.

    The matching rows are returned with that feature column removed, so
    the result is ready for the next round of splitting. The input list
    is not modified.
    """
    # Slice around the chosen column and keep only matching rows.
    return [row[:axis] + row[axis + 1:]
            for row in dataSet
            if row[axis] == value]
'''
Choose the best way to split the data set
'''
def chooseBestFeatureToSplit(dataSet):
    """Return the index of the feature with the highest information gain.

    For each candidate feature, partitions dataSet by that feature's
    values, computes the weighted conditional entropy of the partitions,
    and picks the feature maximizing (base entropy - conditional entropy).
    Returns -1 if no feature gives a positive gain.
    """
    numFeatures = len(dataSet[0]) - 1  # last column is the class label
    baseEntropy = calcShannonEnt(dataSet)
    bestInfoGain = 0.0
    bestFeature = -1
    for i in range(numFeatures):
        # Distinct values taken by feature i across the data set.
        uniqueVals = set(example[i] for example in dataSet)
        newEntropy = 0.0
        for value in uniqueVals:
            subDataSet = splitDataSet(dataSet, i, value)
            prob = len(subDataSet) / float(len(dataSet))
            # Bug fix: the original accumulated prob * log(prob, 2), which
            # is (the negation of) the entropy of the split *sizes*, not the
            # class entropy of each subset. Information gain requires the
            # weighted Shannon entropy of every partition.
            newEntropy += prob * calcShannonEnt(subDataSet)
        infoGain = baseEntropy - newEntropy
        if infoGain > bestInfoGain:
            bestInfoGain = infoGain
            bestFeature = i
    return bestFeature
if __name__ == '__main__':
    # Demo: report the best splitting feature for the toy data set.
    sampleData = [
        [1, 1, 'yes'],
        [1, 1, 'yes'],
        [1, 0, 'no'],
        [0, 1, 'no'],
        [0, 1, 'no'],
    ]
    featureNames = ['no surfacing', 'flippers']
    bestIndex = chooseBestFeatureToSplit(sampleData)
    print("最优特征索引值:" + str(bestIndex))
# Machine Learning in Action — decision tree exercise