Computing Shannon Entropy
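Shannon entropy measures how mixed the class labels of a data set are: H(D) = -Σ_k p_k * log2(p_k), where p_k is the fraction of samples belonging to class k. The higher the entropy, the more disordered the data set.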
Related functions
from math import log
import operator

def createDataSet():
    dataSet = [[1, 1, 'yes'],
               [1, 1, 'yes'],
               [1, 0, 'no'],
               [0, 1, 'no'],
               [0, 1, 'no']]                 # list of samples; the last column is the class label
    labels = ['no surfacing','flippers']     # feature names: "can survive without surfacing", "has flippers"
    # change to discrete values
    return dataSet, labels

def calcShannonEnt(dataSet):
    numEntries = len(dataSet)                # 5 for the sample data set
    labelCounts = {}                         # dictionary: class label -> occurrence count
    for featVec in dataSet:                  # count the unique class labels and their occurrences
        currentLabel = featVec[-1]           # the last element is the class label
        if currentLabel not in labelCounts: labelCounts[currentLabel] = 0
        labelCounts[currentLabel] += 1
    shannonEnt = 0.0                         # the entropy
    for key in labelCounts:
        prob = float(labelCounts[key])/numEntries
        shannonEnt -= prob * log(prob, 2)    # log base 2
    return shannonEnt
Test code
import MLIAdecisionTree
myDataSet, myLabels = MLIAdecisionTree.createDataSet()
print MLIAdecisionTree.calcShannonEnt(myDataSet)
myDataSet[0][-1] = 'maybe'
print MLIAdecisionTree.calcShannonEnt(myDataSet)
Result
0.970950594455
1.37095059445
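A quick hand check of these numbers (not part of the original notes): the first data set has 2 'yes' and 3 'no' labels; relabeling one sample as 'maybe' adds a third class, which raises the entropy.

from math import log
print -(0.4*log(0.4, 2) + 0.6*log(0.6, 2))                      # 0.970950594455
print -(0.2*log(0.2, 2) + 0.2*log(0.2, 2) + 0.6*log(0.6, 2))    # 1.37095059445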
Choosing the best way to split the data set, i.e., choosing the best feature to split on
Related functions
# Split the data set on the given feature
def splitDataSet(dataSet, axis, value):      # keep the samples whose feature at position axis equals value, with that feature removed
    retDataSet = []
    for featVec in dataSet:
        if featVec[axis] == value:
            reducedFeatVec = featVec[:axis]          # the elements before axis
            reducedFeatVec.extend(featVec[axis+1:])  # appended to the elements after axis
            # i.e., the feature at position axis is dropped
            retDataSet.append(reducedFeatVec)
    return retDataSet
# print splitDataSet(myDataSet, 0, 0)
# Choose the best way to split the data set, i.e., the best feature to split on
def chooseBestFeatureToSplit(dataSet):
    numFeatures = len(dataSet[0]) - 1        # the last column is used for the labels
    baseEntropy = calcShannonEnt(dataSet)    # empirical entropy of the data set
    bestInfoGain = 0.0                       # initial value
    bestFeature = -1                         # initial value
    for i in range(numFeatures):             # iterate over all the features
        featList = [example[i] for example in dataSet]  # all values of this feature across the samples
        uniqueVals = set(featList)           # the set of unique values (unordered, no duplicates)
        newEntropy = 0.0                     # empirical conditional entropy after splitting on this feature
        for value in uniqueVals:
            subDataSet = splitDataSet(dataSet, i, value)
            prob = len(subDataSet)/float(len(dataSet))
            newEntropy += prob * calcShannonEnt(subDataSet)
        infoGain = baseEntropy - newEntropy  # the info gain, i.e., the reduction in entropy
        if (infoGain > bestInfoGain):        # compare to the best gain so far
            bestInfoGain = infoGain          # if better than the current best, keep it
            bestFeature = i
    return bestFeature                       # returns the index of the best feature
Test code
# Split the data set (recreate it first -- one class label was changed to 'maybe' above, and the results below assume the original data)
myDataSet, myLabels = MLIAdecisionTree.createDataSet()
print MLIAdecisionTree.splitDataSet(myDataSet, 0, 0)
print MLIAdecisionTree.splitDataSet(myDataSet, 0, 1)
# Choose the best way to split the data set
print MLIAdecisionTree.chooseBestFeatureToSplit(myDataSet)
Result
[[1, 'no'], [1, 'no']]
[[1, 'yes'], [1, 'yes'], [0, 'no']]
0
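Why feature 0 wins: a quick hand check of the two information gains (not part of the original notes), using the splits printed above:

from math import log
base = 0.970950594455                        # entropy of the full data set, computed earlier
# feature 0: value 1 -> [yes, yes, no], value 0 -> [no, no] (entropy 0)
gain0 = base - (3.0/5)*(-(2.0/3)*log(2.0/3, 2) - (1.0/3)*log(1.0/3, 2))
# feature 1: value 1 -> [yes, yes, no, no] (entropy 1.0), value 0 -> [no] (entropy 0)
gain1 = base - (4.0/5)*1.0
print gain0, gain1                           # ~0.420 vs ~0.171, so feature 0 is the better split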
Building the decision tree recursively
Related functions
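createTree below calls majorityCnt, which the original notes never list. A sketch in the book's style (this is what the `import operator` at the top is for): tally each class label and return the most frequent one.

def majorityCnt(classList):
    classCount = {}
    for vote in classList:                   # tally each class label
        if vote not in classCount: classCount[vote] = 0
        classCount[vote] += 1
    # sort the (label, count) pairs by count, descending
    sortedClassCount = sorted(classCount.iteritems(), key=operator.itemgetter(1), reverse=True)
    return sortedClassCount[0][0]            # the most frequent class label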
# Build the decision tree
def createTree(dataSet, labels):             # the data set and the list of feature names (note: feature names, not class labels!)
    classList = [example[-1] for example in dataSet]  # the last column, i.e., the class labels
    if classList.count(classList[0]) == len(classList):
        return classList[0]                  # stop splitting when all of the classes are equal
    if len(dataSet[0]) == 1:                 # stop splitting when there are no more features in dataSet
        return majorityCnt(classList)        # no single class label can be returned here, so return the most frequent one
    bestFeat = chooseBestFeatureToSplit(dataSet)  # choose the best split feature for the current data set
    bestFeatLabel = labels[bestFeat]         # the corresponding feature name
    myTree = {bestFeatLabel: {}}             # the current node: the feature name mapping to its branches -- note the value is itself a dict (a node); the whole tree is represented as nested dictionaries!
    del(labels[bestFeat])                    # remove the chosen feature name from the list
    featValues = [example[bestFeat] for example in dataSet]  # all values of that feature
    uniqueVals = set(featValues)             # the unique values
    for value in uniqueVals:                 # handle each value of the best feature: one value, one branch
        subLabels = labels[:]                # copy all of labels, so recursion doesn't mess up the existing list (the chosen feature name is already removed)
        myTree[bestFeatLabel][value] = createTree(splitDataSet(dataSet, bestFeat, value), subLabels)
        # recursively build a subtree for each branch, reusing splitDataSet; apart from the arguments, the process is identical
        # recursion peels the data set like an onion, layer by layer, left to right (see Figure 3-3, p. 42 of the book)
    return myTree
Test code
# Build the decision tree recursively
myTree = MLIAdecisionTree.createTree(myDataSet, myLabels)
print myTree
Result
{'no surfacing': {0: 'no', 1: {'flippers': {0: 'no', 1: 'yes'}}}}
Plotting tree nodes
Related functions
import matplotlib.pyplot as plt

decisionNode = dict(boxstyle="sawtooth", fc="0.8")   # text-box and arrow styles for the annotations
leafNode = dict(boxstyle="round4", fc="0.8")
arrow_args = dict(arrowstyle="<-")

def plotNode(nodeTxt, centerPt, parentPt, nodeType): # draw a node annotation with an arrow from its parent
    createPlot.ax1.annotate(nodeTxt, xy=parentPt, xycoords='axes fraction',
                            xytext=centerPt, textcoords='axes fraction',
                            va="center", ha="center", bbox=nodeType, arrowprops=arrow_args)

def createPlot1():
    fig = plt.figure(1, facecolor='white')
    fig.clf()
    createPlot.ax1 = plt.subplot(111, frameon=False) # ticks left in for demo purposes; the axes are stored on createPlot so that plotNode works for both this demo and the full createPlot below
    plotNode('a decision node', (0.5, 0.1), (0.1, 0.5), decisionNode)
    plotNode('a leaf node', (0.8, 0.1), (0.3, 0.8), leafNode)
    plt.show()
Test code
# Plot tree nodes
import treePlotter
treePlotter.createPlot1()
Result
(a figure: two annotated boxes, 'a decision node' and 'a leaf node', each with an arrow drawn from its parent point)
Plotting the full annotated tree
Related functions
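plotTree and createPlot below rely on getNumLeafs and getTreeDepth, which the original notes never list. A sketch of the two recursive helpers in the book's style: walk the nested dicts, counting any non-dict value as a leaf.

def getNumLeafs(myTree):                     # the number of leaves sets the x-width of the plot
    numLeafs = 0
    firstStr = myTree.keys()[0]
    secondDict = myTree[firstStr]
    for key in secondDict.keys():
        if type(secondDict[key]).__name__ == 'dict':  # a dict is a subtree: recurse
            numLeafs += getNumLeafs(secondDict[key])
        else:                                # anything else is a leaf
            numLeafs += 1
    return numLeafs

def getTreeDepth(myTree):                    # the number of decision levels sets the y-height
    maxDepth = 0
    firstStr = myTree.keys()[0]
    secondDict = myTree[firstStr]
    for key in secondDict.keys():
        if type(secondDict[key]).__name__ == 'dict':
            thisDepth = 1 + getTreeDepth(secondDict[key])
        else:
            thisDepth = 1
        if thisDepth > maxDepth: maxDepth = thisDepth
    return maxDepth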
def plotMidText(cntrPt, parentPt, txtString):  # fill in text between the parent and child nodes
    xMid = (parentPt[0]-cntrPt[0])/2.0 + cntrPt[0]
    yMid = (parentPt[1]-cntrPt[1])/2.0 + cntrPt[1]
    createPlot.ax1.text(xMid, yMid, txtString, va="center", ha="center", rotation=30)

def plotTree(myTree, parentPt, nodeTxt):     # the first key tells you what feature was split on
    numLeafs = getNumLeafs(myTree)           # this determines the x width of this tree
    depth = getTreeDepth(myTree)
    firstStr = myTree.keys()[0]              # the text label for this node
    cntrPt = (plotTree.xOff + (1.0 + float(numLeafs))/2.0/plotTree.totalW, plotTree.yOff)
    plotMidText(cntrPt, parentPt, nodeTxt)
    plotNode(firstStr, cntrPt, parentPt, decisionNode)
    secondDict = myTree[firstStr]
    plotTree.yOff = plotTree.yOff - 1.0/plotTree.totalD
    for key in secondDict.keys():
        if type(secondDict[key]).__name__ == 'dict':  # a dictionary is a subtree, so recurse
            plotTree(secondDict[key], cntrPt, str(key))
        else:                                # otherwise it's a leaf node: plot it
            plotTree.xOff = plotTree.xOff + 1.0/plotTree.totalW
            plotNode(secondDict[key], (plotTree.xOff, plotTree.yOff), cntrPt, leafNode)
            plotMidText((plotTree.xOff, plotTree.yOff), cntrPt, str(key))
    plotTree.yOff = plotTree.yOff + 1.0/plotTree.totalD

def createPlot(inTree):
    fig = plt.figure(1, facecolor='white')
    fig.clf()
    axprops = dict(xticks=[], yticks=[])
    createPlot.ax1 = plt.subplot(111, frameon=False, **axprops)  # no ticks
    #createPlot.ax1 = plt.subplot(111, frameon=False) # ticks for demo purposes
    plotTree.totalW = float(getNumLeafs(inTree))
    plotTree.totalD = float(getTreeDepth(inTree))
    plotTree.xOff = -0.5/plotTree.totalW; plotTree.yOff = 1.0
    plotTree(inTree, (0.5, 1.0), '')
    plt.show()

def retrieveTree(i):                         # pre-stored tree structures; note there are two of them below
    listOfTrees = [{'no surfacing': {0: 'no', 1: {'flippers': {0: 'no', 1: 'yes'}}}},
                   {'no surfacing': {0: 'no', 1: {'flippers': {0: {'head': {0: 'no', 1: 'yes'}}, 1: 'no'}}}}
                   ]
    return listOfTrees[i]
Test code
# Plot the tree
myTree = treePlotter.retrieveTree(0)
print myTree
print treePlotter.getNumLeafs(myTree)
print treePlotter.getTreeDepth(myTree)
treePlotter.createPlot(myTree)
Result
{'no surfacing': {0: 'no', 1: {'flippers': {0: 'no', 1: 'yes'}}}}
3
2
(plus a figure: the tree drawn with annotated nodes and branch labels)
Storing and loading the decision tree classifier on disk
Related functions
def storeTree(inputTree, filename):  # store the decision tree classifier on disk
    import pickle
    fw = open(filename, 'w')         # text mode is fine for pickle's default (ASCII) protocol in Python 2
    pickle.dump(inputTree, fw)
    fw.close()

def grabTree(filename):              # read the decision tree classifier back from disk
    import pickle
    fr = open(filename)
    return pickle.load(fr)
Test code
MLIAdecisionTree.storeTree(myTree, 'classifierStorage.txt')
print MLIAdecisionTree.grabTree('classifierStorage.txt')
Result
{'no surfacing': {0: 'no', 1: {'flippers': {0: 'no', 1: 'yes'}}}}
Using the decision tree to predict contact lens types
Test code
# contact lens data
fr = open('lenses.txt')
lenses = [inst.strip().split('\t') for inst in fr.readlines()]
lensesLabels = ['age', 'prescript', 'astigmatic', 'tearRate']
lensesTree = MLIAdecisionTree.createTree(lenses, lensesLabels)
print lensesTree
treePlotter.createPlot(lensesTree)
Result
{'tearRate': {'reduced': 'no lenses', 'normal': {'astigmatic': {'yes': {'prescript': {'hyper': {'age': {'pre': 'no lenses', 'presbyopic': 'no lenses', 'young': 'hard'}}, 'myope': 'hard'}}, 'no': {'age': {'pre': 'soft', 'presbyopic': {'prescript': {'hyper': 'soft', 'myope': 'no lenses'}}, 'young': 'soft'}}}}}}
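The test above builds and plots the lenses tree but never classifies a new case. A minimal recursive lookup, modeled on the book's classify function (which the notes don't list), walks the nested dicts and follows the branch matching the test vector's value at each split:

def classify(inputTree, featLabels, testVec):
    firstStr = inputTree.keys()[0]               # the feature name at the current split
    secondDict = inputTree[firstStr]
    featIndex = featLabels.index(firstStr)       # translate the feature name to a column index
    for key in secondDict.keys():
        if testVec[featIndex] == key:            # follow the branch matching the test vector's value
            if type(secondDict[key]).__name__ == 'dict':
                classLabel = classify(secondDict[key], featLabels, testVec)  # a subtree: recurse
            else:
                classLabel = secondDict[key]     # a leaf: the predicted class
    return classLabel

For example, classify(lensesTree, ['age','prescript','astigmatic','tearRate'], ['young','myope','no','reduced']) follows the tearRate branch straight to 'no lenses'. Note that createTree deletes entries from the labels list passed to it, so hand classify a fresh copy of the feature names rather than the mutated lensesLabels.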