Implementing the ID3 Algorithm in Python

from __future__ import division
import math

table = {'age': {'young', 'middle', 'old'}, 'income': {'high', 'middle', 'low'},
         'student': {'yes', 'no'}, 'credit': {'good', 'superior'}, 'buy computer': {'yes', 'no'}}
attrIndex = {'age': 0, 'income': 1, 'student': 2, 'credit': 3, 'buy computer': 4}
attrList = ['age', 'income', 'student', 'credit']
allDataSet = [
    ['young', 'high', 'no', 'good', 'no'], ['young', 'high', 'no', 'superior', 'no'],
    ['middle', 'high', 'no', 'superior', 'yes'], ['old', 'middle', 'no', 'good', 'yes'],
    ['young', 'middle', 'no', 'good', 'no'], ['young', 'low', 'yes', 'good', 'yes'],
    ['middle', 'high', 'yes', 'good', 'yes'], ['old', 'middle', 'no', 'superior', 'no'],
    ['young', 'high', 'yes', 'good', 'yes'], ['middle', 'middle', 'no', 'good', 'no']
]
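Each row of allDataSet lists the attribute values in the column order defined by attrIndex, with the class label 'buy computer' last. For example:

row = ['young', 'high', 'no', 'good', 'no']
print(row[attrIndex['student']])        # no
print(row[attrIndex['buy computer']])   # no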

# Compute the expected (conditional) entropy of the 'buy computer' label
# after splitting dataSet on attribute attr
def entropy(attr, dataSet):
    # For every value of attr, count how many rows there are and how many are 'yes'/'no'
    valueCount = {v: {'yes': 0, 'no': 0, 'count': 0} for v in table[attr]}
    for row in dataSet:
        vName = row[attrIndex[attr]]
        decAttrVal = row[attrIndex['buy computer']]  # 'yes' or 'no'
        valueCount[vName]['count'] = valueCount[vName]['count'] + 1
        valueCount[vName][decAttrVal] = valueCount[vName][decAttrVal] + 1
    # Entropy of the label within each subset (0 for empty subsets)
    infoMap = {v: 0 for v in table[attr]}
    for v in valueCount:
        if valueCount[v]['count'] == 0:
            infoMap[v] = 0
        else:
            p1 = valueCount[v]['yes'] / valueCount[v]['count']
            p2 = valueCount[v]['no'] / valueCount[v]['count']
            infoMap[v] = -((0 if p1 == 0 else p1 * math.log(p1, 2)) + (0 if p2 == 0 else p2 * math.log(p2, 2)))
    # Weight each subset's entropy by its share of the rows
    s = 0
    for v in valueCount:
        s = s + valueCount[v]['count']
    propMap = {v: (valueCount[v]['count'] / s) for v in valueCount}
    i = 0
    for v in valueCount:
        i = i + infoMap[v] * propMap[v]
    return i
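In information-theory terms, entropy(attr, dataSet) returns the expected (conditional) entropy of the 'buy computer' label after the data is split on attr:

    E(A) = \sum_{v \in \mathrm{values}(A)} \frac{|S_v|}{|S|} \, \mathrm{Info}(S_v),
    \qquad
    \mathrm{Info}(S_v) = -p_{\mathrm{yes}} \log_2 p_{\mathrm{yes}} - p_{\mathrm{no}} \log_2 p_{\mathrm{no}}

ID3's information gain is Gain(A) = Info(S) - E(A); because Info(S) is the same for every candidate attribute on a given data set, minimizing E(A) is equivalent to maximizing the gain.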

# Node data structure for the decision tree
class Node(object):
    def __init__(self, attrName):
        self.attr = attrName
        self.result = None  # class label, filled in only for leaf nodes
        # One placeholder child per possible value of this attribute;
        # the placeholders are replaced while the tree is being built
        self.childNodes = {v: Node('') for v in table[attrName]} if attrName != '' else {}
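For example, constructing a node for the 'student' attribute immediately creates one placeholder child per possible value of that attribute:

n = Node('student')
print(sorted(n.childNodes))  # ['no', 'yes']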

# Filter the rows whose value of condition['attr'] equals condition['val']
def filtrate(dataSet, condition):
    result = []
    for row in dataSet:
        if row[attrIndex[condition['attr']]] == condition['val']:
            result.append(row)
    return result
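As a usage example, the sub-table of rows where 'student' is 'yes' can be pulled out like this (the printed count is just a sanity check):

students = filtrate(allDataSet, {'attr': 'student', 'val': 'yes'})
print(len(students))  # 3 of the 10 training rows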

# Choose the split attribute: the one whose split yields the smallest
# conditional entropy, i.e. the largest information gain
def maxEntropy(dataSet, attrList):
    if len(attrList) == 1:
        return attrList[0]
    else:
        attr = attrList[0]
        maxE = entropy(attr, dataSet)
        for a in attrList:
            e = entropy(a, dataSet)
            if maxE > e:  # a lower conditional entropy means a better split
                maxE = e
                attr = a
        return attr
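The post only ever compares conditional entropies, which is enough because the data set's own entropy Info(S) is a constant during one selection. If you want the information gain explicitly, a minimal sketch could look like this (datasetInfo and infoGain are illustrative helpers, not part of the original code):

def datasetInfo(dataSet):
    # Entropy of the 'buy computer' label over the whole data set
    yes = sum(1 for row in dataSet if row[attrIndex['buy computer']] == 'yes')
    p = yes / len(dataSet)
    if p == 0 or p == 1:
        return 0
    return -(p * math.log(p, 2) + (1 - p) * math.log(1 - p, 2))

def infoGain(attr, dataSet):
    # Information gain = dataset entropy minus conditional entropy after the split
    return datasetInfo(dataSet) - entropy(attr, dataSet)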

# The build can stop early when all remaining rows carry the same class label
def endBuild(dataSet):
    if len(dataSet) == 1:
        return True
    buy = dataSet[0][attrIndex['buy computer']]
    for row in dataSet:
        if buy != row[attrIndex['buy computer']]:
            return False
    return True  # every row agrees on the label

# Recursively build the decision tree
def buildDecisionTree(dataSet, root, attrList):
    # Leaf: no attributes left, or all rows share one label
    if len(attrList) == 0 or endBuild(dataSet):
        root.attr = 'buy computer'
        root.result = dataSet[0][attrIndex['buy computer']]  # label of the first row
        root.childNodes = {}
        return
    attr = root.attr
    for v in root.childNodes:
        childDataSet = filtrate(dataSet, {"attr": attr, "val": v})
        if len(childDataSet) == 0:
            # No training rows reach this branch: default the leaf to 'no'
            root.childNodes[v] = Node('buy computer')
            root.childNodes[v].result = 'no'
            root.childNodes[v].childNodes = {}
            continue
        else:
            childAttrList = [a for a in attrList]
            childAttrList.remove(attr)
            if len(childAttrList) == 0:
                root.childNodes[v] = Node('buy computer')
                root.childNodes[v].result = childDataSet[0][attrIndex['buy computer']]
                root.childNodes[v].childNodes = {}
            else:
                # Recurse with the best remaining attribute for this branch
                childAttr = maxEntropy(childDataSet, childAttrList)
                root.childNodes[v] = Node(childAttr)
                buildDecisionTree(childDataSet, root.childNodes[v], childAttrList)
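To inspect what the builder produced, a small recursive printer is handy; printTree below is an illustrative addition, not part of the original post:

def printTree(node, indent=''):
    # Leaves carry the final label in node.result
    if node.attr == 'buy computer':
        print(indent + '-> ' + node.result)
        return
    for v in node.childNodes:
        print(indent + node.attr + ' = ' + v)
        printTree(node.childNodes[v], indent + '    ')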

# Predict the label for a single row of attribute values
def predict(root, row):
    if root.attr == 'buy computer':
        return root.result
    root = root.childNodes[row[attrIndex[root.attr]]]
    return predict(root, row)

rootAttr = maxEntropy(allDataSet, attrList)
rootNode = Node(rootAttr)
print(rootNode.attr)
buildDecisionTree(allDataSet, rootNode, attrList)
print(predict(rootNode, ['old', 'low', 'yes', 'good']))
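As a quick sanity check (again not part of the original post), the training rows can be fed back through the finished tree to see how many labels it reproduces:

correct = 0
for row in allDataSet:
    if predict(rootNode, row[:4]) == row[attrIndex['buy computer']]:
        correct = correct + 1
print('%d/%d training rows reproduced' % (correct, len(allDataSet)))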
