Qingdao University of Technology, Data Mining and Big Data Analytics: Decision Tree Lab
Contents
I. Experiment Description
1. Objective
Master the principles of decision tree classification, the construction of the model, and its evaluation.
2. Environment
Python 3, the NumPy and Pandas libraries, PyCharm, and the scikit-learn library
3. Procedure
1) Read the iris dataset "iris.data.txt".
2) Randomly split the dataset into a training set (70%) and a test set (30%).
3) Build a decision tree classifier using binary splits: implement attribute selection based on an impurity measure (including the Gini index and information gain) and grow the tree recursively.
4) Evaluate the model: classify the samples in the test set with the tree built in the previous step and compute the classification accuracy.
5) Fit scikit-learn's DecisionTreeClassifier on the same training set, compute its accuracy on the test set, and compare the accuracy of the two models.
Notes
1) Apart from the DecisionTreeClassifier class, scikit-learn must not be used.
2) You may set different stopping conditions for tree growth, such as a depth limit, a minimum record count per node, or an impurity-gain threshold, and compare the performance of the resulting trees (see the sketch after the Decision_tree function below).
3) You may split the dataset both randomly and non-randomly and observe the effect on model performance (see the sketch after set_split below).
4) The code must carry detailed comments explaining what each part does.
II. Implementation Code
The following is the executable code; it can be run in Jupyter.
#!/usr/bin/env python
# coding: utf-8
#author:191_zx
# In[1]:
import copy
import random
import numpy as np
import pandas as pd
from math import log
from sklearn.tree import DecisionTreeClassifier  # the only scikit-learn model used (note 1)
# In[2]:
data = pd.read_csv(r'iris.data.txt', delimiter=',', names=['sepal_length', 'sepal_width', 'petal_length', 'petal_width', 'class'])  # the file has no header row
# In[3]:
df = pd.DataFrame(data)
from sklearn.model_selection import train_test_split
X = df[['sepal_length', 'sepal_width', 'petal_length', 'petal_width']]
Y = df[['class']]
X = np.array(X).tolist()  # convert the feature frame to a nested list
Y = np.array(Y).tolist()  # convert the label frame to a nested list
x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=0.3)  # random 70/30 split
# In[4]:
x = np.array(df).tolist()  # the full dataset (features + label) as a nested list, for the manual split below
# In[5]:
def combine(dataset):
    '''
    Flatten a list of single-element lists, e.g. [[a], [b]] -> [a, b].
    '''
    flat = []
    for i in dataset:
        flat.append(i[0])
    return flat
# In[6]:
y_train = combine(y_train)  # flatten the label lists returned by train_test_split
y_test = combine(y_test)
# In[7]:
def Count(x):
    '''
    Input type: a list of single-column records
    Return type: dict
    Input: the attribute values to count
    Output: a dict mapping each value to its frequency
    '''
    label = {}
    for i in range(len(x)):
        label[x[i][0]] = label.get(x[i][0], 0) + 1
    return label
# In[8]:
random.shuffle(x)  # shuffle the full dataset in place
# In[9]:
def set_split(X, test_size):
    '''
    Split the dataset.
    Input:
        X: the dataset as a list of records
        test_size: test-set fraction in [0, 1] (share of the whole dataset)
    Output:
        Train, Test, y_train, y_test, all of type list
    '''
    Li = copy.deepcopy(X)
    Test = []
    Train = []
    y_train = []
    y_test = []
    random.shuffle(Li)  # random split; remove this line for a non-random split
    Test_size = round(test_size * len(X))  # test-set size
    Train_size = len(X) - Test_size       # training-set size
    while len(Li) > 0:
        while len(Li) > Train_size:
            Test.append(Li.pop())
        Train.append(Li.pop())
    for i in Test:
        y_test.append(i[4])  # the label is the last (5th) column
        i.pop()              # strip the label from the feature record
    for i in Train:
        y_train.append(i[4])
        i.pop()
    return Train, Test, y_train, y_test
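# For note 3), a minimal sketch of a non-random counterpart to set_split;
# sequential_split is a hypothetical helper, not part of the original code.
# Because iris.data.txt is ordered by class, a sequential split keeps whole
# classes out of the training set, so accuracy should drop noticeably
# compared with the random split.
def sequential_split(X, test_size):
    Li = copy.deepcopy(X)
    cut = len(Li) - round(test_size * len(Li))  # index where the test set starts
    Train, Test = Li[:cut], Li[cut:]
    y_train = [rec.pop() for rec in Train]  # pop() both strips and returns the label
    y_test = [rec.pop() for rec in Test]
    return Train, Test, y_train, y_test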
# In[10]:
def combine_xy(X_data, Y_data):
    '''
    Input: the feature records and the corresponding label list
    Output: the merged records, with the label appended as the last column
    '''
    li = copy.deepcopy(X_data)
    # append each label to its feature record
    i = len(li) - 1
    while i >= 0:
        li[i].append(Y_data[i])
        i -= 1
    return li
# In[11]:
Test = combine_xy(x_test, y_test)    # merged test set (features + label)
Train = combine_xy(x_train, y_train) # merged training set (features + label)
# In[12]:
dtc = DecisionTreeClassifier()  # the scikit-learn decision tree (baseline)
# In[13]:
dtc.fit(x_train, y_train)  # fit on the training data
# In[14]:
y_predict = dtc.predict(x_test)  # predictions on the test set
y_predict
# In[15]:
def score_predict(predict, target):
    '''
    Input: the prediction list and the ground-truth label list
    Output: accuracy in [0, 1.0]
    '''
    num = len(predict)
    total = 0
    for i in range(len(predict)):
        if predict[i] == target[i]:
            total += 1
    return total / num
# In[16]:
score = score_predict(y_predict, y_test)  # accuracy of the scikit-learn tree
score
# In[17]:
def opt_index_entropy(dataSet):
    '''
    Input: records with the class label in the last column.
    Output: the index of the best split attribute and the corresponding split value;
    records with a value below the split value form one branch, the rest the other.
    '''
    li = copy.deepcopy(dataSet)
    # compute the weighted entropy of every candidate binary split
    entropy_index = []         # split entropies, one list per attribute
    entropy = cal_entropy(li)  # entropy before splitting; minimizing the weighted
                               # child entropy below maximizes the information gain
    for x in range(len(li[0]) - 1):
        li.sort(key=lambda a: a[x])
        entropy_index.append([])
        for i in range(len(li)):
            set_a = li[:i]
            set_b = li[i:]
            entropy_index[x].append(len(set_a) / len(li) * cal_entropy(set_a)
                                    + len(set_b) / len(li) * cal_entropy(set_b))
    # find the attribute and value with the smallest weighted entropy
    a = []  # the smallest split entropy of each attribute
    for i in entropy_index:
        a.append(min(i))
    M = int(a.index(min(a)))             # index of the best split attribute
    M2 = entropy_index[M].index(min(a))  # position of the best split on that attribute
    li.sort(key=lambda a: a[M])          # re-sort by the best attribute to recover the split value
    target = li[M2][M]
    return M, target  # best attribute index and the corresponding split value
# In[18]:
def cal_entropy(dataSet):
    '''
    Input: dataSet, records with the class label in the last column.
    Computes the total information entropy of the dataset.
    '''
    if len(dataSet) == 0:
        return 0
    numEntries = len(dataSet)
    labelCounts = {}  # class label counts
    for featVec in dataSet:
        label = featVec[-1]
        if label not in labelCounts.keys():
            labelCounts[label] = 0
        labelCounts[label] += 1
    entropy = 0.0
    for key in labelCounts.keys():
        p_i = float(labelCounts[key] / numEntries)
        entropy -= p_i * log(p_i, 2)  # Entropy = -sum(p_i * log2(p_i))
    return entropy
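# The procedure calls for both information gain and the Gini index, while the
# code above implements the entropy measure only. For class proportions p_i,
# Entropy(D) = -sum(p_i * log2(p_i)) and Gini(D) = 1 - sum(p_i ** 2). Below is
# a minimal sketch of the Gini counterpart; cal_gini is a hypothetical helper
# mirroring cal_entropy, and replacing the cal_entropy calls inside
# opt_index_entropy with cal_gini yields Gini-based attribute selection.
def cal_gini(dataSet):
    if len(dataSet) == 0:
        return 0
    labelCounts = {}
    for featVec in dataSet:
        labelCounts[featVec[-1]] = labelCounts.get(featVec[-1], 0) + 1  # count labels
    gini = 1.0
    for key in labelCounts:
        p_i = labelCounts[key] / len(dataSet)
        gini -= p_i ** 2
    return gini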
# In[19]:
def Decision_tree(dataSet):
    '''
    Build a decision tree from the training data.
    Input: the training set (records with the class label in the last column)
    Output: the trained tree as a nested dict
    '''
    # recursion stopping conditions
    if len(dataSet) == 0:
        return
    elif cal_entropy(dataSet) == 0.0:  # pure node: all records share one class
        return dataSet[0][-1]
    elif len(dataSet[0]) == 1:  # attributes exhausted: return the majority class
        dic = Count(dataSet)
        return max(dic, key=dic.get)  # most frequent class, not the alphabetical max
    # initialize the tree node
    li = copy.deepcopy(dataSet)
    dec = {'index': 0, 'val': 0.0, 'entropy': 0.0, 'samples': 10, 'left': {}, 'right': {}}
    dec['entropy'] = cal_entropy(li)
    dec['index'], dec['val'] = opt_index_entropy(dataSet)
    dec['samples'] = len(li)
    # binary split on the best attribute
    left_set = []
    right_set = []
    for i in li:
        if i[dec.get('index')] < dec.get('val'):
            left_set.append(i)
        else:
            right_set.append(i)
    # drop the attribute that was just used for the split
    for i in left_set:
        del i[dec.get('index')]
    for i in right_set:
        del i[dec.get('index')]
    # if every record falls on one side, discard this level and recurse
    if len(left_set) == len(li):
        return Decision_tree(left_set)
    if len(right_set) == len(li):
        return Decision_tree(right_set)
    dec['left'] = Decision_tree(left_set)
    dec['right'] = Decision_tree(right_set)
    return dec
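# For note 2), a minimal sketch of how growth stopping conditions could be
# bolted onto Decision_tree. Decision_tree_limited, max_depth, and min_samples
# are hypothetical additions, not part of the original assignment code; when a
# limit is hit, the node becomes a leaf holding the majority class.
def Decision_tree_limited(dataSet, depth=0, max_depth=3, min_samples=5):
    if len(dataSet) == 0:
        return
    if cal_entropy(dataSet) == 0.0:  # pure node
        return dataSet[0][-1]
    # stop when attributes are exhausted, the depth limit is reached,
    # or the node is too small; return the majority class
    if len(dataSet[0]) == 1 or depth >= max_depth or len(dataSet) < min_samples:
        dic = {}
        for rec in dataSet:
            dic[rec[-1]] = dic.get(rec[-1], 0) + 1  # count the labels (last column)
        return max(dic, key=dic.get)
    li = copy.deepcopy(dataSet)
    dec = {'index': 0, 'val': 0.0, 'entropy': cal_entropy(li), 'samples': len(li),
           'left': {}, 'right': {}}
    dec['index'], dec['val'] = opt_index_entropy(li)
    left_set = [i for i in li if i[dec['index']] < dec['val']]
    right_set = [i for i in li if i[dec['index']] >= dec['val']]
    for i in left_set + right_set:
        del i[dec['index']]  # drop the attribute used for this split
    if len(left_set) == len(li):  # degenerate split: skip this level
        return Decision_tree_limited(left_set, depth, max_depth, min_samples)
    if len(right_set) == len(li):
        return Decision_tree_limited(right_set, depth, max_depth, min_samples)
    dec['left'] = Decision_tree_limited(left_set, depth + 1, max_depth, min_samples)
    dec['right'] = Decision_tree_limited(right_set, depth + 1, max_depth, min_samples)
    return dec
# Training with different max_depth and min_samples values and scoring each
# tree with score_predict gives the comparison note 2 asks for.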
# In[20]:
tree = Decision_tree(Train)  # train the scratch decision tree
# In[21]:
tree
# In[22]:
def predict_tree(dec, Test):
    '''
    Classify the test set with the trained decision tree.
    Input: dec, the trained tree (a nested dict); Test, test records with the label in the last column
    Output: the prediction list
    '''
    y = []
    for record in Test:
        feats = record[:-1]  # copy the features, dropping the label
        node = dec
        while isinstance(node, dict):
            idx = node['index']  # use the current node's split, not the root's
            if feats[idx] < node['val']:
                nxt = node['left']
            else:
                nxt = node['right']
            del feats[idx]  # mirror the attribute deletion done during training
            node = nxt
        y.append(node)  # the leaf value is the predicted class
    return y
# In[23]:
y = predict_tree(tree, Test)  # predictions of the scratch tree on the test set
# In[24]:
score2 = score_predict(y, y_test)  # accuracy of the scratch decision tree
score2
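# Step 5 asks for a side-by-side comparison of the two models; a one-line
# summary of the two accuracies (the print call is the only addition):
print('scikit-learn accuracy: %.4f, scratch tree accuracy: %.4f' % (score, score2))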
III. Test Data
iris.data.txt
5.1,3.5,1.4,0.2,Iris-setosa
4.9,3.0,1.4,0.2,Iris-setosa
4.7,3.2,1.3,0.2,Iris-setosa
4.6,3.1,1.5,0.2,Iris-setosa
5.0,3.6,1.4,0.2,Iris-setosa
5.4,3.9,1.7,0.4,Iris-setosa
4.6,3.4,1.4,0.3,Iris-setosa
5.0,3.4,1.5,0.2,Iris-setosa
4.4,2.9,1.4,0.2,Iris-setosa
4.9,3.1,1.5,0.1,Iris-setosa
5.4,3.7,1.5,0.2,Iris-setosa
4.8,3.4,1.6,0.2,Iris-setosa
4.8,3.0,1.4,0.1,Iris-setosa
4.3,3.0,1.1,0.1,Iris-setosa
5.8,4.0,1.2,0.2,Iris-setosa
5.7,4.4,1.5,0.4,Iris-setosa
5.4,3.9,1.3,0.4,Iris-setosa
5.1,3.5,1.4,0.3,Iris-setosa
5.7,3.8,1.7,0.3,Iris-setosa
5.1,3.8,1.5,0.3,Iris-setosa
5.4,3.4,1.7,0.2,Iris-setosa
5.1,3.7,1.5,0.4,Iris-setosa
4.6,3.6,1.0,0.2,Iris-setosa
5.1,3.3,1.7,0.5,Iris-setosa
4.8,3.4,1.9,0.2,Iris-setosa
5.0,3.0,1.6,0.2,Iris-setosa
5.0,3.4,1.6,0.4,Iris-setosa
5.2,3.5,1.5,0.2,Iris-setosa
5.2,3.4,1.4,0.2,Iris-setosa
4.7,3.2,1.6,0.2,Iris-setosa
4.8,3.1,1.6,0.2,Iris-setosa
5.4,3.4,1.5,0.4,Iris-setosa
5.2,4.1,1.5,0.1,Iris-setosa
5.5,4.2,1.4,0.2,Iris-setosa
4.9,3.1,1.5,0.1,Iris-setosa
5.0,3.2,1.2,0.2,Iris-setosa
5.5,3.5,1.3,0.2,Iris-setosa
4.9,3.1,1.5,0.1,Iris-setosa
4.4,3.0,1.3,0.2,Iris-setosa
5.1,3.4,1.5,0.2,Iris-setosa
5.0,3.5,1.3,0.3,Iris-setosa
4.5,2.3,1.3,0.3,Iris-setosa
4.4,3.2,1.3,0.2,Iris-setosa
5.0,3.5,1.6,0.6,Iris-setosa
5.1,3.8,1.9,0.4,Iris-setosa
4.8,3.0,1.4,0.3,Iris-setosa
5.1,3.8,1.6,0.2,Iris-setosa
4.6,3.2,1.4,0.2,Iris-setosa
5.3,3.7,1.5,0.2,Iris-setosa
5.0,3.3,1.4,0.2,Iris-setosa
7.0,3.2,4.7,1.4,Iris-versicolor
6.4,3.2,4.5,1.5,Iris-versicolor
6.9,3.1,4.9,1.5,Iris-versicolor
5.5,2.3,4.0,1.3,Iris-versicolor
6.5,2.8,4.6,1.5,Iris-versicolor
5.7,2.8,4.5,1.3,Iris-versicolor
6.3,3.3,4.7,1.6,Iris-versicolor
4.9,2.4,3.3,1.0,Iris-versicolor
6.6,2.9,4.6,1.3,Iris-versicolor
5.2,2.7,3.9,1.4,Iris-versicolor
5.0,2.0,3.5,1.0,Iris-versicolor
5.9,3.0,4.2,1.5,Iris-versicolor
6.0,2.2,4.0,1.0,Iris-versicolor
6.1,2.9,4.7,1.4,Iris-versicolor
5.6,2.9,3.6,1.3,Iris-versicolor
6.7,3.1,4.4,1.4,Iris-versicolor
5.6,3.0,4.5,1.5,Iris-versicolor
5.8,2.7,4.1,1.0,Iris-versicolor
6.2,2.2,4.5,1.5,Iris-versicolor
5.6,2.5,3.9,1.1,Iris-versicolor
5.9,3.2,4.8,1.8,Iris-versicolor
6.1,2.8,4.0,1.3,Iris-versicolor
6.3,2.5,4.9,1.5,Iris-versicolor
6.1,2.8,4.7,1.2,Iris-versicolor
6.4,2.9,4.3,1.3,Iris-versicolor
6.6,3.0,4.4,1.4,Iris-versicolor
6.8,2.8,4.8,1.4,Iris-versicolor
6.7,3.0,5.0,1.7,Iris-versicolor
6.0,2.9,4.5,1.5,Iris-versicolor
5.7,2.6,3.5,1.0,Iris-versicolor
5.5,2.4,3.8,1.1,Iris-versicolor
5.5,2.4,3.7,1.0,Iris-versicolor
5.8,2.7,3.9,1.2,Iris-versicolor
6.0,2.7,5.1,1.6,Iris-versicolor
5.4,3.0,4.5,1.5,Iris-versicolor
6.0,3.4,4.5,1.6,Iris-versicolor
6.7,3.1,4.7,1.5,Iris-versicolor
6.3,2.3,4.4,1.3,Iris-versicolor
5.6,3.0,4.1,1.3,Iris-versicolor
5.5,2.5,4.0,1.3,Iris-versicolor
5.5,2.6,4.4,1.2,Iris-versicolor
6.1,3.0,4.6,1.4,Iris-versicolor
5.8,2.6,4.0,1.2,Iris-versicolor
5.0,2.3,3.3,1.0,Iris-versicolor
5.6,2.7,4.2,1.3,Iris-versicolor
5.7,3.0,4.2,1.2,Iris-versicolor
5.7,2.9,4.2,1.3,Iris-versicolor
6.2,2.9,4.3,1.3,Iris-versicolor
5.1,2.5,3.0,1.1,Iris-versicolor
5.7,2.8,4.1,1.3,Iris-versicolor