import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from scipy.io import loadmat
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score
# Load the iris dataset (UCI iris.data layout: 4 numeric features + species label).
path = '../Data/iris.data'
feature_names = ['Sepal.Length', 'Sepal.Width', 'Petal.Length', 'Petal.Width']
data = pd.read_csv(path, names=feature_names + ['Species'])
# head() returns a DataFrame; outside a notebook it must be printed to be seen.
print(data.head(10))
# Scatter plot of each feature against the species label.
for feature in feature_names:
    data.plot(kind='scatter', x=feature, y='Species')
# Label-mapping constant hoisted to module level so it is built once, not per row.
_IRIS_CLASS_LABEL = {'Iris-setosa': 0, 'Iris-versicolor': 1, 'Iris-virginica': 2}


def iris_type(s):
    """Map an iris species string label to its integer class code.

    Parameters
    ----------
    s : str
        Species name, e.g. 'Iris-setosa'. Surrounding whitespace is tolerated.

    Returns
    -------
    int
        0 for setosa, 1 for versicolor, 2 for virginica.

    Raises
    ------
    KeyError
        If ``s`` is not one of the three known species.
    """
    # strip() guards against stray whitespace around the CSV field.
    return _IRIS_CLASS_LABEL[s.strip()]
# Reload with the label column (index 4) converted to integer class codes.
Data = pd.read_csv(path, names=['Sepal.Length','Sepal.Width','Petal.Length','Petal.Width','Species'], converters = {4:iris_type})
print(Data.head(10))  # head() is a no-op in a script unless printed
# Feature matrix X: all columns except the last; target y: the last column.
# iloc[:, -1] yields a Series, so np.array(...) is already 1-D — no flatten needed.
X = np.array(Data.iloc[:, :-1])
y = np.array(Data.iloc[:, -1])
# 70/30 train/test split; fixed random_state makes the experiment reproducible.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
print(X_train.shape, y_train.shape)  # bare tuple expression had no effect in a script
# Decision tree using entropy (information gain) as the split criterion.
# Other useful hyper-parameters, left at their defaults here:
#   splitter:         'best' or 'random' — node split strategy.
#   max_depth:        int — caps tree depth to curb overfitting.
#   min_samples_leaf: int — minimum samples per leaf, used for pruning.
model = DecisionTreeClassifier(criterion='entropy')
model.fit(X_train, y_train)
result = model.predict(X_test)
# accuracy_score's documented signature is (y_true, y_pred).
# Round AFTER scaling to percent — rounding first (np.round(acc, 2)*100)
# truncates to whole percents, e.g. 0.9777 -> 98.0 instead of 97.77.
print('正确率:', np.round(accuracy_score(y_test, result) * 100, 2), '%')
# Visualize the trained decision tree and save it to a PNG file.
from sklearn.tree import export_graphviz
import pydotplus
# feature_names / class_names make the rendered nodes readable instead of
# showing anonymous X[i] indices and numeric class codes (0/1/2 per iris_type).
dot_data = export_graphviz(
    model,
    out_file=None,
    feature_names=['Sepal.Length', 'Sepal.Width', 'Petal.Length', 'Petal.Width'],
    class_names=['Iris-setosa', 'Iris-versicolor', 'Iris-virginica'],
    filled=True,  # color nodes by majority class
)
graph = pydotplus.graph_from_dot_data(dot_data)
graph.write_png('DecisionTree.png')