from sklearn.tree import DecisionTreeClassifier
from sklearn import tree
from sklearn.model_selection import train_test_split
import pandas as pd
import graphviz
from sklearn.datasets import load_wine
# Load the wine dataset (a sklearn Bunch object, which behaves like a dict).
wine = load_wine()
print(wine.data)
print(wine.target)
feature_names = wine.feature_names
target_names = wine.target_names
# Merge data and target into a single DataFrame.
# Name the columns explicitly: with the default integer labels the two
# frames would both contain a column named 0 (feature column 0 and the
# target column), leaving the merged frame with duplicate, ambiguous labels.
datas = pd.concat(
    [
        pd.DataFrame(wine.data, columns=feature_names),
        pd.DataFrame(wine.target, columns=["target"]),
    ],
    axis=1,
)
print(datas)
print(feature_names)
print(target_names)
# Split into training and test sets (30% held out for testing).
# NOTE: no random_state here, so the split differs on every run — this is
# intentional; the reproducibility demonstration below relies on it.
x_train, x_test, y_train, y_test = train_test_split(wine.data, wine.target, test_size=0.3)
# Build two trees to compare impurity criteria: Gini vs. entropy.
# fit() returns the estimator itself, so build-and-train can be chained.
clf_01 = DecisionTreeClassifier(criterion='gini').fit(x_train, y_train)
clf_02 = DecisionTreeClassifier(criterion='entropy').fit(x_train, y_train)
# Accuracy of each model on the held-out test set.
score_01 = clf_01.score(x_test, y_test)
score_02 = clf_02.score(x_test, y_test)
print(score_01)
print(score_02)
# Render the fitted tree as a graph for visual inspection.
dot_data = tree.export_graphviz(
    clf_01,
    out_file=None,
    feature_names=feature_names,
    class_names=target_names,
    filled=True,
    rounded=True,
)
graph = graphviz.Source(dot_data)
# graph.view()  # would save a PDF image of the tree to the current directory
# Inspect the importance weight of each feature (i.e. each column) in the model.
print(clf_01.feature_importances_)
print(list(zip(feature_names, clf_01.feature_importances_)))
# Observe: each generated tree can pick different features at its split nodes.
# That is because the dataset is split randomly on every run, and a decision
# tree does not use all features. Fixing random_state to a constant makes the
# model reproducible, so the chosen splits no longer change between runs.
clf_03 = DecisionTreeClassifier(criterion='gini', random_state=30).fit(x_train, y_train)
score_03 = clf_03.score(x_test, y_test)
print(score_03)
print(clf_03.feature_importances_)
print(list(zip(feature_names, clf_03.feature_importances_)))
# The splitter parameter also controls the model's randomness:
# splitter='random' chooses splits more randomly instead of always the best.
clf_04 = DecisionTreeClassifier(criterion='gini', splitter='random')
# Rebind the fitted estimator to clf_04 (fit returns self). The original
# bound it to a stray alias `clf4`, inconsistent with every other model
# above and never used afterwards.
clf_04 = clf_04.fit(x_train, y_train)
score = clf_04.score(x_test, y_test)
print(score)