对于决策树的参数,可以多次进行网格搜索、逐步细化参数的取值范围,从而得到更高的准确率。
整体代码:
import pandas as pd
import numpy as np
# pip install missingno
import missingno as msno
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import GridSearchCV
# Load the zoo dataset (zoo.csv must sit next to this script).
data = pd.read_csv('zoo.csv')
print(data.head())
# Inspect the data shape.
print(data.shape)
# (101, 18)
# Inspect the dtype of each column.
print(data.dtypes)
print(data.describe())
# Visualize missing values per column.
p = msno.bar(data)
plt.show()
# Heatmap of pairwise correlation coefficients.
# Restrict to numeric columns first: on pandas >= 2.0, DataFrame.corr()
# raises on the string column `animal_name` instead of silently skipping it.
plt.figure(figsize=(20, 20))
p = sns.heatmap(data.select_dtypes(include=[np.number]).corr(),
                annot=True, annot_kws={'fontsize': 15}, square=True)
plt.show()
# Class distribution. The top-level pd.value_counts() function is deprecated
# (removed in pandas 3.0); use the Series method instead.
print(data["class_type"].value_counts())
# Build the feature matrix and label vector:
# drop the identifier column `animal_name` and the target `class_type`.
x_data = data.drop(['animal_name', 'class_type'], axis=1)
y_data = data['class_type']
from sklearn.model_selection import train_test_split
# Split the dataset. stratify=y keeps the class proportions in the train and
# test sets equal to those of the full dataset (e.g. if classes 0 and 1 occur
# at a 1:2 ratio before the split, they do in y_train and y_test as well).
# random_state fixes the shuffle so the reported scores are reproducible;
# the original script had no seed, so its commented accuracies varied per run.
x_train, x_test, y_train, y_test = train_test_split(
    x_data, y_data, test_size=0.3, stratify=y_data, random_state=42)
# Seed the tree as well: feature ties are broken randomly otherwise.
tree = DecisionTreeClassifier(random_state=42)
tree.fit(x_train, y_train)
print(tree.score(x_test, y_test))
# baseline accuracy, e.g. ~0.90
# Model tuning: coarse grid search over tree depth and split/leaf sizes.
# NOTE: the `iid` parameter was deprecated in scikit-learn 0.22 and removed in
# 0.24, so passing iid=True raises TypeError on any modern version — dropped.
# The estimator is seeded so the search result is reproducible.
param_grid = {'max_depth': [5, 10, 15, 20, 25],
              'min_samples_split': [2, 3, 4, 5, 6],
              'min_samples_leaf': [1, 2, 3, 4]}
model = GridSearchCV(DecisionTreeClassifier(random_state=42), param_grid, cv=3)
model.fit(x_train, y_train)
print(model.best_estimator_)
# e.g. max_depth=15, min_samples_leaf=1, min_samples_split=2
print(model.score(x_test, y_test))
# e.g. 0.967741935483871
# Refine: search a finer max_depth range around the best coarse value.
param_grid = {'max_depth': [8, 9, 10, 11, 12],
              'min_samples_split': [2, 3, 4, 5, 6],
              'min_samples_leaf': [1, 2, 3, 4]}
model2 = GridSearchCV(DecisionTreeClassifier(random_state=42), param_grid, cv=3)
model2.fit(x_train, y_train)
print(model2.best_estimator_)
# e.g. max_depth=8, min_samples_leaf=1, min_samples_split=2
print(model2.score(x_test, y_test))
# e.g. 1.0