决策树,主要用来分类和回归
一、首先看下分类决策树
# Load the iris dataset
from sklearn.datasets import load_iris
iris = load_iris()
X, y = iris.data, iris.target
features = iris.feature_names
# Load cross-validation utilities.
# NOTE: sklearn.cross_validation was removed in scikit-learn 0.20;
# the current module is sklearn.model_selection, and KFold no longer
# takes the sample count (n=) — it only needs n_splits.
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import KFold
crossvalidation = KFold(n_splits=5, shuffle=True, random_state=1)
# Load the decision tree and search for the best max_depth.
import numpy as np
from sklearn import tree
for depth in range(1, 10):
    tree_classifier = tree.DecisionTreeClassifier(max_depth=depth, random_state=0)
    # Stop early: once raising max_depth no longer grows the fitted
    # tree, deeper settings cannot change the model.
    if tree_classifier.fit(X, y).tree_.max_depth < depth:
        break
    score = np.mean(cross_val_score(tree_classifier, X, y, scoring='accuracy', cv=crossvalidation, n_jobs=1))
    print ('Depth: %i Accuracy: %.3f' % (depth,score))
# Pruning parameters worth tuning next:
# min_samples_leaf=3
# min_samples_split
结果显示最优方案为深度4,即树的最大深度为4层
为优化结果,让分类方案更加简洁,可以设置树节点最小样本数。
# Prune by node-size limits instead of depth: every internal split must
# cover at least 30 samples and every leaf keeps at least 10, which
# yields a much shallower, simpler tree.
tree_classifier = tree.DecisionTreeClassifier(
    min_samples_split=30, min_samples_leaf=10, random_state=0)
tree_classifier.fit(X, y)
fold_scores = cross_val_score(tree_classifier, X, y, scoring='accuracy',
                              cv=crossvalidation, n_jobs=1)
score = np.mean(fold_scores)
print ('Accuracy: %.3f' % score)
结果如下:准确度为0.913,且树只有2层。
二、回归决策树
# Build a regression tree
# Load the Boston housing dataset.
# NOTE(review): load_boston was removed in scikit-learn 1.2 over data
# ethics concerns — on recent versions substitute e.g.
# sklearn.datasets.fetch_california_housing() (the reported MSE will
# differ with a different dataset).
from sklearn.datasets import load_boston
boston = load_boston()
X, y = boston.data, boston.target
features = boston.feature_names
# Regression tree with the same node-size pruning as the classifier.
from sklearn.tree import DecisionTreeRegressor
# Use the directly-imported name rather than tree.DecisionTreeRegressor,
# so the import above is actually what the code relies on.
regression_tree = DecisionTreeRegressor(min_samples_split=30, min_samples_leaf=10, random_state=0)
regression_tree.fit(X, y)
# cross_val_score returns the *negative* MSE for this scoring string,
# hence abs() when reporting.
score = np.mean(cross_val_score(regression_tree, X, y, scoring='neg_mean_squared_error', cv=crossvalidation, n_jobs=1))
print('Mean squared error: %.3f' % abs(score))
均方差仅有11.4,效果比较好。