from pandas import read_csv
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
from sklearn.ensemble import BaggingClassifier
from sklearn.tree import DecisionTreeClassifier#袋装算法
from sklearn.ensemble import RandomForestClassifier#随机森林
from sklearn.ensemble import ExtraTreesClassifier#极端随机树
from sklearn.ensemble import AdaBoostClassifier#AdaBoost,迭代算法
from sklearn.ensemble import GradientBoostingClassifier#随机梯度提升(GBM)
# Load the Pima Indians diabetes dataset.
# Raw string (r'...') avoids backslash-escape problems in the Windows path:
# the original non-raw literal relied on '\e', '\M', '\p' being invalid
# escapes, which emits a SyntaxWarning on modern Python and would silently
# corrupt the path if a segment ever started with 't', 'n', etc.
filename = r'D:\example\MachineLearning-master\pima_data.csv'
names = ['preg', 'plas', 'pres', 'skin', 'test', 'mass', 'pedi', 'age', 'class']
data = read_csv(filename, names=names)
# Split into input features (first 8 columns) and the output class label.
array = data.values
X = array[:, 0:8]
Y = array[:, 8]
# Bagged decision trees (bagging ensemble), evaluated with 10-fold CV.
num_folds = 10
seed = 7
# shuffle=True is required: modern scikit-learn raises
# "ValueError: Setting a random_state has no effect since shuffle is False"
# when random_state is passed to an unshuffled KFold.
kfold = KFold(n_splits=num_folds, shuffle=True, random_state=seed)
cart = DecisionTreeClassifier()
num_tree = 100
# 'estimator' replaces the 'base_estimator' keyword, which was deprecated
# in scikit-learn 1.2 and removed in 1.4.
model = BaggingClassifier(estimator=cart, n_estimators=num_tree, random_state=seed)
result = cross_val_score(model, X, Y, cv=kfold)
print('袋装算法:', result.mean())
# Random forest, evaluated with 10-fold CV.
# shuffle=True is required when random_state is set on KFold (modern
# scikit-learn raises ValueError otherwise).
kfold = KFold(n_splits=num_folds, shuffle=True, random_state=seed)
num_tree = 100
max_features = 3  # number of features considered at each split
model = RandomForestClassifier(n_estimators=num_tree, random_state=seed, max_features=max_features)
result = cross_val_score(model, X, Y, cv=kfold)
print('随机森林:', result.mean())
# Extremely randomized trees (Extra-Trees), evaluated with 10-fold CV.
# shuffle=True is required when random_state is set on KFold (modern
# scikit-learn raises ValueError otherwise).
kfold = KFold(n_splits=num_folds, shuffle=True, random_state=seed)
num_tree = 100
max_features = 3  # number of features considered at each split
model = ExtraTreesClassifier(n_estimators=num_tree, random_state=seed, max_features=max_features)
result = cross_val_score(model, X, Y, cv=kfold)
print('极端随机树:', result.mean())
# AdaBoost (adaptive boosting), evaluated with 10-fold CV.
# shuffle=True is required when random_state is set on KFold (modern
# scikit-learn raises ValueError otherwise).
kfold = KFold(n_splits=num_folds, shuffle=True, random_state=seed)
num_tree = 30
model = AdaBoostClassifier(n_estimators=num_tree, random_state=seed)
result = cross_val_score(model, X, Y, cv=kfold)
print('AdaBoost,迭代算法:', result.mean())
# Stochastic gradient boosting (GBM), evaluated with 10-fold CV.
# shuffle=True is required when random_state is set on KFold (modern
# scikit-learn raises ValueError otherwise).
kfold = KFold(n_splits=num_folds, shuffle=True, random_state=seed)
num_tree = 30
model = GradientBoostingClassifier(n_estimators=num_tree, random_state=seed)
result = cross_val_score(model, X, Y, cv=kfold)
print('随机梯度提升:', result.mean())