一、Bagging
第一步,导入数据和库;
# Step 1: import libraries and load the modeling dataset.
from sklearn import datasets  # NOTE(review): imported but not used in this script
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Configure matplotlib so CJK axis labels and minus signs render correctly.
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False
# Load the Excel workbook; the slicing further below treats column 0 as the
# class label and the remaining columns as features.
data = pd.read_excel('F:\\Desktop\\建模数据.xlsx')
# Preview the first five rows (only displays in an interactive session/notebook).
data[:5]
第二步,数据处理;
# Step 2: split features/label into train and test sets, then standardize.
X = data.iloc[:, 1:]  # every column after the first is a feature
y = data.iloc[:, 0]   # first column is the class label
# FIX: sklearn.cross_validation was removed in scikit-learn 0.20;
# train_test_split now lives in sklearn.model_selection.
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3, random_state = 0)
from sklearn.preprocessing import StandardScaler
stdsc = StandardScaler()
# Fit the scaler on the training data only, then reuse its statistics for the test set.
X_train_std = stdsc.fit_transform(X_train)
# NOTE(review): the standardized arrays are never used below — all models are fit
# on the raw X_train/X_test. Trees are scale-invariant so results are unaffected,
# but confirm this was intentional.
X_test_std = stdsc.transform(X_test)
第三步,Bagging;
# Step 3: train a single decision tree as a baseline and a bagging ensemble of
# 500 such trees, then compare train/test accuracy of both.
from sklearn.ensemble import BaggingClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score

# Fully-grown entropy tree: low bias, high variance — the case bagging helps most.
tree = DecisionTreeClassifier(criterion='entropy',
                              max_depth=None,
                              random_state=1)
# FIX: the `base_estimator` keyword was renamed to `estimator` in scikit-learn 1.2
# and removed entirely in 1.4.
bag = BaggingClassifier(estimator=tree,
                        n_estimators=500,
                        max_samples=1.0,        # each bootstrap sample is as large as the training set
                        max_features=1.0,       # every estimator sees all features
                        bootstrap=True,         # sample rows with replacement
                        bootstrap_features=False,
                        n_jobs=1,
                        random_state=1)

# Baseline: single tree.
tree = tree.fit(X_train, y_train)
y_train_pred = tree.predict(X_train)
y_test_pred = tree.predict(X_test)
tree_train = accuracy_score(y_train, y_train_pred)
tree_test = accuracy_score(y_test, y_test_pred)
print('Decision tree train/test accuracies %.3f/%.3f'
      % (tree_train, tree_test))

# Ensemble: bagged trees.
bag = bag.fit(X_train, y_train)
y_train_pred = bag.predict(X_train)
y_test_pred = bag.predict(X_test)
bag_train = accuracy_score(y_train, y_train_pred)
bag_test = accuracy_score(y_test, y_test_pred)
print('Bagging train/test accuracies %.3f/%.3f'
      % (bag_train, bag_test))
第四步,模型评价;
(1)混淆矩阵
# Step 4 (1): confusion matrix of the bagging model on the test set, printed
# and rendered as an annotated heat map (x axis: predicted, y axis: true).
from sklearn.metrics import confusion_matrix

y_pred1 = bag.predict(X_test)
confmat1 = confusion_matrix(y_true=y_test, y_pred=y_pred1)
print(confmat1)

fig, ax = plt.subplots(figsize=(2.5, 2.5))
ax.matshow(confmat1, cmap=plt.cm.Blues, alpha=0.3)
# Write each cell's count at its grid position.
n_rows, n_cols = confmat1.shape
for i in range(n_rows):
    for j in range(n_cols):
        ax.text(x=j, y=i, s=confmat1[i, j], va='center', ha='center')
plt.xlabel('预测类标')
plt.ylabel('真实类标')
plt.show()
![在这里插入图片描述](https://i-blog.csdnimg.cn/blog_migrate/719572de822cfeb78a6ad7c776aabb92.png)
(2)ROC曲线
# Step 4 (2): ROC curves on the test set — single decision tree vs. the bagging
# ensemble, plus the random-guessing diagonal and the perfect-classifier corner.
from sklearn.metrics import roc_curve, auc
# FIX: dropped `from scipy import interp` — it was never used, and scipy.interp
# has been removed from modern SciPy (numpy.interp is the replacement).

fig = plt.figure(figsize=(7, 5))
# predict_proba[:, 1] is the score of the positive class (pos_label=1).
probas = tree.fit(X_train, y_train).predict_proba(X_test)
probas1 = bag.fit(X_train, y_train).predict_proba(X_test)
fpr, tpr, thresholds = roc_curve(y_test, probas[:, 1], pos_label=1)
fpr1, tpr1, thresholds1 = roc_curve(y_test, probas1[:, 1], pos_label=1)
roc_auc = auc(fpr, tpr)
roc_auc1 = auc(fpr1, tpr1)
plt.plot(fpr, tpr, lw=1, color = 'b', label='决策树ROC (area = %0.2f)'
         % ( roc_auc))
plt.plot(fpr1, tpr1, lw=1, color = 'y', label='BaggingROC (area = %0.2f)'
         % ( roc_auc1))
# Chance-level diagonal.
plt.plot([0, 1], [0, 1], linestyle='--', color=(0.6, 0.6, 0.6), label='random guessing')
# Ideal classifier hugs the top-left corner.
plt.plot([0, 0, 1],
         [0, 1, 1],
         lw=2,
         linestyle=':',
         color='black',
         label='perfect performance')
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.xlabel('假正率')
plt.ylabel('真正率')
plt.title('')
plt.legend(loc="lower right")
plt.show()
![在这里插入图片描述](https://i-blog.csdnimg.cn/blog_migrate/79abf762ce2f111bc4afd186a73defbe.png)
二、Adaboost
第一步,Adaboost;
# Part 2, step 1: AdaBoost with decision stumps (max_depth=1) as weak learners,
# compared against a single stump baseline.
from sklearn.ensemble import AdaBoostClassifier

# A depth-1 "stump": high bias, low variance — the weak learner boosting expects.
tree = DecisionTreeClassifier(criterion='entropy',
                              max_depth=1,
                              random_state=0)
# FIX: the `base_estimator` keyword was renamed to `estimator` in scikit-learn 1.2
# and removed entirely in 1.4.
ada = AdaBoostClassifier(estimator=tree,
                         n_estimators=500,
                         learning_rate=0.1,
                         random_state=0)

# Baseline: single stump.
tree = tree.fit(X_train, y_train)
y_train_pred = tree.predict(X_train)
y_test_pred = tree.predict(X_test)
tree_train = accuracy_score(y_train, y_train_pred)
tree_test = accuracy_score(y_test, y_test_pred)
print('Decision tree train/test accuracies %.3f/%.3f'
      % (tree_train, tree_test))

# Boosted ensemble.
ada = ada.fit(X_train, y_train)
y_train_pred = ada.predict(X_train)
y_test_pred = ada.predict(X_test)
ada_train = accuracy_score(y_train, y_train_pred)
ada_test = accuracy_score(y_test, y_test_pred)
print('AdaBoost train/test accuracies %.3f/%.3f'
      % (ada_train, ada_test))
第二步,模型评价
(1)混淆矩阵
# Step 2 (1): confusion matrix of the AdaBoost model on the test set.
from sklearn.metrics import confusion_matrix

y_pred1 = ada.predict(X_test)
confmat1 = confusion_matrix(y_true=y_test, y_pred=y_pred1)
print(confmat1)

fig, ax = plt.subplots(figsize=(2.5, 2.5))
ax.matshow(confmat1, cmap=plt.cm.Blues, alpha=0.3)
# Annotate every cell with its count.
for i, j in np.ndindex(*confmat1.shape):
    ax.text(x=j, y=i, s=confmat1[i, j], va='center', ha='center')
plt.xlabel('预测类标')
plt.ylabel('真实类标')
plt.show()
![在这里插入图片描述](https://i-blog.csdnimg.cn/blog_migrate/d774fdd6de52d37c0fef15bb13a48f5a.png)
(2)ROC曲线
# Step 2 (2): ROC curves — single stump vs. AdaBoost ensemble on the test set.
from sklearn.metrics import roc_curve, auc
# FIX: dropped `from scipy import interp` — it was never used, and scipy.interp
# has been removed from modern SciPy (numpy.interp is the replacement).

fig = plt.figure(figsize=(7, 5))
# predict_proba[:, 1] is the score of the positive class (pos_label=1).
probas = tree.fit(X_train, y_train).predict_proba(X_test)
probas1 = ada.fit(X_train, y_train).predict_proba(X_test)
fpr, tpr, thresholds = roc_curve(y_test, probas[:, 1], pos_label=1)
fpr1, tpr1, thresholds1 = roc_curve(y_test, probas1[:, 1], pos_label=1)
roc_auc = auc(fpr, tpr)
roc_auc1 = auc(fpr1, tpr1)
plt.plot(fpr, tpr, lw=1, color = 'b', label='决策树ROC (area = %0.2f)'
         % ( roc_auc))
plt.plot(fpr1, tpr1, lw=1, color = 'y', label='AdaROC (area = %0.2f)'
         % ( roc_auc1))
# Chance-level diagonal and perfect-classifier reference.
plt.plot([0, 1], [0, 1], linestyle='--', color=(0.6, 0.6, 0.6), label='random guessing')
plt.plot([0, 0, 1], [0, 1, 1], lw=2, linestyle=':', color='black',label='perfect performance')
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.xlabel('假正率')
plt.ylabel('真正率')
plt.title('')
plt.legend(loc="lower right")
plt.show()
![在这里插入图片描述](https://i-blog.csdnimg.cn/blog_migrate/287dd904ae114bf0317ad225c0fb5e3f.png)