文章目录
1.集成学习简介
2.Voting
1)Voting能够提高准确度的原因
2)Voting的原理
硬投票分类器
软投票分类器
3)代码实现
# Voting demo - hard and soft voting classifiers
from sklearn.datasets import make_moons
from sklearn.ensemble import VotingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier

# Load the dataset.
# n_samples is the number of points, noise the Gaussian noise level.
X, y = make_moons(n_samples=7000, noise=0.1)
# plt.scatter(X[:,0],X[:,1])
# plt.show()
# Split into train / test sets.
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.75, random_state=42)
# Define three base classifiers:
# logistic regression, decision tree, SVM.
lr = LogisticRegression()
dt = DecisionTreeClassifier()
# probability=True enables predict_proba, which soft voting requires.
svm = SVC(probability=True)
# Define the voting classifier.
# Soft voting requires every base estimator to expose class probabilities.
voting = VotingClassifier(
    # BUG FIX: the second pair must be the estimator object `dt`, not the
    # string 'dt' — passing a string makes VotingClassifier.fit raise.
    estimators=[('lr', lr), ('dt', dt), ('svm', svm)],
    voting='soft'
    # voting = 'hard'
)
# Report each classifier's accuracy on the test set.
for clf in (lr, dt, svm, voting):
    clf.fit(X_train, y_train)
    y_hat = clf.predict(X_test)
    print(clf.__class__.__name__, '=', accuracy_score(y_test, y_hat))
3.Bagging与随机森林
代码实现
from sklearn.datasets import load_iris
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
# Load the iris dataset.
iris = load_iris()
# Feature matrix and target vector.
X = iris.data
y = iris.target
# bootstrap=True -> bagging (sampling with replacement); bootstrap=False -> pasting (without replacement).
# max_samples: an int is the absolute number of samples drawn per estimator;
# a float draws max_samples * X.shape[0] samples.
# As the sample count N grows, the fraction of samples NOT drawn in one
# bootstrap round is (1 - 1/N)**N -> e**(-1) ~= 0.37; these out-of-bag (OOB)
# samples act as a free validation set.
bag_clf = BaggingClassifier(
    SVC(),
    n_estimators=500,
    bootstrap=True,
    max_samples=1.0,
    oob_score=True
)
bag_clf.fit(X, y)
# y_hat = bag_clf.predict(X)
# print(bag_clf.__class__.__name__, '=',accuracy_score(y,y_hat)) # BaggingClassifier 0.9733333333333334
# Accuracy estimated on the out-of-bag samples (oob_score_) — serves as a
# test-set-like estimate without a separate split.
print(bag_clf.oob_score_)  # 0.9733333333333334
# With decision trees as the base estimator, bagging is essentially a random forest.
bag_clf = BaggingClassifier(
    DecisionTreeClassifier(),
    n_estimators=500,
    bootstrap=True,
    max_samples=1.0,
)
bag_clf.fit(X, y)
y_hat = bag_clf.predict(X)
# Accuracy on the training data (optimistic: the model has seen this data).
print(bag_clf.__class__.__name__, '=', accuracy_score(y, y_hat))
# sklearn also provides a dedicated API that implements random forests directly.
rnd_clf = RandomForestClassifier(n_estimators=500)
rnd_clf.fit(X, y)
y_hat = rnd_clf.predict(X)
print(rnd_clf.__class__.__name__