Sklearn作业
作业截图
代码
"""Compare Naive Bayes, SVM and Random Forest classifiers on a synthetic
binary-classification dataset using 10-fold cross-validation, reporting
accuracy, F1-score and ROC AUC for every fold."""
from sklearn import datasets
# NOTE: sklearn.cross_validation was removed in scikit-learn 0.20;
# KFold now lives in sklearn.model_selection and is iterated via .split().
from sklearn.model_selection import KFold
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn import metrics


def _evaluate(clf, name, X_train, y_train, X_test, y_test):
    """Fit *clf* on the training fold and print its accuracy, F1 and ROC AUC
    on the held-out fold (side effect: prints to stdout)."""
    print("%s:" % name)
    clf.fit(X_train, y_train)
    pred = clf.predict(X_test)
    print("Accuracy:", metrics.accuracy_score(y_test, pred))
    print("F1-score:", metrics.f1_score(y_test, pred))
    print("AUC ROC:", metrics.roc_auc_score(y_test, pred))


# Build the dataset: 2000 samples, 15 features, 2 classes, no repeated features.
X, Y = datasets.make_classification(n_samples=2000, n_features=15,
                                    n_repeated=0, n_classes=2)

# 10-fold cross-validation split with shuffling.
# New API: the sample count is no longer passed to the constructor, and
# n_folds was renamed to n_splits.
kf = KFold(n_splits=10, shuffle=True)

for num, (train_index, test_index) in enumerate(kf.split(X), start=1):
    X_train, y_train = X[train_index], Y[train_index]
    X_test, y_test = X[test_index], Y[test_index]
    print("Test%d:" % num)
    # Evaluate the three classifiers on the same fold for a fair comparison.
    _evaluate(GaussianNB(), "Naive Bayes",
              X_train, y_train, X_test, y_test)
    _evaluate(SVC(C=1e-01, kernel='rbf', gamma=0.1), "SVM",
              X_train, y_train, X_test, y_test)
    _evaluate(RandomForestClassifier(n_estimators=100), "Random Forest",
              X_train, y_train, X_test, y_test)
    print()
结果截图
(共有十次测试,此处只放出四次的结果截图)
根据截图可知,在此二分类问题中,对于相同的数据集,随机森林算法的分类效果优于朴素贝叶斯和支持向量机。