题目
代码:
from sklearn import datasets
from sklearn import cross_validation
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn import metrics
import numpy as np
def NB(X_train, y_train, X_test):
clf = GaussianNB()
clf.fit(X_train, y_train)
return clf.predict(X_test)
def rbf_svm(X_train, y_train, X_test, C):
clf = SVC(C=C, kernel='rbf')
clf.fit(X_train, y_train)
return clf.predict(X_test)
def RFC(n_estimators, X_train, y_train, X_test):
clf = RandomForestClassifier(n_estimators=10)
clf.fit(X_train, y_train)
return clf.predict(X_test)
iris = datasets.load_iris()
dataset = datasets.make_classification(n_samples=1000, n_features=10,
n_informative=2, n_redundant=2, n_repeated=0, n_classes=2)
kf = cross_validation.KFold(len(iris.data), n_folds=10, shuffle=True, random_state=1234)
nb_accuracy = []
nb_f1 = []
nb_auc_roc = []
svm_accuracy = []
svm_f1 = []
svm_auc_roc = []
rfc_accuracy = []
rfc_f1 = []
rfc_auc_roc = []
for train_index, test_index in kf:
X_train, y_train = dataset[0][train_index], dataset[1][train_index]
X_test, y_test = dataset[0][test_index], dataset[1][test_index]
#NB
pred = NB(X_train, y_train, X_test)
nb_accuracy.append(metrics.accuracy_score(y_test, pred))
nb_f1.append(metrics.f1_score(y_test, pred))
nb_auc_roc.append(metrics.roc_auc_score(y_test, pred))
#rbf_svm
nn = len(X_train)
bestC = None
Cvalues = [1e-2,1e-1, 1e0, 1e1, 1e2]
innerscore = []
for C in Cvalues:
ikf = cross_validation.KFold(nn, n_folds=5, shuffle=True, random_state=5678)
innerf1 = []
for t_index, v_index in ikf:
X_t, X_v = X_train[t_index], X_train[v_index]
y_t, y_v = y_train[t_index], y_train[v_index]
ipred = rbf_svm(X_t, y_t, X_v, C)
innerf1.append(metrics.f1_score(y_v, ipred))
innerscore.append(sum(innerf1)/len(innerf1))
bestC = Cvalues[np.argmax(innerscore)]
print('The bestC is {} with the inner f1 score {}'.format(bestC, max(innerscore)))
pred = rbf_svm(X_train, y_train, X_test, bestC)
svm_accuracy.append(metrics.accuracy_score(y_test, pred))
svm_f1.append(metrics.f1_score(y_test, pred))
svm_auc_roc.append(metrics.roc_auc_score(y_test, pred))
#RFC
nn = len(X_train)
best_nes = None
nes_values = [10, 100, 1000]
innerscore = []
for nes in nes_values:
ikf = cross_validation.KFold(nn, n_folds=5, shuffle=True, random_state=5678)
innerf1 = []
for t_index, v_index in ikf:
X_t, X_v = X_train[t_index], X_train[v_index]
y_t, y_v = y_train[t_index], y_train[v_index]
ipred = RFC(nes, X_t, y_t, X_v)
innerf1.append(metrics.f1_score(y_v, ipred))
innerscore.append(sum(innerf1)/len(innerf1))
best_nes = nes_values[np.argmax(innerscore)]
print('The best n_estimators value is {} with the inner f1 score {}'.format(best_nes, max(innerscore)))
pred = RFC(nes, X_train, y_train, X_test)
rfc_accuracy.append(metrics.accuracy_score(y_test, pred))
rfc_f1.append(metrics.f1_score(y_test, pred))
rfc_auc_roc.append(metrics.roc_auc_score(y_test, pred))
print('*****the cross-validated performance*****')
print('GaussianNB:')
print('Accuracy score:\n{}'.format(nb_accuracy))
print('average: {}'.format(sum(nb_accuracy)/len(nb_accuracy)))
print('F1-score:\n{}'.format(nb_f1))
print('average: {}'.format(sum(nb_f1)/len(nb_f1)))
print('AUC ROC score:\n{}'.format(nb_auc_roc))
print('average: {}'.format(sum(nb_auc_roc)/len(nb_auc_roc)))
print('*****')
print('SVC:')
print('Accuracy score:\n{}'.format(svm_accuracy))
print('average: {}'.format(sum(svm_accuracy)/len(svm_accuracy)))
print('F1-score:\n{}'.format(svm_f1))
print('average: {}'.format(sum(svm_f1)/len(svm_f1)))
print('AUC ROC score:\n{}'.format(svm_auc_roc))
print('average: {}'.format(sum(svm_auc_roc)/len(svm_auc_roc)))
print('*****')
print('RandomForestClassifier:')
print('Accuracy score:\n{}'.format(rfc_accuracy))
print('average: {}'.format(sum(rfc_accuracy)/len(rfc_accuracy)))
print('F1-score:\n{}'.format(rfc_f1))
print('average: {}'.format(sum(rfc_f1)/len(rfc_f1)))
print('AUC ROC score:\n{}'.format(rfc_auc_roc))
print('average: {}'.format(sum(rfc_auc_roc)/len(rfc_auc_roc)))
print('*****')
结果:
The bestC is 1.0 with the inner f1 score 0.8678749455945998
The best n_estimators value is 1000 with the inner f1 score 0.8962557909926332
The bestC is 1.0 with the inner f1 score 0.8445989304812833
The best n_estimators value is 10 with the inner f1 score 0.8725475181926795
The bestC is 1.0 with the inner f1 score 0.8518346271059742
The best n_estimators value is 10 with the inner f1 score 0.8443333333333332
The bestC is 1.0 with the inner f1 score 0.8619444444444445
The best n_estimators value is 100 with the inner f1 score 0.8623759305210917
The bestC is 1.0 with the inner f1 score 0.8324285083631727
The best n_estimators value is 10 with the inner f1 score 0.8402929245034508
The bestC is 1.0 with the inner f1 score 0.8563861266068397
The best n_estimators value is 10 with the inner f1 score 0.8607061772077251
The bestC is 1.0 with the inner f1 score 0.8197329552168261
The best n_estimators value is 1000 with the inner f1 score 0.8546247083225303
The bestC is 1.0 with the inner f1 score 0.8367141397501928
The best n_estimators value is 100 with the inner f1 score 0.8504655299721714
The bestC is 1.0 with the inner f1 score 0.8334529785192293
The best n_estimators value is 1000 with the inner f1 score 0.8514978242888482
The bestC is 1.0 with the inner f1 score 0.8269505800095344
The best n_estimators value is 100 with the inner f1 score 0.8705391012398799
*****the cross-validated performance*****
GaussianNB:
Accuracy score:
[0.8666666666666667, 0.7333333333333333, 0.8, 0.8, 0.7333333333333333, 0.8, 0.93
33333333333333, 0.7333333333333333, 0.9333333333333333, 0.8666666666666667]
average: 0.8200000000000001
F1-score:
[0.8571428571428571, 0.75, 0.8571428571428572, 0.7692307692307693, 0.75, 0.79999
99999999999, 0.9333333333333333, 0.7777777777777778, 0.9523809523809523, 0.88888
88888888888]
average: 0.8335897435897437
AUC ROC score:
[0.8888888888888888, 0.7321428571428571, 0.75, 0.8125, 0.75, 0.8035714285714286,
0.9375, 0.7222222222222223, 0.9, 0.8611111111111112]
average: 0.8157936507936508
*****
SVC:
Accuracy score:
[0.8, 0.7333333333333333, 0.8, 0.8666666666666667, 0.7333333333333333, 0.8, 0.93
33333333333333, 0.8, 0.8666666666666667, 0.8666666666666667]
average: 0.8200000000000001
F1-score:
[0.8, 0.75, 0.8695652173913044, 0.8571428571428571, 0.7777777777777778, 0.823529
411764706, 0.9333333333333333, 0.8421052631578948, 0.9090909090909091, 0.8888888
888888888]
average: 0.8451433658547671
AUC ROC score:
[0.8333333333333334, 0.7321428571428571, 0.7, 0.875, 0.7222222222222223, 0.79464
28571428572, 0.9375, 0.7777777777777778, 0.8, 0.8611111111111112]
average: 0.8033730158730158
*****
RandomForestClassifier:
Accuracy score:
[0.8, 0.7333333333333333, 0.8, 0.8666666666666667, 0.8, 0.8, 1.0, 0.8, 0.9333333
333333333, 0.8]
average: 0.8333333333333334
F1-score:
[0.7692307692307692, 0.75, 0.8695652173913044, 0.8571428571428571, 0.8, 0.823529
411764706, 1.0, 0.8421052631578948, 0.9523809523809523, 0.823529411764706]
average: 0.8487483882833191
AUC ROC score:
[0.8055555555555556, 0.7321428571428571, 0.7, 0.875, 0.8333333333333333, 0.79464
28571428572, 1.0, 0.7777777777777778, 0.9, 0.8055555555555555]
average: 0.8224007936507937
*****
分析:在选择参数的过程中使用了使分数最大的参数,共10次样例。机器学习的这些函数使用在课件上都有给出,而且过程也比较清晰,只要在10次样例中分别进行性能评估即可。