Sklearn
- Create a classification dataset (n_samples ≥ 1000, n_features ≥ 10)
- Split the dataset using 10-fold cross validation
- Train the algorithms
- Evaluate the cross-validated performance — Accuracy, F1-score, and ROC AUC
- Write a short report summarizing the methodology and the results
"""Compare GaussianNB, SVC, and RandomForest classifiers with 10-fold CV.

Generates a synthetic binary classification dataset, runs 10-fold
cross-validation, and reports the mean Accuracy, F1-score, and ROC AUC
for each classifier.
"""
from sklearn import datasets, metrics
from sklearn.model_selection import KFold
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier

# make_classification returns an (X, y) tuple, not a single dataset object.
X, y = datasets.make_classification(n_samples=1000, n_features=10,
                                    n_informative=2, n_redundant=2,
                                    n_repeated=0, n_classes=2)

classifiers = {
    "GaussianNB": GaussianNB(),
    "SVC": SVC(C=1e-01, kernel='rbf', gamma=0.1),
    "RandomForest": RandomForestClassifier(n_estimators=6),
}

# sklearn.cross_validation was removed; the modern API is
# model_selection.KFold(n_splits=...) and iterating kf.split(X).
kf = KFold(n_splits=10, shuffle=True)

for name, clf in classifiers.items():
    # Train and evaluate INSIDE the fold loop so every fold contributes;
    # the original trained once, after the loop, on the last fold only.
    accs, f1s, aucs = [], [], []
    for train_index, test_index in kf.split(X):
        X_train, y_train = X[train_index], y[train_index]
        X_test, y_test = X[test_index], y[test_index]
        clf.fit(X_train, y_train)
        pred = clf.predict(X_test)
        accs.append(metrics.accuracy_score(y_test, pred))
        f1s.append(metrics.f1_score(y_test, pred))
        # NOTE(review): AUC from hard 0/1 predictions is coarse; using
        # predict_proba/decision_function would be the usual choice.
        aucs.append(metrics.roc_auc_score(y_test, pred))
    print(f"{name}: accuracy={sum(accs) / len(accs):.4f} "
          f"f1={sum(f1s) / len(f1s):.4f} "
          f"auc={sum(aucs) / len(aucs):.4f}")