import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.ensemble import ExtraTreesClassifier
def main():
    """Load table_11.csv, rank its features with an extra-trees forest, and plot them.

    Reads a comma-separated matrix whose first 62 columns are features and
    whose column 62 is the class label, fits an ExtraTreesClassifier,
    prints the features ranked by importance, and shows a bar chart of the
    importances with per-tree standard deviations as error bars.
    """
    # loadtxt accepts a path directly; the original opened the file in "rb"
    # mode and never closed the handle.
    data_matrix = np.loadtxt("table_11.csv", delimiter=",")
    # np.random.shuffle(data_matrix)  # enable to randomize row order first

    # First 62 columns are features; column 62 holds the class label.
    # NOTE(review): assumes the CSV has at least 63 columns — confirm format.
    X = data_matrix[:, :62]
    y = data_matrix[:, 62]  # 1-D, equivalent to [:, 62:63].ravel()

    print(X.shape)
    print(" ")
    print(X)

    # Build a forest and compute the feature importances.
    # random_state=0 makes the ranking reproducible across runs.
    forest = ExtraTreesClassifier(n_estimators=250, random_state=0)
    forest.fit(X, y)
    importances = forest.feature_importances_
    # Spread of each feature's importance across the individual trees,
    # used as error bars in the plot below.
    std = np.std([tree.feature_importances_ for tree in forest.estimators_],
                 axis=0)
    indices = np.argsort(importances)[::-1]  # feature ids, most important first

    # Print the feature ranking.
    print("Feature ranking:")
    for rank, feature in enumerate(indices, start=1):
        print("%d. feature %d (%f)" % (rank, feature, importances[feature]))

    # Plot the feature importances of the forest.
    plt.figure(figsize=(24, 16))
    plt.title("Feature importances")
    plt.bar(range(X.shape[1]), importances[indices],
            color="r", yerr=std[indices], align="center")
    plt.xticks(range(X.shape[1]), indices)
    plt.xlim([-1, X.shape[1]])
    plt.show()


if __name__ == "__main__":
    main()