from sklearn.neural_network import MLPClassifier
from sklearn.datasets import load_digits
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import classification_report,confusion_matrix
# Train a multi-layer perceptron on the 8x8 handwritten-digits dataset
# and evaluate it on a held-out test split.
digits = load_digits()
x_data = digits.data
y_data = digits.target

# Standardize features to zero mean / unit variance: gradient-based MLP
# training is sensitive to feature scale.
scaler = StandardScaler()
x_data = scaler.fit_transform(x_data)

# Default split is 75% train / 25% test. NOTE(review): no random_state is
# set, so the split (and hence the reported metrics) varies between runs.
x_train, x_test, y_train, y_test = train_test_split(x_data, y_data)

# Two hidden layers (100 and 50 units); all other hyperparameters keep
# scikit-learn defaults (relu activation, adam solver, learning rate 1e-3).
# max_iter=500 gives the solver enough epochs to converge on this dataset.
mlp = MLPClassifier(hidden_layer_sizes=(100, 50), max_iter=500)
mlp.fit(x_train, y_train)

predictions = mlp.predict(x_test)
print(classification_report(y_test, predictions))
# Example output (exact values vary between runs because the train/test
# split is random):
#
#              precision    recall  f1-score   support
#           0       1.00      1.00      1.00        46
#           1       0.94      0.96      0.95        52
#           2       0.98      1.00      0.99        44
#           3       1.00      0.97      0.99        39
#           4       1.00      0.95      0.97        41
#           5       1.00      1.00      1.00        41
#           6       1.00      1.00      1.00        50
#           7       0.96      1.00      0.98        44
#           8       0.95      0.95      0.95        44
#           9       0.98      0.96      0.97        49
# avg / total       0.98      0.98      0.98       450
# Print the raw confusion matrix: rows are the true digit (0-9), columns
# are the predicted digit; off-diagonal entries are misclassifications.
print(confusion_matrix(y_test, predictions))
# Example output (exact values vary between runs because the train/test
# split is random):
#
# [[46  0  0  0  0  0  0  0  0  0]
#  [ 0 50  0  0  0  0  0  0  1  1]
#  [ 0  0 44  0  0  0  0  0  0  0]
#  [ 0  0  1 38  0  0  0  0  0  0]
#  [ 0  1  0  0 39  0  0  1  0  0]
#  [ 0  0  0  0  0 41  0  0  0  0]
#  [ 0  0  0  0  0  0 50  0  0  0]
#  [ 0  0  0  0  0  0  0 44  0  0]
#  [ 0  2  0  0  0  0  0  0 42  0]
#  [ 0  0  0  0  0  0  0  1  1 47]]