首先导入所需的模块
#导入所需的模块
import time
import numpy as np
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
from keras.datasets import mnist
from sklearn import svm
导入数据集,如果本地没有的话会自动从服务器上下载到本地
# Load the MNIST dataset via the Keras datasets module.
# On first use it is downloaded from the server and cached locally (per the
# article text above); returns (train images, train labels), (test images, test labels).
(x_train_image,y_train_label), (x_test_image,y_test_label) = mnist.load_data()
把28×28的矩阵转化为一维向量,并且将其归一化(很重要)
# Flatten each 28x28 image into a one-dimensional vector of length 784 and
# cast to float32. Using -1 lets NumPy infer the sample count from the data
# instead of hard-coding 60000/10000, so this also works on a different split.
x_Train =x_train_image.reshape(-1, 784).astype('float32')
x_Test = x_test_image.reshape(-1, 784).astype('float32')
# Normalize pixel values from [0, 255] to [0, 1] (the article stresses this
# step is very important for the SVM).
x_Train_normalize = x_Train / 255
x_Test_normalize = x_Test / 255
传递训练模型的参数
# Record and show the wall-clock time before training begins.
start_stamp = time.strftime('%Y-%m-%d %H:%M:%S')
print(start_stamp)
# RBF-kernel support vector classifier; C=100 is a fairly hard margin penalty.
clf = svm.SVC(kernel='rbf', gamma=0.03, C=100.0)
进行模型训练
# Train the model on the normalized training vectors, timing the fit.
start = time.time()
clf.fit(x_Train_normalize, y_train_label)
elapsed = float(time.time() - start)
print("Time taken: {} seconds".format(elapsed))
10分钟左右即可完成模型训练
Time taken: 614.0134041309357 seconds
查看结果
# Run inference on the normalized test set, casting each predicted label to int.
predictions = [int(label) for label in clf.predict(x_Test_normalize)]
# Confusion matrix
print(confusion_matrix(y_test_label, predictions))
# Per-class precision, recall, and f1-score
print(classification_report(y_test_label, np.array(predictions)))
# Overall accuracy
print('accuracy=', accuracy_score(y_test_label, predictions))
print(time.strftime('%Y-%m-%d %H:%M:%S'))
结果如下:
[[ 974 0 1 0 0 2 0 1 2 0]
[ 0 1128 3 1 0 1 0 1 1 0]
[ 4 0 1017 0 1 0 0 7 3 0]
[ 0 0 2 997 1 2 0 4 3 1]
[ 0 0 2 0 969 0 4 0 1 6]
[ 2 0 0 5 1 877 3 1 2 1]
[ 3 2 0 0 2 2 948 0 1 0]
[ 0 3 8 1 1 0 0 1007 1 7]
[ 4 0 1 3 1 1 1 2 957 4]
[ 2 3 0 6 6 2 1 5 1 983]]
precision recall f1-score support
0 0.98 0.99 0.99 980
1 0.99 0.99 0.99 1135
2 0.98 0.99 0.98 1032
3 0.98 0.99 0.99 1010
4 0.99 0.99 0.99 982
5 0.99 0.98 0.99 892
6 0.99 0.99 0.99 958
7 0.98 0.98 0.98 1028
8 0.98 0.98 0.98 974
9 0.98 0.97 0.98 1009
micro avg 0.99 0.99 0.99 10000
macro avg 0.99 0.99 0.99 10000
weighted avg 0.99 0.99 0.99 10000
accuracy= 0.9857
2019-03-25 21:17:05
关于CNN识别在这里
xxxx:使用卷积神经网络(CNN)对手写数字数据集mnist进行识别的结果与代码实现zhuanlan.zhihu.com