# Building a fully connected neural network with Keras is very convenient.
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.optimizers import SGD
from keras.datasets import mnist
import numpy as np
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# Assemble the MLP layer by layer: 784 -> 500 -> 500 -> 10.
layers = [
    Dense(500, input_shape=(784,)),  # input layer: flattened 28*28 = 784 pixels
    Activation('tanh'),
    Dropout(0.5),                    # 50% dropout for regularisation
    Dense(500),                      # hidden layer with 500 units
    Activation('tanh'),
    Dropout(0.5),                    # 50% dropout
    Dense(10),                       # output layer: one unit per digit class
    Activation('softmax'),
]
model = Sequential()
for layer in layers:
    model.add(layer)
# SGD with Nesterov momentum; lr and decay control the learning-rate schedule.
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
# Cross-entropy is the standard loss for one-hot classification targets.
model.compile(loss='categorical_crossentropy', optimizer=sgd)
# Load MNIST (downloaded on first use): 60k train / 10k test 28x28 grayscale images.
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# Flatten each 28x28 image into a 784-vector and scale pixel values from
# [0, 255] to [0, 1]: feeding raw 0-255 integers into tanh units saturates
# the activations and stalls training.
X_train = x_train.reshape(x_train.shape[0], -1).astype('float32') / 255.0
X_test = x_test.reshape(x_test.shape[0], -1).astype('float32') / 255.0
# One-hot encode the integer labels into (n, 10) indicator matrices.
Y_train = (np.arange(10) == y_train[:, None]).astype(int)
Y_test = (np.arange(10) == y_test[:, None]).astype(int)
# Train the model:
#   batch_size       - number of samples per gradient step
#   epochs           - number of full passes over the training data
#   shuffle          - reshuffle the training data each epoch
#   verbose=1        - print a progress log
#   validation_split - hold out 30% of the training data for validation
model.fit(X_train, Y_train, batch_size=200, epochs=100, shuffle=True,
          verbose=1, validation_split=0.3)
print("test set")
# 开始预测
scores = model.evaluate(X_test,Y_test,batch_size=200,verbose=1)
print("")
print("The test loss is %f" % scores)
result = model.predict(X_test,batch_size=200,verbose=1)
result_max = np.argmax(result, axis = 1)
test_max = np.argmax(Y_test, axis = 1)
result_bool = np.equal(result_max, test_max)
true_num = np.sum(result_bool)
print("")
print("The accuracy of the model is %f" % (true_num/len(result_bool)))