# Keras 5: MNIST classification with dropout and L2 regularization
from keras.datasets import mnist
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.optimizers import SGD
from keras.regularizers import l2
# Load the MNIST dataset: 60,000 training and 10,000 test 28x28 grayscale digits.
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# BUG FIX: the second label previously said "y_test:" while printing y_train.shape.
print("x_train:", x_train.shape, "y_train:", y_train.shape)

# Flatten images from (N, 28, 28) to (N, 784) and scale pixel values to [0, 1].
x_train = x_train.reshape(x_train.shape[0], -1) / 255.0
x_test = x_test.reshape(x_test.shape[0], -1) / 255.0

# Convert integer class labels (0-9) to one-hot vectors for categorical crossentropy.
y_train = np_utils.to_categorical(y_train, num_classes=10)
y_test = np_utils.to_categorical(y_test, num_classes=10)

# Build the network: 784 inputs -> 200 -> 100 -> 10 softmax outputs.
# Each Dense layer uses L2 weight regularization (lambda=0.0003) to reduce
# overfitting; the Dropout layers are kept commented out for comparison runs.
# "ones" is the canonical name of the bias initializer (alias of the old "one").
model = Sequential([
    Dense(units=200, input_dim=784, bias_initializer="ones",
          activation="relu", kernel_regularizer=l2(0.0003)),
    # Dropout(0.5),
    Dense(units=100, bias_initializer="ones",
          activation="relu", kernel_regularizer=l2(0.0003)),
    # Dropout(0.5),
    Dense(units=10, bias_initializer="ones",
          activation="softmax", kernel_regularizer=l2(0.0003)),
])

# Plain SGD optimizer with learning rate 0.01 (Keras 2.x `lr` argument).
sgd = SGD(lr=0.01)

# Compile with categorical crossentropy (matches the one-hot labels and
# softmax output) and track accuracy during training.
model.compile(optimizer=sgd, loss="categorical_crossentropy", metrics=["accuracy"])

# Train for 20 epochs with mini-batches of 32 samples.
model.fit(x_train, y_train, batch_size=32, epochs=20)

# Evaluate generalization on the held-out test set.
loss, accuracy = model.evaluate(x_test, y_test)
print("test loss:", loss, "accuracy:", accuracy)