library(keras)
# Load MNIST: x is a 3-d array (images, width, height) of grayscale
# values; y is an integer vector of digit labels 0-9.
mnist <- dataset_mnist()
x_train <- mnist$train$x
y_train <- mnist$train$y
x_test <- mnist$test$x
y_test <- mnist$test$y

# Flatten each 28x28 image into a length-784 vector so the data can be
# fed to dense layers: (images, 28, 28) -> (images, 784).
x_train <- array_reshape(x_train, c(nrow(x_train), 784))
x_test <- array_reshape(x_test, c(nrow(x_test), 784))

# Rescale grayscale values from [0, 255] to [0, 1].
x_train <- x_train / 255
x_test <- x_test / 255

# One-hot encode the integer labels into 10-column binary class matrices,
# matching the categorical_crossentropy loss used below.
y_train <- to_categorical(y_train, 10)
y_test <- to_categorical(y_test, 10)

# Sequential model: two ReLU hidden layers with dropout regularization,
# and a 10-way softmax output (one unit per digit class).
model <- keras_model_sequential()
model %>%
  layer_dense(units = 256, activation = "relu", input_shape = c(784)) %>%
  layer_dropout(rate = 0.4) %>%
  layer_dense(units = 128, activation = "relu") %>%
  layer_dropout(rate = 0.3) %>%
  layer_dense(units = 10, activation = "softmax")

# Print the layer-by-layer parameter summary.
summary(model)
# Compile with categorical cross-entropy (labels are one-hot matrices)
# and the RMSprop optimizer, tracking accuracy during training.
# NOTE(review): the original comment here claimed RMSprop is "usually a
# good choice for recurrent neural networks" — that line is copied from
# the RMSprop documentation; this model is a feed-forward MLP, where
# RMSprop is simply a reasonable default.
model %>% compile(
loss = 'categorical_crossentropy',
optimizer = optimizer_rmsprop(),
metrics = c('accuracy')
)
# Train for 30 epochs in mini-batches of 128 images, holding out 20% of
# the training data to report validation metrics after every epoch.
history <- model %>% fit(
  x = x_train,
  y = y_train,
  batch_size = 128,
  epochs = 30,
  validation_split = 0.2
)

# fit() returns a history object containing per-epoch loss and accuracy
# (training and validation); plot() draws the learning curves.
plot(history)
# Evaluate loss and accuracy on the held-out test set.
evaluate(model, x_test, y_test)
# Generate class predictions for the test images.
# predict_classes() was deprecated and removed from Keras (TensorFlow
# >= 2.6), so take the argmax of the softmax probabilities instead.
# which.max() is 1-based, hence the - 1L to recover 0-9 digit labels;
# the result is the same integer vector predict_classes() returned.
probs <- model %>% predict(x_test)
apply(probs, 1, which.max) - 1L
# R interface to Keras — MNIST example
# Source note: latest recommended revision of this article was published 2019-06-21 15:00:00.