# Imports assumed from an earlier setup cell of the notebook
from keras.layers import Input, ZeroPadding2D, Conv2D, BatchNormalization, Activation, MaxPooling2D, Flatten, Dense
from keras.models import Model

# GRADED FUNCTION: HappyModel
def HappyModel(input_shape):
    """
    Implementation of the HappyModel.

    Arguments:
    input_shape -- shape of the images of the dataset (height, width, channels), excluding the batch dimension

    Returns:
    model -- a Model() instance in Keras
    """
    # Feel free to run through the whole exercise (including the later portions of this notebook) once,
    # then come back and try out other network architectures as well.

    # Define the input placeholder as a tensor with shape input_shape. Think of this as your input image!
    X_input = Input(input_shape)
    # Zero-Padding: pads the border of X_input with zeroes
    X = ZeroPadding2D((3, 3))(X_input)
    # CONV -> BN -> RELU block applied to X
    X = Conv2D(32, (7, 7), strides=(1, 1), name='conv0')(X)
    X = BatchNormalization(axis=3, name='bn0')(X)
    X = Activation('relu')(X)
    # MAXPOOL
    X = MaxPooling2D((2, 2), name='max_pool')(X)
    # FLATTEN X (i.e. convert it to a vector) + FULLYCONNECTED
    X = Flatten()(X)
    X = Dense(1, activation='sigmoid', name='fc')(X)
    # Create the model. This creates your Keras model instance; you'll use this instance to train/test the model.
    model = Model(inputs=X_input, outputs=X, name='HappyModel')
    return model
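As a quick sanity check of the architecture (not part of the graded function), you can instantiate the model and print a layer-by-layer summary. The variable name sanity_model below is just illustrative; the expected output shapes in the comments are computed from the layer parameters above.

sanity_model = HappyModel((64, 64, 3))
sanity_model.summary()
# Expected shapes for a (64, 64, 3) input:
# ZeroPadding2D -> (70, 70, 3), Conv2D -> (64, 64, 32), MaxPooling2D -> (32, 32, 32),
# Flatten -> 32768, Dense -> 1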
You have now built a function to describe your model. To train and test this model, there are four steps in Keras:
1. Create the model by calling the function above.
2. Compile the model by calling model.compile(optimizer = "...", loss = "...", metrics = ["accuracy"]).
3. Train the model on train data by calling model.fit(x = ..., y = ..., epochs = ..., batch_size = ...).
4. Test the model on test data by calling model.evaluate(x = ..., y = ...).
# Implement step 1, i.e. create the model.
happyModel = HappyModel((64,64,3))
# Implement step 2, i.e. compile the model to configure the learning process. Since the model ends in a single
# sigmoid unit producing a binary label, binary_crossentropy is the appropriate loss.
happyModel.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
# Implement step 3, i.e. train the model. Choose the number of epochs and the batch size.
happyModel.fit(x=X_train, y=Y_train, epochs=10, batch_size=20)
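fit also returns a History object holding the per-epoch metrics. A minimal sketch of capturing it is below; the variable name history is just illustrative, and note that calling fit again continues training from the current weights.

history = happyModel.fit(x=X_train, y=Y_train, epochs=10, batch_size=20)
acc_key = 'acc' if 'acc' in history.history else 'accuracy'  # metric key name differs across Keras versions
print(history.history[acc_key])  # training accuracy recorded after each epoch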
# Implement step 4, i.e. test/evaluate the model.
preds = happyModel.evaluate(x=X_test, y=Y_test)
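evaluate returns the loss followed by the metrics passed to compile, so preds[0] is the test loss and preds[1] is the test accuracy:

print("Loss = " + str(preds[0]))
print("Test Accuracy = " + str(preds[1]))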