import tensorflow as tf
import tensorflow.keras.layers as tfl
# RandomFlip / RandomRotation live in tf.keras.layers in recent TF releases
# (tf.keras.layers.experimental.preprocessing in older ones) -- adjust to your version.
from tensorflow.keras.layers import RandomFlip, RandomRotation

# UNQ_C1
# GRADED FUNCTION: data_augmenter
def data_augmenter():
    '''
    Create a Sequential model composed of 2 layers
    Returns:
        tf.keras.Sequential
    '''
    ### START CODE HERE
    data_augmentation = tf.keras.Sequential()
    data_augmentation.add(RandomFlip('horizontal'))
    data_augmentation.add(RandomRotation(0.2))
    ### END CODE HERE

    return data_augmentation
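# --- Illustration only (not part of the graded cell): a minimal sanity check of the
# --- augmenter, assuming a `train_dataset` of (image, label) batches exists in the
# --- notebook (hypothetical name here). It augments one image a few times.
data_augmentation = data_augmenter()
for image, _ in train_dataset.take(1):
    first_image = image[0]
    for i in range(3):
        # add a batch dimension, augment, and inspect the result
        augmented_image = data_augmentation(tf.expand_dims(first_image, 0))
        print(i, augmented_image.shape)  # spatial shape unchanged; content flipped/rotated at random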
# UNQ_C2
# GRADED FUNCTION
def alpaca_model(image_shape=IMG_SIZE, data_augmentation=data_augmenter()):
    '''
    Define a tf.keras model for binary classification out of the MobileNetV2 model
    Arguments:
        image_shape -- Image width and height
        data_augmentation -- data augmentation function
    Returns:
        tf.keras.Model
    '''
    input_shape = image_shape + (3,)

    ### START CODE HERE
    base_model = tf.keras.applications.MobileNetV2(input_shape=input_shape,
                                                   include_top=False,   # <== Important: drop the ImageNet classification head
                                                   weights='imagenet')  # start from ImageNet weights

    # Freeze the base model by making it non-trainable
    base_model.trainable = False

    # Create the input layer (same shape as the MobileNetV2 input)
    inputs = tf.keras.Input(shape=input_shape)

    # Apply data augmentation to the inputs
    x = data_augmentation(inputs)

    # Data preprocessing using the same weights the model was trained on
    x = tf.keras.applications.mobilenet_v2.preprocess_input(x)

    # Set training to False to avoid updating the batch norm statistics
    x = base_model(x, training=False)

    # Add the new binary classification layers:
    # use global average pooling to summarize the info in each channel
    x = tfl.GlobalAveragePooling2D()(x)
    # include dropout with probability 0.2 to avoid overfitting
    x = tfl.Dropout(rate=0.2)(x)

    # Create a prediction layer with one neuron (a binary classifier only needs one)
    prediction_layer = tfl.Dense(1)
    ### END CODE HERE

    outputs = prediction_layer(x)
    model = tf.keras.Model(inputs, outputs)

    return model
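# --- Illustration only (not part of the graded cell): a sketch of the feature-extraction
# --- training pass that the fine-tuning step (UNQ_C3) builds on. `train_dataset`,
# --- `validation_dataset`, and the 0.001 / 5-epoch values are assumptions, not
# --- prescribed by this section.
model2 = alpaca_model(IMG_SIZE, data_augmenter())
base_learning_rate = 0.001
model2.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=base_learning_rate),
               loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
               metrics=['accuracy'])
initial_epochs = 5
history = model2.fit(train_dataset,
                     validation_data=validation_dataset,
                     epochs=initial_epochs)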
# UNQ_C3
base_model = model2.layers[4]
base_model.trainable = True
# Let's take a look to see how many layers are in the base model
print("Number of layers in the base model: ", len(base_model.layers))

# Fine-tune from this layer onwards
fine_tune_at = 120

### START CODE HERE
# Freeze all the layers before the `fine_tune_at` layer
for layer in base_model.layers[:fine_tune_at]:
    layer.trainable = False

# Define a BinaryCrossentropy loss function. Use from_logits=True
loss_function = tf.keras.losses.BinaryCrossentropy(from_logits=True)
# Define an Adam optimizer with a learning rate of 0.1 * base_learning_rate
optimizer = tf.keras.optimizers.Adam(learning_rate=0.1 * base_learning_rate)
# Use accuracy as evaluation metric
metrics = ['accuracy']
### END CODE HERE

model2.compile(loss=loss_function,
               optimizer=optimizer,
               metrics=metrics)
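# --- Illustration only (not part of the graded cell): resume training with the newly
# --- unfrozen layers, picking up from the feature-extraction run sketched above.
# --- `fine_tune_epochs` is an assumed value.
fine_tune_epochs = 5
total_epochs = initial_epochs + fine_tune_epochs
history_fine = model2.fit(train_dataset,
                          epochs=total_epochs,
                          initial_epoch=history.epoch[-1],  # continue epoch numbering from the first run
                          validation_data=validation_dataset)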