# Fine-tune InceptionV3 on a new 200-class dataset in two stages:
#   1) train only a new, randomly initialized classifier head;
#   2) unfreeze the top two inception blocks and fine-tune them.
from keras.optimizers import SGD

# Create the base pre-trained model without its ImageNet classifier head.
base_model = InceptionV3(weights='imagenet', include_top=False)
base_model.summary()

# Add a global spatial average pooling layer on top of the conv features,
x = base_model.output
x = GlobalAveragePooling2D()(x)
# then a fully-connected layer,
x = Dense(1024, activation='relu')(x)
# and a logistic layer -- let's say we have 200 classes.
predictions = Dense(200, activation='softmax')(x)

# This is the model we will train.
# FIX: Keras 2 functional API uses `inputs`/`outputs`; the Keras 1
# `input`/`output` keyword names were removed and raise a TypeError.
model = Model(inputs=base_model.input, outputs=predictions)

# First: train only the top layers (which were randomly initialized),
# i.e. freeze all convolutional InceptionV3 layers.
for layer in base_model.layers:
    layer.trainable = False

# Compile the model (should be done *after* setting layers to non-trainable).
model.compile(optimizer='rmsprop', loss='categorical_crossentropy')

# Train the classifier head.
# FIX: the original `fit_generator(....)` was a syntax error; `...` keeps the
# placeholder intent and parses. TODO: supply your generator/steps/epochs.
model.fit_generator(...)

# At this point the top layers are well trained and we can start fine-tuning
# convolutional layers from InceptionV3. We will freeze the bottom N layers
# and train the remaining top layers. Visualize layer names and indices
# to see how many layers we should freeze:
for i, layer in enumerate(base_model.layers):
    print(i, layer.name)

# We chose to train the top 2 inception blocks, i.e. freeze the first
# 172 layers and unfreeze the rest:
for layer in model.layers[:172]:
    layer.trainable = False
for layer in model.layers[172:]:
    layer.trainable = True

# Recompile the model for these modifications to take effect.
# Use SGD with a low learning rate so fine-tuning does not destroy the
# pre-trained convolutional weights.
model.compile(optimizer=SGD(lr=0.0001, momentum=0.9),
              loss='categorical_crossentropy')