数据集下载:https://www.kaggle.com/c/the-nature-conservancy-fisheries-monitoring/data
环境搭建:
使用anaconda安装指定版本的keras:
conda install -c https://conda.anaconda.org/jaikumarm keras(注意:keras 版本不能高于 2.2.0)
导入包:
import os

import numpy as np
from PIL import Image

from keras.applications.inception_v3 import InceptionV3
from keras.callbacks import ModelCheckpoint
from keras.layers import Flatten, Dense, AveragePooling2D
from keras.models import Model
from keras.models import load_model
from keras.optimizers import RMSprop, SGD
from keras.preprocessing import image
from keras.preprocessing.image import ImageDataGenerator
修改 Google 提出的 InceptionV3 模型,将最后一层替换为自己的类别输出:
# ---- Hyper-parameters ----
learning_rate = 0.0001
img_width = 299
img_height = 299
nbr_train_samples = 3019
nbr_validation_samples = 758
nbr_epochs = 25
batch_size = 32

# ---- Data locations ----
train_data_dir = '/Users/bappy/chris_chen/ML/keras_dnn/train_split'
val_data_dir = '/Users/bappy/chris_chen/ML/keras_dnn/val_split'

# The eight NCFM competition classes (7 fish species + "no fish").
FishNames = ['ALB', 'BET', 'DOL', 'LAG', 'NoF', 'OTHER', 'SHARK', 'YFT']

print('Loading InceptionV3 Weights ...')
# ImageNet-pretrained backbone without its original classification head.
base_model = InceptionV3(include_top=False, weights='imagenet',
                         input_tensor=None, input_shape=(299, 299, 3))
# Note: the canonical InceptionV3 preprocessing is (x / 255 - 0.5) * 2.

print('Adding Average Pooling Layer and Softmax Output Layer ...')
# base_model.output has shape (8, 8, 2048); pool it to a single vector,
# then attach an 8-way softmax classifier for the fish classes.
head = AveragePooling2D((8, 8), strides=(8, 8), name='avg_pool')(base_model.output)
head = Flatten(name='flatten')(head)
head = Dense(8, activation='softmax', name='predictions')(head)

InceptionV3_model = Model(base_model.input, head)
# InceptionV3_model.summary()
开始训练模型:
# Fine-tune with SGD + Nesterov momentum at a low learning rate so the
# pretrained ImageNet weights are adjusted gently rather than destroyed.
sgd_optimizer = SGD(lr=learning_rate, momentum=0.9, decay=0.0, nesterov=True)
InceptionV3_model.compile(optimizer=sgd_optimizer,
                          loss='categorical_crossentropy',
                          metrics=['accuracy'])

# Autosave: keep only the checkpoint with the best validation accuracy.
# ('val_acc' is the metric key used by keras <= 2.2.)
best_model_file = "./weights.h5"
best_model = ModelCheckpoint(best_model_file, monitor='val_acc',
                             verbose=1, save_best_only=True)
# Training-time augmentation: mild random geometric jitter plus horizontal
# flips, with pixel values rescaled to [0, 1].
# NOTE(review): rescale=1./255 differs from InceptionV3's canonical
# (x / 255 - 0.5) * 2 preprocessing; the fine-tuned weights therefore
# expect [0, 1] inputs, and inference must use the same scaling.
augmentation_config = dict(
    rescale=1. / 255,
    shear_range=0.1,
    zoom_range=0.1,
    rotation_range=10.,
    width_shift_range=0.1,
    height_shift_range=0.1,
    horizontal_flip=True,
)
train_datagen = ImageDataGenerator(**augmentation_config)

# Validation images are only rescaled, never augmented.
val_datagen = ImageDataGenerator(rescale=1. / 255)
# Stream labelled batches straight from the class-per-subdirectory layout.
# Passing classes=FishNames pins the label index order explicitly.
train_generator = train_datagen.flow_from_directory(
    train_data_dir,
    classes=FishNames,
    class_mode='categorical',
    target_size=(img_width, img_height),
    batch_size=batch_size,
    shuffle=True)

validation_generator = val_datagen.flow_from_directory(
    val_data_dir,
    classes=FishNames,
    class_mode='categorical',
    target_size=(img_width, img_height),
    batch_size=batch_size,
    shuffle=True)  # shuffling does not change aggregate validation metrics
# Train: each epoch draws nbr_train_samples augmented images and evaluates
# on nbr_validation_samples; the ModelCheckpoint callback persists the
# best-val_acc weights to disk. (samples_per_epoch / nb_epoch /
# nb_val_samples are the keras-1-style argument names.)
InceptionV3_model.fit_generator(
    train_generator,
    validation_data=validation_generator,
    samples_per_epoch=nbr_train_samples,
    nb_val_samples=nbr_validation_samples,
    nb_epoch=nbr_epochs,
    callbacks=[best_model])
模型最终准确率:95.7%
预测数据:
# Reload the best checkpoint saved by ModelCheckpoint during training.
# (Original code referenced an undefined `weights_path`.)
InceptionV3_model = load_model(best_model_file)

# The spatial size the network was trained at.
# (Original code referenced an undefined `target_size`.)
target_size = (img_width, img_height)

def predict(img):
    """Return the 8-way class-probability vector for one PIL image.

    The probabilities are ordered as in FishNames.
    """
    if img.size != target_size:
        img = img.resize(target_size)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    # Match the training pipeline, which rescaled pixels to [0, 1] via
    # ImageDataGenerator(rescale=1./255). The original call to
    # InceptionV3's preprocess_input would feed [-1, 1] inputs that the
    # fine-tuned weights never saw (and the name was never imported).
    x = x / 255.
    preds = InceptionV3_model.predict(x)
    return preds[0]

predict(Image.open('/Users/bappy/myimg/images/'+all_imgs[0]))