I have a dataset of 20,000 black-and-white images belonging to 2 classes that I want to classify (the images look a bit like weather-forecast or stock-market charts, so I can't use a pre-trained network). The dataset is already split into 18,000 images for training and 2,000 images for testing. I'm training a convolutional neural network on it with Keras. I reach 96% accuracy on the training set, but the results on the test set are not good (stuck between 82% and 83% after 50 epochs). I think this may be due to overfitting. Could you suggest some ways to fix the problem? Below I include the final output and the code, followed by a sketch of what I'm considering trying.
281/281 [==============================] - 132s - loss: 0.1024 - acc: 0.9612 - val_loss: 0.5836 - val_acc: 0.8210
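To see how the gap between training and validation accuracy evolves over the epochs (rather than just the last line above), the History object returned by fit_generator can be plotted. This is just a minimal sketch, assuming matplotlib is available and that the fit_generator call in the code below is captured as history = model.fit_generator(...):

import matplotlib.pyplot as plt

def plot_accuracy(history):
    # In this Keras version the per-epoch metrics are stored in
    # history.history under the keys 'acc' and 'val_acc'.
    plt.plot(history.history['acc'], label='train acc')
    plt.plot(history.history['val_acc'], label='val acc')
    plt.xlabel('epoch')
    plt.ylabel('accuracy')
    plt.legend()
    plt.show()

The full model and training code: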
from keras.layers.normalization import BatchNormalization
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, ZeroPadding2D
from keras.layers import Activation, Dropout, Flatten, Dense
from keras import backend as K
from keras.optimizers import SGD, RMSprop, Adam
from keras.callbacks import ModelCheckpoint
filepath="weights/weights-improvement-{epoch:02d}-{val_acc:.4f}.hdf5"
checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=1, save_best_only=True, mode='max')
callbacks_list = [checkpoint]
img_width, img_height = 100, 1296
train_data_dir = 'dataset/train'
validation_data_dir = 'dataset/validation'
nb_train_samples = 18000
nb_validation_samples = 2000
epochs = 100
batch_size = 64
input_shape = (img_width, img_height, 1)
model = Sequential()
model.add(Conv2D(32, (3, 3), input_shape=input_shape))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.1))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.2))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(128, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.3))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(256, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.4))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(512, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.5))
model.add(Flatten())
model.add(Dense(256))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.5))
model.add(Dense(256))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.5))
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.compile(loss='binary_crossentropy',
              optimizer=Adam(lr=0.001),
              metrics=['accuracy'])
model.save('model.h5')
train_datagen = ImageDataGenerator(
    rescale=None,
)
test_datagen = ImageDataGenerator(rescale=None)
train_generator = train_datagen.flow_from_directory(
    train_data_dir, color_mode='grayscale',
    target_size=(img_width, img_height),
    batch_size=batch_size,
    class_mode='binary')
validation_generator = test_datagen.flow_from_directory(
    validation_data_dir, color_mode='grayscale',
    target_size=(img_width, img_height),
    batch_size=batch_size,
    class_mode='binary')
model.fit_generator(
    train_generator,
    steps_per_epoch=nb_train_samples // batch_size,
    epochs=epochs,
    validation_data=validation_generator,
    validation_steps=nb_validation_samples // batch_size,
    callbacks=callbacks_list)
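Would something along these lines help with the overfitting? A minimal sketch of what I'm considering: enabling light augmentation in the existing ImageDataGenerator and adding an EarlyStopping callback next to the ModelCheckpoint one. The shift/zoom values and the patience are just guesses, not tuned; since the images are chart-like, I avoided flips and rotations.

from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import EarlyStopping

# Small geometric jitter only; flips or rotations would distort chart-like images.
train_datagen = ImageDataGenerator(
    width_shift_range=0.05,
    height_shift_range=0.05,
    zoom_range=0.05,
)

# Stop once val_acc has not improved for 10 epochs; the best weights are still
# saved by the ModelCheckpoint callback defined above.
early_stop = EarlyStopping(monitor='val_acc', patience=10, mode='max', verbose=1)
callbacks_list = [checkpoint, early_stop]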