1、保存 Keras 模型并重新导入时,自定义的损失函数无法被识别,报错:
ValueError: Unknown loss function:bp_mll_loss
解决:调用 load_model 时加一个 custom_objects 参数,把自定义损失函数按名称注册进去
# Failing version: load_model() deserializes the loss by name only, so a
# custom loss like bp_mll_loss raises "ValueError: Unknown loss function".
# model.save('BPMLL_add_noise_less_fea_locate10.h5')
# from keras.models import load_model
# model = load_model('BPMLL_add_noise_less_fea_locate10.h5')
# Fix: register the custom loss via custom_objects so deserialization can
# resolve the name 'bp_mll_loss' back to the function object.
model.save('BPMLL_add_noise_less_fea_locate10.h5')
from keras.models import load_model
model = load_model('BPMLL_add_noise_less_fea_locate10.h5',custom_objects={'bp_mll_loss': bp_mll_loss})
2、让 Keras 保存训练过程中性能最好的一轮模型(可在 callbacks 中加 ModelCheckpoint 并设 save_best_only=True)
3、代码
def CNN(X_train, y_train, X_val, y_val, X_test, y_test):
    """Train, evaluate, and persist a 1-D CNN classifier.

    Parameters
    ----------
    X_train, X_val, X_test : array-like
        Feature tensors; the network expects samples shaped (1024, 10)
        (see ``input_shape`` below).
    y_train, y_val, y_test : array-like
        Integer class labels in [0, 41); converted to one-hot here.

    Returns
    -------
    keras.models.Sequential
        The trained model (also saved to ../model_40/cnn_add_noise.h5).
    """
    print('***************************************************training************************************************')
    # Local imports keep heavy Keras initialization out of module import time.
    import keras
    from keras.models import Sequential
    from keras.layers import Dense, Flatten
    from keras.layers import Conv1D, MaxPooling1D
    from keras.layers import LeakyReLU
    from keras.layers.normalization import BatchNormalization

    batch_size = 128
    num_classes = 41
    epochs = 200
    input_shape = (1024, 10)

    # Softmax + categorical_crossentropy need one-hot targets.
    y_train = keras.utils.to_categorical(y_train, num_classes)
    y_val = keras.utils.to_categorical(y_val, num_classes)
    y_test = keras.utils.to_categorical(y_test, num_classes)

    # Three conv stages (32 -> 64 -> 128 filters). Each stage: two Conv1D
    # layers (the second with BatchNorm), LeakyReLU activations, then a
    # 4x max-pool to shrink the temporal dimension.
    model = Sequential()
    model.add(Conv1D(filters=32, kernel_size=16, padding='same', input_shape=input_shape))
    model.add(LeakyReLU(alpha=0.01))
    model.add(Conv1D(filters=32, kernel_size=16, padding='same'))
    model.add(BatchNormalization())
    model.add(LeakyReLU(alpha=0.01))
    model.add(MaxPooling1D(pool_size=4, strides=4, padding='valid'))

    model.add(Conv1D(filters=64, kernel_size=16, padding='same'))
    model.add(LeakyReLU(alpha=0.01))
    model.add(Conv1D(filters=64, kernel_size=16, padding='same'))
    model.add(BatchNormalization())
    model.add(LeakyReLU(alpha=0.01))
    model.add(MaxPooling1D(pool_size=4, strides=4, padding='valid'))

    model.add(Conv1D(filters=128, kernel_size=16, padding='same'))
    model.add(LeakyReLU(alpha=0.01))
    model.add(Conv1D(filters=128, kernel_size=16, padding='same'))
    model.add(BatchNormalization())
    model.add(LeakyReLU(alpha=0.01))
    model.add(MaxPooling1D(pool_size=4, strides=4, padding='valid'))

    # Classifier head: two dense layers, then a 41-way softmax.
    model.add(Flatten())
    model.add(Dense(256))
    model.add(LeakyReLU(alpha=0.01))
    model.add(Dense(128))
    model.add(LeakyReLU(alpha=0.01))
    model.add(Dense(num_classes, activation='softmax'))

    model.compile(loss=keras.losses.categorical_crossentropy,
                  optimizer=keras.optimizers.Adam(lr=5*10**-6, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False),
                  metrics=['accuracy'])
    model.summary()

    # NOTE(review): patience=500 exceeds epochs=200, so this callback can
    # never fire — confirm whether early stopping is actually intended, or
    # whether patience should be well below the epoch count.
    early_stopping = keras.callbacks.EarlyStopping(
        monitor='val_loss',
        min_delta=0.00001,
        patience=500,
        verbose=0,
        mode='auto'
    )
    callbacks_list = [early_stopping]

    model.fit(X_train, y_train,
              batch_size=batch_size,
              callbacks=callbacks_list,
              epochs=epochs,
              verbose=1,
              validation_data=(X_val, y_val))

    score = model.evaluate(X_test, y_test, verbose=0)
    print('Test loss:', score[0])
    print('Test accuracy:', score[1])

    # Re-derive metrics via sklearn on one-hot predictions (micro-averaged
    # precision/recall/F1 plus a per-class report).
    prediction = model.predict_classes(X_test, verbose=1)
    prediction = keras.utils.to_categorical(prediction, num_classes)
    from sklearn.metrics import accuracy_score
    print('************Accuracy of CNN classifier:', accuracy_score(y_test, prediction))
    from sklearn.metrics import f1_score, recall_score, precision_score
    print('Pre_micro = ', precision_score(y_test, prediction, average='micro'))
    print('Rec_micro = ', recall_score(y_test, prediction, average='micro'))
    print('F1_micro = ', f1_score(y_test, prediction, average='micro'))
    from sklearn.metrics import classification_report
    print(classification_report(y_test, prediction))

    model.save('../model_40/cnn_add_noise.h5')
    # Return the trained model so callers can reuse it without reloading
    # from disk (original implicitly returned None; ignoring the return
    # value remains valid).
    return model