tensorflow二分类问题(4)训练过程

train.py(懒得想标题了)

话不多说直接上程序了。

import argparse
import keras
import tensorflow as tf
from keras.callbacks import ModelCheckpoint,EarlyStopping,ReduceLROnPlateau
from keras.utils import multi_gpu_model
from scipyorder import train_gen,valid_gen
from model import build_model
from utils import get_lowest_loss, get_best_model


# Training hyper-parameters. Sample counts are fixed to this post's dataset;
# they are divided by batch_size below to derive the generator step counts.
patience=50  # epochs without val_loss improvement before EarlyStopping fires
epochs=500  # maximum number of training epochs
num_train_samples=3712  # total training samples; used for steps_per_epoch
num_valid_samples=928  # total validation samples; used for validation_steps
batch_size=4  # samples per gradient update

if __name__ == '__main__':
    # Parse CLI arguments: optionally resume training from pretrained weights.
    ap = argparse.ArgumentParser()
    ap.add_argument("-p", "--pretrained", help="path to save pretrained model files")
    args = vars(ap.parse_args())
    pretrained_path = args["pretrained"]
    checkpoint_models_path = 'models/'

    # If no checkpoint path was given on the command line, fall back to the
    # best previously saved model. NOTE: on the very first run (no saved
    # models yet) comment this out, then restore it once checkpoints exist.
    if pretrained_path is None:
        pretrained_path = get_best_model()

    # Callbacks: TensorBoard logging, plus checkpointing / early stopping /
    # LR reduction, all monitoring validation loss.
    tensor_board = keras.callbacks.TensorBoard(log_dir='./logs', histogram_freq=0, write_graph=True, write_images=True)
    model_names = checkpoint_models_path + 'model.{epoch:02d}-{val_loss:.4f}.hdf5'
    model_checkpoint = ModelCheckpoint(model_names, monitor='val_loss', verbose=1, save_best_only=True)
    early_stop = EarlyStopping('val_loss', patience=patience)
    reduce_lr = ReduceLROnPlateau('val_loss', factor=0.1, patience=int(patience / 4), verbose=1)

    class MyCbk(keras.callbacks.Callback):
        """Checkpoint helper that saves a specific wrapped model (the usual
        pattern when training with `multi_gpu_model`, so the single-GPU
        template is saved instead of the wrapper).

        NOTE(review): this class is defined but never added to `callbacks`
        below, so it currently has no effect on training.
        """

        def __init__(self, model):
            keras.callbacks.Callback.__init__(self)
            self.model_to_save = model  # the model instance to serialize

        def on_epoch_end(self, epoch, logs=None):
            fmt = checkpoint_models_path + 'model.%02d-%.4f.hdf5'
            # FIXME(review): `get_lowest_loss()` presumably returns a *loss*
            # value, but it is compared against validation *accuracy* — these
            # are different metrics, so this threshold looks wrong. Confirm
            # against the implementation in utils.py.
            highest_acc = get_lowest_loss()
            if float(logs['val_acc']) > highest_acc:
                self.model_to_save.save(fmt % (epoch, logs['val_acc']))

    # Build the network and optionally warm-start it from saved weights.
    new_model = build_model()
    if pretrained_path is not None:
        new_model.load_weights(pretrained_path)

    adam = keras.optimizers.Adam(lr=1e-4, beta_1=0.9, beta_2=0.99, epsilon=1e-08, decay=5E-6)
    # sgd = keras.optimizers.SGD(lr=1e-5, decay=1e-6, momentum=0.9, nesterov=True)
    new_model.compile(optimizer=adam, loss='categorical_crossentropy', metrics=['accuracy'])
    # new_model.compile(optimizer=adam, loss=[focal_loss(alpha=.25, gamma=2)], metrics=['accuracy'])

    # summary() prints the architecture itself and returns None; wrapping it
    # in print() would emit a stray "None" line after the table.
    new_model.summary()

    # Final callbacks
    callbacks = [tensor_board, model_checkpoint, early_stop, reduce_lr]

    # Start fine-tuning. `shuffle` is ignored by fit_generator when the input
    # is a plain generator, so it is omitted here; shuffling, if needed, must
    # happen inside train_gen() itself.
    new_model.fit_generator(train_gen(),
                            steps_per_epoch=num_train_samples // batch_size,
                            validation_data=valid_gen(),
                            validation_steps=num_valid_samples // batch_size,
                            epochs=epochs,
                            verbose=1,
                            callbacks=callbacks)


点赞呦!

  • 1
    点赞
  • 1
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值