ResNet training: a ~370K-parameter ResNet26 implemented in Keras, reaching 91.90% validation accuracy on CIFAR-10

# @Time : 2022/1/4 8:00 
# @Author : PeinuanQin
# @File : train.py

Imports

import os
import numpy as np

from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import *
from keras.datasets import *
from keras.losses import *
from keras.utils import to_categorical
from keras.layers import *
from keras.regularizers import *
from keras.models import *
from keras.optimizers import *

os.environ["CUDA_VISIBLE_DEVICES"] = "1"  # pin training to GPU 1

Defining the network

# @Time : 2022/1/5 0:41 
# @Author : PeinuanQin
# @File : ResNet.py

class ResNet:

    def __init__(self, name, depth, input_shape, classes, *layer_channels):
        self.name = name
        self.input_shape = input_shape
        self.depth = depth
        self.classes = classes
        self.layer_channels = layer_channels
        self.kernel_initializer = "he_normal"
        self.kernel_regularizer = l2(1e-4)   ## a larger weight decay noticeably hurts training here
        self.seed = 30
        np.random.seed(self.seed)  # np.random.seed() returns None, so store the seed value itself
        self.counter = 0


    def Conv2d_BN(self,input_tensor, channels, strides=1, kernel_size=3, activation=True):

        self.counter += 1
        x = Conv2D(filters=channels,
                  kernel_size=kernel_size,
                  strides=strides,
                  kernel_regularizer=self.kernel_regularizer,
                  kernel_initializer=self.kernel_initializer,
                  padding='same')(input_tensor)

        bn = BatchNormalization(name="bn_{0}".format(self.counter))(x)
        if not activation:
            return bn
        return Activation("relu", name="relu_{0}".format(self.counter))(bn)

    def BasicBlock(self, input_tensor, filters=64, strides=1):

        x = self.Conv2d_BN(channels=filters,
                           input_tensor=input_tensor,
                           strides=strides,
                           activation=True)

        ## no ReLU here: activate only after the shortcut has been added
        x = self.Conv2d_BN(channels=filters,
                           input_tensor=x,
                           strides=1,
                           activation=False)

        # projection shortcut: a 1x1 conv (+ BN) is applied even when the shapes
        # already match, which is simpler than an identity shortcut at a small
        # extra parameter cost
        shortcut = Conv2D(filters,
                          (1, 1),
                          strides=strides)(input_tensor)

        shortcut = BatchNormalization()(shortcut)
        x = add([x, shortcut])
        x = Activation('relu')(x)
        return x

    def make_network(self):

        num_blocks = (self.depth - 2) // (2 * len(self.layer_channels))
        # e.g. depth=26 with 3 stacks: (26 - 2) // 6 = 4 BasicBlocks per stack,
        # and 3 stacks * 4 blocks * 2 convs + stem conv + dense = 26 layers

        inputs = Input(shape=self.input_shape)
        '''
        the first block
        '''
        x = self.Conv2d_BN(channels=self.layer_channels[0],
                           input_tensor=inputs,
                           strides=1,
                           activation=True)

        '''
        the network has one stack per entry in layer_channels (three here);
        each stack contains num_blocks BasicBlocks
        '''
        for stack in range(len(self.layer_channels)):  # one stack per filter width
            for block_id in range(num_blocks):
                strides = 1
                if stack > 0 and block_id == 0:
                    '''only the first stack keeps stride=1 in every block;
                       each later stack downsamples with stride=2 in its first block'''
                    strides = 2
                x = self.BasicBlock(input_tensor=x,  ## each block has two feature-extracting conv layers
                                    filters=self.layer_channels[stack],
                                    strides=strides)

#             x = Lambda(lambda x: x, name='feature%d' % stack)(x)

        x = GlobalAvgPool2D(name='feature_final')(x)

        x = Dense(self.classes,
                  kernel_initializer=self.kernel_initializer,
                  kernel_regularizer= self.kernel_regularizer,
                  name='logits')(x)

        x = Activation('softmax')(x)

        return Model(inputs, x, name='%s_model' % self.name)
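
As a quick sanity check (a minimal sketch, not part of the original script), building the model and printing its parameter count should land close to the ~370K parameters claimed in the title:

model = ResNet("resnet26", 26, (32, 32, 3), 10, 16, 32, 64).make_network()
model.summary()               # layer-by-layer breakdown
print(model.count_params())   # expected: roughly 3.7e5 parameters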

Tools

def image_augmentation_generator(rotation_range=20,
                         width_shift_range=0.1,
                         height_shift_range=0.1,
                         shear_range=0.,
                         zoom_range=0.,
                         zca_epsilon=1e-6,
                         horizontal_flip=True,
                         fill_mode='nearest'):
    """
    for image augmentation
    :param rotation_range:
    :param width_shift_range:
    :param height_shift_range:
    :param shear_range:
    :param zoom_range:
    :param zca_epsilon:
    :param horizontal_flip:
    :param fill_mode:
    :return: a generator
    """
    return ImageDataGenerator(rotation_range=rotation_range,
                         width_shift_range=width_shift_range,
                         height_shift_range=height_shift_range,
                         shear_range=shear_range,
                         zoom_range=zoom_range,
                         zca_epsilon=zca_epsilon,
                         horizontal_flip=horizontal_flip,
                         fill_mode=fill_mode)
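
A minimal usage sketch (assuming x_train and y_train have already been loaded, scaled, and one-hot encoded, as done in training_data_process below): fit the generator statistics, then draw one augmented batch and inspect its shape.

gen = image_augmentation_generator()
gen.fit(x_train)  # only required for feature-wise/ZCA statistics; harmless here
x_batch, y_batch = next(gen.flow(x_train, y_train, batch_size=64))
print(x_batch.shape, y_batch.shape)  # (64, 32, 32, 3) (64, 10) on CIFAR-10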


def lr_sche(epoch):
    """Piecewise-constant schedule used with Adam (Keras passes a 0-based epoch index)."""
    lr = 1e-3
    if epoch > 180:
        lr *= 0.5e-3
    elif epoch > 150:
        lr *= 1e-2
    elif epoch > 120:
        lr *= 1e-1   # same factor as the >80 branch, so nothing actually changes at 120
    elif epoch > 80:
        lr *= 1e-1
    print('Learning rate: ', lr)
    return lr


def lr_sche_sgd(epoch):
    """Piecewise-constant schedule intended for SGD (commented out in this run)."""
    lr = 1e-1
    if epoch > 240:
        lr *= 0.5e-3
    elif epoch > 185:
        lr *= 1e-2
    elif epoch > 135:
        lr *= 1e-1
    # elif epoch > 30:
    #     lr *= 1e-1
    print('Learning rate: ', lr)
    return lr

def lr_scheduler(lr_schedule,verbose=1):
    """
    Create a learning scheduler
    :param lr_schedule: a learning schedule
    :param verbose: default=1
    :return:
    """
    return LearningRateScheduler(schedule=lr_schedule,
                                 verbose=verbose)
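
A quick probe of the schedule actually used in this run (a sketch; Keras passes a 0-based epoch index to the schedule function). The printed values reproduce the "Learning rate:" lines in the training log below: 1e-3 up to epoch 81, 1e-4 from epoch 82, 1e-5 from epoch 152.

for epoch_index in [0, 80, 81, 150, 151, 180, 181]:
    lr_sche(epoch_index)  # lr_sche itself prints 'Learning rate: ...'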

def makedir_makefilepath(dir_name,file_name):
    # get current directory in a unicode string
    current_dir = os.getcwd()
    # join current directory and target dir
    save_dir = os.path.join(current_dir,dir_name)
    # if the dir not exists, create it
    if not os.path.isdir(save_dir):
        os.makedirs(save_dir)
    # join the target dir to make the target path
    file_path = os.path.join(save_dir,file_name)

    return file_path

def training_data_process(batch_size,dataset='cifar10',aug=True):
    """
    :param batch_size:
    :param dataset:
    :param aug:
    :return:
    """

    # CIFAR-10 is the default; the branches below overwrite it for other datasets
    input_shape = (32, 32, 3)
    (x_train, y_train), (x_test, y_test) = cifar10.load_data()

    if dataset == 'fashionmnist':
        input_shape = (28,28,1)
        (x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()
        x_train = x_train.reshape(x_train.shape[0],28,28,1)
        x_test = x_test.reshape(x_test.shape[0],28,28,1)

    if dataset == 'mnist':
        input_shape = (28, 28, 1)
        (x_train, y_train), (x_test, y_test) = mnist.load_data()
        x_train = x_train.reshape(x_train.shape[0], 28, 28, 1)
        x_test = x_test.reshape(x_test.shape[0], 28, 28, 1)

    if dataset == 'cifar100':
        input_shape = (32, 32, 3)
        (x_train, y_train), (x_test, y_test) = cifar100.load_data()

    y_test = to_categorical(y_test)
    y_train = to_categorical(y_train)
    x_train = x_train / 255.
    x_test = x_test / 255.

    if aug:
        datagen = image_augmentation_generator()
        datagen.fit(x_train)
        imgGen = datagen.flow(x_train, y_train, batch_size=batch_size, shuffle=True)
    else:
        return input_shape, x_train, y_train, x_test, y_test

    return input_shape, imgGen, x_test, y_test
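
For reference, a hedged usage sketch of the non-augmented path, which returns raw arrays instead of a generator:

input_shape, x_train, y_train, x_test, y_test = training_data_process(64, aug=False)
print(input_shape)                    # (32, 32, 3)
print(x_train.shape, y_train.shape)   # (50000, 32, 32, 3) (50000, 10)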

def search_best_weights_in_doc(doc_path):
    """Return the epoch and path of the newest .h5 checkpoint in a folder."""
    lst = [x for x in os.listdir(doc_path) if x.endswith(".h5")]
    # filenames look like 'resnet26.118.h5', so the epoch is the middle field
    lst.sort(key=lambda x: int(x.split(".")[1]), reverse=True)
    print("loading %s" % lst[0])
    epochs = int(lst[0].split(".")[1])
    return epochs, os.path.join(doc_path, lst[0])
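
A sketch of how this helper might be used after training (an assumption, not shown in the original post): because ModelCheckpoint runs with save_best_only=True, the newest checkpoint is also the best one, so it can be reloaded and evaluated directly (load_model comes from the keras.models star import above; x_test / y_test as returned by training_data_process).

best_epoch, best_path = search_best_weights_in_doc("./trained_model/cifar10/resnet26")
model = load_model(best_path)
loss, acc = model.evaluate(x_test, y_test, verbose=0)
print("epoch %d: test accuracy %.4f" % (best_epoch, acc))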

Defining the training procedure

def train_ResNet():
    batch_size = 64
    seed = 30
    print("train resnet without attention")
    res = ResNet("resnet26", 26, (32, 32, 3), 10, 16, 32, 64).make_network()
#     optimizer = SGD(lr=LR
#                     , momentum=0.9
#                     , decay=5e-4)  # mini-batch momentum SGD with L2 regularization (weight decay)
    optimizer_adam = Adam(0.001)
    np.random.seed(seed)
    input_shape, imgGen, x_test, y_test = training_data_process(batch_size, aug=True)

    model_name = 'resnet26.{epoch:03d}.h5'

    filepath = makedir_makefilepath('./trained_model/cifar10/resnet26', model_name)
    checkpoint = ModelCheckpoint(filepath=filepath, monitor='val_acc',verbose=1,save_best_only=True)
    tensorboard = TensorBoard('./trained_model/cifar10/resnet26')
    early_stop = EarlyStopping(monitor="val_acc",patience=35,verbose=1)


    res.compile(optimizer=optimizer_adam,
             loss=categorical_crossentropy,
             metrics=["accuracy"])

    res.fit_generator(imgGen,
                         steps_per_epoch=50000 // batch_size,
                         epochs=200,
                         verbose=1,
                         validation_data=(x_test, y_test),
                         callbacks=[
#                              lr_scheduler(lr_sche_sgd)
                             lr_scheduler(lr_sche)
                             , checkpoint
                             , tensorboard
                            ,early_stop])
train_ResNet()

Training log

Epoch 00001: LearningRateScheduler reducing learning rate to 0.001.
781/781 [==============================] - 53s 68ms/step - loss: 1.7858 - acc: 0.4089 - val_loss: 2.4903 - val_acc: 0.3393

Epoch 00001: val_acc improved from -inf to 0.33930, saving model to /home/wqx/workspace/qpn/KD/./trained_model/cifar10/resnet26/resnet26.001.h5
Epoch 2/200
Learning rate: 0.001

Epoch 00002: LearningRateScheduler reducing learning rate to 0.001.
781/781 [==============================] - 43s 55ms/step - loss: 1.4370 - acc: 0.5480 - val_loss: 1.6219 - val_acc: 0.5197

Epoch 00002: val_acc improved from 0.33930 to 0.51970, saving model to /home/wqx/workspace/qpn/KD/./trained_model/cifar10/resnet26/resnet26.002.h5
Epoch 3/200
Learning rate: 0.001

Epoch 00003: LearningRateScheduler reducing learning rate to 0.001.
781/781 [==============================] - 44s 56ms/step - loss: 1.2202 - acc: 0.6289 - val_loss: 1.6177 - val_acc: 0.5413

Epoch 00003: val_acc improved from 0.51970 to 0.54130, saving model to /home/wqx/workspace/qpn/KD/./trained_model/cifar10/resnet26/resnet26.003.h5
Epoch 4/200
Learning rate: 0.001

Epoch 00004: LearningRateScheduler reducing learning rate to 0.001.
781/781 [==============================] - 44s 56ms/step - loss: 1.0860 - acc: 0.6794 - val_loss: 1.1707 - val_acc: 0.6543

Epoch 00004: val_acc improved from 0.54130 to 0.65430, saving model to /home/wqx/workspace/qpn/KD/./trained_model/cifar10/resnet26/resnet26.004.h5
Epoch 5/200
Learning rate: 0.001

Epoch 00005: LearningRateScheduler reducing learning rate to 0.001.
781/781 [==============================] - 45s 57ms/step - loss: 0.9973 - acc: 0.7105 - val_loss: 1.0414 - val_acc: 0.6984

Epoch 00005: val_acc improved from 0.65430 to 0.69840, saving model to /home/wqx/workspace/qpn/KD/./trained_model/cifar10/resnet26/resnet26.005.h5
Epoch 6/200
Learning rate: 0.001

Epoch 00006: LearningRateScheduler reducing learning rate to 0.001.
781/781 [==============================] - 44s 56ms/step - loss: 0.9307 - acc: 0.7358 - val_loss: 1.1164 - val_acc: 0.6862

Epoch 00006: val_acc did not improve from 0.69840
Epoch 7/200
Learning rate: 0.001

Epoch 00007: LearningRateScheduler reducing learning rate to 0.001.
781/781 [==============================] - 44s 57ms/step - loss: 0.8846 - acc: 0.7517 - val_loss: 1.3919 - val_acc: 0.6405

Epoch 00007: val_acc did not improve from 0.69840
Epoch 8/200
Learning rate: 0.001

Epoch 00008: LearningRateScheduler reducing learning rate to 0.001.
781/781 [==============================] - 44s 56ms/step - loss: 0.8468 - acc: 0.7650 - val_loss: 1.0436 - val_acc: 0.7210

Epoch 00008: val_acc improved from 0.69840 to 0.72100, saving model to /home/wqx/workspace/qpn/KD/./trained_model/cifar10/resnet26/resnet26.008.h5
Epoch 9/200
Learning rate: 0.001

Epoch 00009: LearningRateScheduler reducing learning rate to 0.001.
781/781 [==============================] - 45s 57ms/step - loss: 0.8104 - acc: 0.7775 - val_loss: 0.9483 - val_acc: 0.7394

Epoch 00009: val_acc improved from 0.72100 to 0.73940, saving model to /home/wqx/workspace/qpn/KD/./trained_model/cifar10/resnet26/resnet26.009.h5
Epoch 10/200
Learning rate: 0.001

Epoch 00010: LearningRateScheduler reducing learning rate to 0.001.
781/781 [==============================] - 44s 57ms/step - loss: 0.7841 - acc: 0.7876 - val_loss: 0.9037 - val_acc: 0.7548

Epoch 00010: val_acc improved from 0.73940 to 0.75480, saving model to /home/wqx/workspace/qpn/KD/./trained_model/cifar10/resnet26/resnet26.010.h5
Epoch 11/200
Learning rate: 0.001

Epoch 00011: LearningRateScheduler reducing learning rate to 0.001.
781/781 [==============================] - 45s 58ms/step - loss: 0.7606 - acc: 0.7953 - val_loss: 0.9882 - val_acc: 0.7383

Epoch 00011: val_acc did not improve from 0.75480
Epoch 12/200
Learning rate: 0.001

Epoch 00012: LearningRateScheduler reducing learning rate to 0.001.
781/781 [==============================] - 45s 58ms/step - loss: 0.7440 - acc: 0.8016 - val_loss: 0.9113 - val_acc: 0.7536

Epoch 00012: val_acc did not improve from 0.75480
Epoch 13/200
Learning rate: 0.001

Epoch 00013: LearningRateScheduler reducing learning rate to 0.001.
781/781 [==============================] - 43s 56ms/step - loss: 0.7207 - acc: 0.8089 - val_loss: 1.1794 - val_acc: 0.6960

Epoch 00013: val_acc did not improve from 0.75480
Epoch 14/200
Learning rate: 0.001

Epoch 00014: LearningRateScheduler reducing learning rate to 0.001.
781/781 [==============================] - 44s 57ms/step - loss: 0.7105 - acc: 0.8128 - val_loss: 0.9037 - val_acc: 0.7608

Epoch 00014: val_acc improved from 0.75480 to 0.76080, saving model to /home/wqx/workspace/qpn/KD/./trained_model/cifar10/resnet26/resnet26.014.h5
Epoch 15/200
Learning rate: 0.001

Epoch 00015: LearningRateScheduler reducing learning rate to 0.001.
781/781 [==============================] - 45s 57ms/step - loss: 0.6942 - acc: 0.8199 - val_loss: 0.8343 - val_acc: 0.7855

Epoch 00015: val_acc improved from 0.76080 to 0.78550, saving model to /home/wqx/workspace/qpn/KD/./trained_model/cifar10/resnet26/resnet26.015.h5
Epoch 16/200
Learning rate: 0.001

Epoch 00016: LearningRateScheduler reducing learning rate to 0.001.
781/781 [==============================] - 45s 57ms/step - loss: 0.6800 - acc: 0.8262 - val_loss: 1.1409 - val_acc: 0.7092

Epoch 00016: val_acc did not improve from 0.78550
Epoch 17/200
Learning rate: 0.001

Epoch 00017: LearningRateScheduler reducing learning rate to 0.001.
781/781 [==============================] - 44s 56ms/step - loss: 0.6755 - acc: 0.8228 - val_loss: 0.7853 - val_acc: 0.8000

Epoch 00017: val_acc improved from 0.78550 to 0.80000, saving model to /home/wqx/workspace/qpn/KD/./trained_model/cifar10/resnet26/resnet26.017.h5
Epoch 18/200
Learning rate: 0.001

Epoch 00018: LearningRateScheduler reducing learning rate to 0.001.
781/781 [==============================] - 45s 58ms/step - loss: 0.6596 - acc: 0.8325 - val_loss: 0.7511 - val_acc: 0.7989

Epoch 00018: val_acc did not improve from 0.80000
Epoch 19/200
Learning rate: 0.001

Epoch 00019: LearningRateScheduler reducing learning rate to 0.001.
781/781 [==============================] - 45s 57ms/step - loss: 0.6486 - acc: 0.8360 - val_loss: 0.8712 - val_acc: 0.7752

Epoch 00019: val_acc did not improve from 0.80000
Epoch 20/200
Learning rate: 0.001

Epoch 00020: LearningRateScheduler reducing learning rate to 0.001.
781/781 [==============================] - 44s 56ms/step - loss: 0.6417 - acc: 0.8389 - val_loss: 1.0169 - val_acc: 0.7556

Epoch 00020: val_acc did not improve from 0.80000
Epoch 21/200
Learning rate: 0.001

Epoch 00021: LearningRateScheduler reducing learning rate to 0.001.
781/781 [==============================] - 45s 58ms/step - loss: 0.6324 - acc: 0.8431 - val_loss: 0.8230 - val_acc: 0.7815

Epoch 00021: val_acc did not improve from 0.80000
Epoch 22/200
Learning rate: 0.001

Epoch 00022: LearningRateScheduler reducing learning rate to 0.001.
781/781 [==============================] - 45s 57ms/step - loss: 0.6289 - acc: 0.8432 - val_loss: 0.7393 - val_acc: 0.8071

Epoch 00022: val_acc improved from 0.80000 to 0.80710, saving model to /home/wqx/workspace/qpn/KD/./trained_model/cifar10/resnet26/resnet26.022.h5
Epoch 23/200
Learning rate: 0.001

Epoch 00023: LearningRateScheduler reducing learning rate to 0.001.
781/781 [==============================] - 44s 56ms/step - loss: 0.6132 - acc: 0.8502 - val_loss: 0.7149 - val_acc: 0.8193

Epoch 00023: val_acc improved from 0.80710 to 0.81930, saving model to /home/wqx/workspace/qpn/KD/./trained_model/cifar10/resnet26/resnet26.023.h5
Epoch 24/200
Learning rate: 0.001

Epoch 00024: LearningRateScheduler reducing learning rate to 0.001.
781/781 [==============================] - 45s 58ms/step - loss: 0.6103 - acc: 0.8495 - val_loss: 0.8710 - val_acc: 0.7697

Epoch 00024: val_acc did not improve from 0.81930
Epoch 25/200
Learning rate: 0.001

Epoch 00025: LearningRateScheduler reducing learning rate to 0.001.
781/781 [==============================] - 44s 56ms/step - loss: 0.6001 - acc: 0.8540 - val_loss: 1.1150 - val_acc: 0.7286

Epoch 00025: val_acc did not improve from 0.81930
Epoch 26/200
Learning rate: 0.001

Epoch 00026: LearningRateScheduler reducing learning rate to 0.001.
781/781 [==============================] - 44s 57ms/step - loss: 0.5961 - acc: 0.8564 - val_loss: 0.7177 - val_acc: 0.8202

Epoch 00026: val_acc improved from 0.81930 to 0.82020, saving model to /home/wqx/workspace/qpn/KD/./trained_model/cifar10/resnet26/resnet26.026.h5
Epoch 27/200
Learning rate: 0.001

Epoch 00027: LearningRateScheduler reducing learning rate to 0.001.
781/781 [==============================] - 43s 55ms/step - loss: 0.5941 - acc: 0.8572 - val_loss: 1.0045 - val_acc: 0.7443

Epoch 00027: val_acc did not improve from 0.82020
Epoch 28/200
Learning rate: 0.001

Epoch 00028: LearningRateScheduler reducing learning rate to 0.001.
781/781 [==============================] - 44s 57ms/step - loss: 0.5841 - acc: 0.8588 - val_loss: 0.7119 - val_acc: 0.8278

Epoch 00028: val_acc improved from 0.82020 to 0.82780, saving model to /home/wqx/workspace/qpn/KD/./trained_model/cifar10/resnet26/resnet26.028.h5
Epoch 29/200
Learning rate: 0.001

Epoch 00029: LearningRateScheduler reducing learning rate to 0.001.
781/781 [==============================] - 44s 57ms/step - loss: 0.5830 - acc: 0.8611 - val_loss: 1.0354 - val_acc: 0.7455

Epoch 00029: val_acc did not improve from 0.82780
Epoch 30/200
Learning rate: 0.001

Epoch 00030: LearningRateScheduler reducing learning rate to 0.001.
781/781 [==============================] - 44s 57ms/step - loss: 0.5729 - acc: 0.8652 - val_loss: 0.7174 - val_acc: 0.8205

Epoch 00030: val_acc did not improve from 0.82780
Epoch 31/200
Learning rate: 0.001

Epoch 00031: LearningRateScheduler reducing learning rate to 0.001.
781/781 [==============================] - 45s 58ms/step - loss: 0.5680 - acc: 0.8639 - val_loss: 0.7219 - val_acc: 0.8185

Epoch 00031: val_acc did not improve from 0.82780
Epoch 32/200
Learning rate: 0.001

Epoch 00032: LearningRateScheduler reducing learning rate to 0.001.
781/781 [==============================] - 44s 57ms/step - loss: 0.5655 - acc: 0.8654 - val_loss: 0.6746 - val_acc: 0.8305

Epoch 00032: val_acc improved from 0.82780 to 0.83050, saving model to /home/wqx/workspace/qpn/KD/./trained_model/cifar10/resnet26/resnet26.032.h5
Epoch 33/200
Learning rate: 0.001

Epoch 00033: LearningRateScheduler reducing learning rate to 0.001.
781/781 [==============================] - 44s 56ms/step - loss: 0.5612 - acc: 0.8688 - val_loss: 0.6719 - val_acc: 0.8409

Epoch 00033: val_acc improved from 0.83050 to 0.84090, saving model to /home/wqx/workspace/qpn/KD/./trained_model/cifar10/resnet26/resnet26.033.h5
Epoch 34/200
Learning rate: 0.001

Epoch 00034: LearningRateScheduler reducing learning rate to 0.001.
781/781 [==============================] - 44s 56ms/step - loss: 0.5567 - acc: 0.8671 - val_loss: 0.7339 - val_acc: 0.8233

Epoch 00034: val_acc did not improve from 0.84090
Epoch 35/200
Learning rate: 0.001

Epoch 00035: LearningRateScheduler reducing learning rate to 0.001.
781/781 [==============================] - 46s 59ms/step - loss: 0.5570 - acc: 0.8700 - val_loss: 0.7034 - val_acc: 0.8317

Epoch 00035: val_acc did not improve from 0.84090
Epoch 36/200
Learning rate: 0.001

Epoch 00036: LearningRateScheduler reducing learning rate to 0.001.
781/781 [==============================] - 45s 58ms/step - loss: 0.5497 - acc: 0.8713 - val_loss: 0.8814 - val_acc: 0.7713

Epoch 00036: val_acc did not improve from 0.84090
Epoch 37/200
Learning rate: 0.001

Epoch 00037: LearningRateScheduler reducing learning rate to 0.001.
781/781 [==============================] - 45s 58ms/step - loss: 0.5421 - acc: 0.8735 - val_loss: 0.7116 - val_acc: 0.8272

Epoch 00037: val_acc did not improve from 0.84090
Epoch 38/200
Learning rate: 0.001

Epoch 00038: LearningRateScheduler reducing learning rate to 0.001.
781/781 [==============================] - 44s 56ms/step - loss: 0.5426 - acc: 0.8753 - val_loss: 0.7206 - val_acc: 0.8250

Epoch 00038: val_acc did not improve from 0.84090
Epoch 39/200
Learning rate: 0.001

Epoch 00039: LearningRateScheduler reducing learning rate to 0.001.
781/781 [==============================] - 44s 57ms/step - loss: 0.5442 - acc: 0.8753 - val_loss: 0.8074 - val_acc: 0.7989

Epoch 00039: val_acc did not improve from 0.84090
Epoch 40/200
Learning rate: 0.001

Epoch 00040: LearningRateScheduler reducing learning rate to 0.001.
781/781 [==============================] - 45s 58ms/step - loss: 0.5349 - acc: 0.8774 - val_loss: 0.6811 - val_acc: 0.8370

Epoch 00040: val_acc did not improve from 0.84090
Epoch 41/200
Learning rate: 0.001

Epoch 00041: LearningRateScheduler reducing learning rate to 0.001.
781/781 [==============================] - 46s 58ms/step - loss: 0.5293 - acc: 0.8772 - val_loss: 0.8169 - val_acc: 0.8045

Epoch 00041: val_acc did not improve from 0.84090
Epoch 42/200
Learning rate: 0.001

Epoch 00042: LearningRateScheduler reducing learning rate to 0.001.
781/781 [==============================] - 45s 58ms/step - loss: 0.5305 - acc: 0.8779 - val_loss: 0.6868 - val_acc: 0.8363

Epoch 00042: val_acc did not improve from 0.84090
Epoch 43/200
Learning rate: 0.001

Epoch 00043: LearningRateScheduler reducing learning rate to 0.001.
781/781 [==============================] - 44s 56ms/step - loss: 0.5305 - acc: 0.8803 - val_loss: 0.8123 - val_acc: 0.8053

Epoch 00043: val_acc did not improve from 0.84090
Epoch 44/200
Learning rate: 0.001

Epoch 00044: LearningRateScheduler reducing learning rate to 0.001.
781/781 [==============================] - 45s 58ms/step - loss: 0.5207 - acc: 0.8811 - val_loss: 0.7438 - val_acc: 0.8234

Epoch 00044: val_acc did not improve from 0.84090
Epoch 45/200
Learning rate: 0.001

Epoch 00045: LearningRateScheduler reducing learning rate to 0.001.
781/781 [==============================] - 45s 57ms/step - loss: 0.5243 - acc: 0.8807 - val_loss: 0.7278 - val_acc: 0.8272

Epoch 00045: val_acc did not improve from 0.84090
Epoch 46/200
Learning rate: 0.001

Epoch 00046: LearningRateScheduler reducing learning rate to 0.001.
781/781 [==============================] - 43s 55ms/step - loss: 0.5284 - acc: 0.8803 - val_loss: 0.7789 - val_acc: 0.8137

Epoch 00046: val_acc did not improve from 0.84090
Epoch 47/200
Learning rate: 0.001

Epoch 00047: LearningRateScheduler reducing learning rate to 0.001.
781/781 [==============================] - 45s 57ms/step - loss: 0.5166 - acc: 0.8828 - val_loss: 0.6982 - val_acc: 0.8438

Epoch 00047: val_acc improved from 0.84090 to 0.84380, saving model to /home/wqx/workspace/qpn/KD/./trained_model/cifar10/resnet26/resnet26.047.h5
Epoch 48/200
Learning rate: 0.001

Epoch 00048: LearningRateScheduler reducing learning rate to 0.001.
781/781 [==============================] - 44s 57ms/step - loss: 0.5137 - acc: 0.8841 - val_loss: 0.8411 - val_acc: 0.7891

Epoch 00048: val_acc did not improve from 0.84380
Epoch 49/200
Learning rate: 0.001

Epoch 00049: LearningRateScheduler reducing learning rate to 0.001.
781/781 [==============================] - 44s 57ms/step - loss: 0.5116 - acc: 0.8859 - val_loss: 0.6208 - val_acc: 0.8572

Epoch 00049: val_acc improved from 0.84380 to 0.85720, saving model to /home/wqx/workspace/qpn/KD/./trained_model/cifar10/resnet26/resnet26.049.h5
Epoch 50/200
Learning rate: 0.001

Epoch 00050: LearningRateScheduler reducing learning rate to 0.001.
781/781 [==============================] - 44s 56ms/step - loss: 0.5140 - acc: 0.8836 - val_loss: 0.6350 - val_acc: 0.8516

Epoch 00050: val_acc did not improve from 0.85720
Epoch 51/200
Learning rate: 0.001

Epoch 00051: LearningRateScheduler reducing learning rate to 0.001.
781/781 [==============================] - 45s 58ms/step - loss: 0.5081 - acc: 0.8867 - val_loss: 1.2249 - val_acc: 0.7143

Epoch 00051: val_acc did not improve from 0.85720
Epoch 52/200
Learning rate: 0.001

Epoch 00052: LearningRateScheduler reducing learning rate to 0.001.
781/781 [==============================] - 44s 57ms/step - loss: 0.5089 - acc: 0.8858 - val_loss: 0.8842 - val_acc: 0.7908

Epoch 00052: val_acc did not improve from 0.85720
Epoch 53/200
Learning rate: 0.001

Epoch 00053: LearningRateScheduler reducing learning rate to 0.001.
781/781 [==============================] - 44s 56ms/step - loss: 0.5058 - acc: 0.8881 - val_loss: 0.7280 - val_acc: 0.8151

Epoch 00053: val_acc did not improve from 0.85720
Epoch 54/200
Learning rate: 0.001

Epoch 00054: LearningRateScheduler reducing learning rate to 0.001.
781/781 [==============================] - 44s 57ms/step - loss: 0.5067 - acc: 0.8853 - val_loss: 0.6312 - val_acc: 0.8515

Epoch 00054: val_acc did not improve from 0.85720
Epoch 55/200
Learning rate: 0.001

Epoch 00055: LearningRateScheduler reducing learning rate to 0.001.
781/781 [==============================] - 45s 58ms/step - loss: 0.4988 - acc: 0.8884 - val_loss: 0.6850 - val_acc: 0.8399

Epoch 00055: val_acc did not improve from 0.85720
Epoch 56/200
Learning rate: 0.001

Epoch 00056: LearningRateScheduler reducing learning rate to 0.001.
781/781 [==============================] - 44s 57ms/step - loss: 0.4982 - acc: 0.8886 - val_loss: 0.6930 - val_acc: 0.8385

Epoch 00056: val_acc did not improve from 0.85720
Epoch 57/200
Learning rate: 0.001

Epoch 00057: LearningRateScheduler reducing learning rate to 0.001.
781/781 [==============================] - 44s 56ms/step - loss: 0.5010 - acc: 0.8888 - val_loss: 0.9914 - val_acc: 0.7567

Epoch 00057: val_acc did not improve from 0.85720
Epoch 58/200
Learning rate: 0.001

Epoch 00058: LearningRateScheduler reducing learning rate to 0.001.
781/781 [==============================] - 45s 57ms/step - loss: 0.4974 - acc: 0.8892 - val_loss: 0.7194 - val_acc: 0.8253

Epoch 00058: val_acc did not improve from 0.85720
Epoch 59/200
Learning rate: 0.001

Epoch 00059: LearningRateScheduler reducing learning rate to 0.001.
781/781 [==============================] - 45s 57ms/step - loss: 0.4939 - acc: 0.8902 - val_loss: 0.6861 - val_acc: 0.8378

Epoch 00059: val_acc did not improve from 0.85720
Epoch 60/200
Learning rate: 0.001

Epoch 00060: LearningRateScheduler reducing learning rate to 0.001.
781/781 [==============================] - 44s 56ms/step - loss: 0.4910 - acc: 0.8926 - val_loss: 0.6110 - val_acc: 0.8560

Epoch 00060: val_acc did not improve from 0.85720
Epoch 61/200
Learning rate: 0.001

Epoch 00061: LearningRateScheduler reducing learning rate to 0.001.
781/781 [==============================] - 43s 55ms/step - loss: 0.4917 - acc: 0.8916 - val_loss: 0.7389 - val_acc: 0.8232

Epoch 00061: val_acc did not improve from 0.85720
Epoch 62/200
Learning rate: 0.001

Epoch 00062: LearningRateScheduler reducing learning rate to 0.001.
781/781 [==============================] - 44s 57ms/step - loss: 0.4895 - acc: 0.8924 - val_loss: 0.7131 - val_acc: 0.8330

Epoch 00063: val_acc did not improve from 0.85720
Epoch 64/200
Learning rate: 0.001

Epoch 00064: LearningRateScheduler reducing learning rate to 0.001.
781/781 [==============================] - 45s 58ms/step - loss: 0.4873 - acc: 0.8936 - val_loss: 0.6369 - val_acc: 0.8573

Epoch 00064: val_acc improved from 0.85720 to 0.85730, saving model to /home/wqx/workspace/qpn/KD/./trained_model/cifar10/resnet26/resnet26.064.h5
Epoch 65/200
Learning rate: 0.001

Epoch 00065: LearningRateScheduler reducing learning rate to 0.001.
781/781 [==============================] - 43s 55ms/step - loss: 0.4867 - acc: 0.8924 - val_loss: 0.5546 - val_acc: 0.8733

Epoch 00065: val_acc improved from 0.85730 to 0.87330, saving model to /home/wqx/workspace/qpn/KD/./trained_model/cifar10/resnet26/resnet26.065.h5
Epoch 66/200
Learning rate: 0.001

Epoch 00066: LearningRateScheduler reducing learning rate to 0.001.
781/781 [==============================] - 43s 55ms/step - loss: 0.4814 - acc: 0.8951 - val_loss: 0.7428 - val_acc: 0.8184

Epoch 00066: val_acc did not improve from 0.87330
Epoch 67/200
Learning rate: 0.001

Epoch 00067: LearningRateScheduler reducing learning rate to 0.001.
781/781 [==============================] - 43s 55ms/step - loss: 0.4752 - acc: 0.8960 - val_loss: 0.5826 - val_acc: 0.8699

Epoch 00067: val_acc did not improve from 0.87330
Epoch 68/200
Learning rate: 0.001

Epoch 00068: LearningRateScheduler reducing learning rate to 0.001.
781/781 [==============================] - 43s 55ms/step - loss: 0.4789 - acc: 0.8947 - val_loss: 0.6436 - val_acc: 0.8507

Epoch 00068: val_acc did not improve from 0.87330
Epoch 69/200
Learning rate: 0.001

Epoch 00069: LearningRateScheduler reducing learning rate to 0.001.
781/781 [==============================] - 42s 54ms/step - loss: 0.4758 - acc: 0.8957 - val_loss: 0.7689 - val_acc: 0.8227

Epoch 00069: val_acc did not improve from 0.87330
Epoch 70/200
Learning rate: 0.001

Epoch 00070: LearningRateScheduler reducing learning rate to 0.001.
781/781 [==============================] - 41s 53ms/step - loss: 0.4811 - acc: 0.8937 - val_loss: 0.6358 - val_acc: 0.8555

Epoch 00070: val_acc did not improve from 0.87330
Epoch 71/200
Learning rate: 0.001

Epoch 00071: LearningRateScheduler reducing learning rate to 0.001.
781/781 [==============================] - 41s 53ms/step - loss: 0.4795 - acc: 0.8947 - val_loss: 0.5988 - val_acc: 0.8602

Epoch 00071: val_acc did not improve from 0.87330
Epoch 72/200
Learning rate: 0.001

Epoch 00072: LearningRateScheduler reducing learning rate to 0.001.
781/781 [==============================] - 42s 54ms/step - loss: 0.4726 - acc: 0.8974 - val_loss: 0.8370 - val_acc: 0.8054

Epoch 00072: val_acc did not improve from 0.87330
Epoch 73/200
Learning rate: 0.001

Epoch 00073: LearningRateScheduler reducing learning rate to 0.001.
781/781 [==============================] - 43s 55ms/step - loss: 0.4725 - acc: 0.8982 - val_loss: 0.7109 - val_acc: 0.8340

Epoch 00073: val_acc did not improve from 0.87330
Epoch 74/200
Learning rate: 0.001

Epoch 00074: LearningRateScheduler reducing learning rate to 0.001.
781/781 [==============================] - 44s 56ms/step - loss: 0.4749 - acc: 0.8959 - val_loss: 0.7018 - val_acc: 0.8342

Epoch 00074: val_acc did not improve from 0.87330
Epoch 75/200
Learning rate: 0.001

Epoch 00075: LearningRateScheduler reducing learning rate to 0.001.
781/781 [==============================] - 42s 54ms/step - loss: 0.4679 - acc: 0.8998 - val_loss: 0.6416 - val_acc: 0.8537

Epoch 00075: val_acc did not improve from 0.87330
Epoch 76/200
Learning rate: 0.001

Epoch 00076: LearningRateScheduler reducing learning rate to 0.001.
781/781 [==============================] - 44s 56ms/step - loss: 0.4725 - acc: 0.8975 - val_loss: 0.6873 - val_acc: 0.8310

Epoch 00076: val_acc did not improve from 0.87330
Epoch 77/200
Learning rate: 0.001

Epoch 00077: LearningRateScheduler reducing learning rate to 0.001.
781/781 [==============================] - 45s 57ms/step - loss: 0.4712 - acc: 0.8971 - val_loss: 0.5810 - val_acc: 0.8658

Epoch 00077: val_acc did not improve from 0.87330
Epoch 78/200
Learning rate: 0.001

Epoch 00078: LearningRateScheduler reducing learning rate to 0.001.
781/781 [==============================] - 44s 57ms/step - loss: 0.4676 - acc: 0.8968 - val_loss: 0.6586 - val_acc: 0.8399

Epoch 00078: val_acc did not improve from 0.87330
Epoch 79/200
Learning rate: 0.001

Epoch 00079: LearningRateScheduler reducing learning rate to 0.001.
781/781 [==============================] - 45s 58ms/step - loss: 0.4661 - acc: 0.8985 - val_loss: 0.7387 - val_acc: 0.8317

Epoch 00079: val_acc did not improve from 0.87330
Epoch 80/200
Learning rate: 0.001

Epoch 00080: LearningRateScheduler reducing learning rate to 0.001.
781/781 [==============================] - 45s 57ms/step - loss: 0.4610 - acc: 0.9004 - val_loss: 0.6348 - val_acc: 0.8576

Epoch 00081: val_acc did not improve from 0.87330
Epoch 82/200
Learning rate: 0.0001

Epoch 00082: LearningRateScheduler reducing learning rate to 0.0001.
781/781 [==============================] - 45s 57ms/step - loss: 0.3943 - acc: 0.9235 - val_loss: 0.4477 - val_acc: 0.9081

Epoch 00082: val_acc improved from 0.87330 to 0.90810, saving model to /home/wqx/workspace/qpn/KD/./trained_model/cifar10/resnet26/resnet26.082.h5
Epoch 83/200
Learning rate: 0.0001

Epoch 00083: LearningRateScheduler reducing learning rate to 0.0001.
781/781 [==============================] - 44s 56ms/step - loss: 0.3619 - acc: 0.9346 - val_loss: 0.4515 - val_acc: 0.9065

Epoch 00083: val_acc did not improve from 0.90810
Epoch 84/200
Learning rate: 0.0001

Epoch 00084: LearningRateScheduler reducing learning rate to 0.0001.
781/781 [==============================] - 44s 57ms/step - loss: 0.3545 - acc: 0.9361 - val_loss: 0.4615 - val_acc: 0.9025

Epoch 00084: val_acc did not improve from 0.90810
Epoch 85/200
Learning rate: 0.0001

Epoch 00085: LearningRateScheduler reducing learning rate to 0.0001.
781/781 [==============================] - 44s 56ms/step - loss: 0.3324 - acc: 0.9431 - val_loss: 0.4291 - val_acc: 0.9126

Epoch 00086: val_acc improved from 0.90810 to 0.91260, saving model to /home/wqx/workspace/qpn/KD/./trained_model/cifar10/resnet26/resnet26.086.h5
Epoch 87/200
Learning rate: 0.0001

Epoch 00087: LearningRateScheduler reducing learning rate to 0.0001.
781/781 [==============================] - 42s 53ms/step - loss: 0.3274 - acc: 0.9438 - val_loss: 0.4502 - val_acc: 0.9060

Epoch 00087: val_acc did not improve from 0.91260
Epoch 88/200
Learning rate: 0.0001

Epoch 00088: LearningRateScheduler reducing learning rate to 0.0001.
781/781 [==============================] - 42s 54ms/step - loss: 0.3247 - acc: 0.9435 - val_loss: 0.4427 - val_acc: 0.9087

Epoch 00088: val_acc did not improve from 0.91260
Epoch 89/200
Learning rate: 0.0001

Epoch 00089: LearningRateScheduler reducing learning rate to 0.0001.
781/781 [==============================] - 42s 54ms/step - loss: 0.3174 - acc: 0.9459 - val_loss: 0.4510 - val_acc: 0.9049

Epoch 00089: val_acc did not improve from 0.91260
Epoch 90/200
Learning rate: 0.0001

Epoch 00090: LearningRateScheduler reducing learning rate to 0.0001.
781/781 [==============================] - 42s 53ms/step - loss: 0.3097 - acc: 0.9472 - val_loss: 0.4538 - val_acc: 0.9050

Epoch 00090: val_acc did not improve from 0.91260
Epoch 91/200
Learning rate: 0.0001

Epoch 00091: LearningRateScheduler reducing learning rate to 0.0001.
781/781 [==============================] - 42s 54ms/step - loss: 0.3072 - acc: 0.9477 - val_loss: 0.4210 - val_acc: 0.9147

Epoch 00091: val_acc improved from 0.91260 to 0.91470, saving model to /home/wqx/workspace/qpn/KD/./trained_model/cifar10/resnet26/resnet26.091.h5
Epoch 92/200
Learning rate: 0.0001

Epoch 00092: LearningRateScheduler reducing learning rate to 0.0001.
781/781 [==============================] - 44s 57ms/step - loss: 0.3037 - acc: 0.9496 - val_loss: 0.4365 - val_acc: 0.9104

Epoch 00092: val_acc did not improve from 0.91470
Epoch 93/200
Learning rate: 0.0001

Epoch 00093: LearningRateScheduler reducing learning rate to 0.0001.
781/781 [==============================] - 42s 54ms/step - loss: 0.3005 - acc: 0.9488 - val_loss: 0.4399 - val_acc: 0.9088

Epoch 00093: val_acc did not improve from 0.91470
Epoch 94/200
Learning rate: 0.0001

Epoch 00094: LearningRateScheduler reducing learning rate to 0.0001.
781/781 [==============================] - 41s 53ms/step - loss: 0.2984 - acc: 0.9495 - val_loss: 0.4407 - val_acc: 0.9087

Epoch 00094: val_acc did not improve from 0.91470
Epoch 95/200
Learning rate: 0.0001

Epoch 00095: LearningRateScheduler reducing learning rate to 0.0001.
781/781 [==============================] - 44s 56ms/step - loss: 0.2896 - acc: 0.9516 - val_loss: 0.4206 - val_acc: 0.9145

Epoch 00095: val_acc did not improve from 0.91470
Epoch 96/200
Learning rate: 0.0001

Epoch 00096: LearningRateScheduler reducing learning rate to 0.0001.
781/781 [==============================] - 42s 54ms/step - loss: 0.2889 - acc: 0.9507 - val_loss: 0.4225 - val_acc: 0.9136

Epoch 00096: val_acc did not improve from 0.91470
Epoch 97/200
Learning rate: 0.0001

Epoch 00097: LearningRateScheduler reducing learning rate to 0.0001.
781/781 [==============================] - 41s 53ms/step - loss: 0.2839 - acc: 0.9529 - val_loss: 0.4325 - val_acc: 0.9106

Epoch 00097: val_acc did not improve from 0.91470
Epoch 98/200
Learning rate: 0.0001

Epoch 00098: LearningRateScheduler reducing learning rate to 0.0001.
781/781 [==============================] - 42s 54ms/step - loss: 0.2831 - acc: 0.9524 - val_loss: 0.4185 - val_acc: 0.9157

Epoch 00098: val_acc improved from 0.91470 to 0.91570, saving model to /home/wqx/workspace/qpn/KD/./trained_model/cifar10/resnet26/resnet26.098.h5
Epoch 99/200
Learning rate: 0.0001

Epoch 00099: LearningRateScheduler reducing learning rate to 0.0001.
781/781 [==============================] - 41s 53ms/step - loss: 0.2781 - acc: 0.9539 - val_loss: 0.4450 - val_acc: 0.9081

Epoch 00099: val_acc did not improve from 0.91570
Epoch 100/200
Learning rate: 0.0001

Epoch 00100: LearningRateScheduler reducing learning rate to 0.0001.
781/781 [==============================] - 41s 53ms/step - loss: 0.2780 - acc: 0.9532 - val_loss: 0.4511 - val_acc: 0.9049

Epoch 00100: val_acc did not improve from 0.91570
Epoch 101/200
Learning rate: 0.0001

Epoch 00101: LearningRateScheduler reducing learning rate to 0.0001.
781/781 [==============================] - 42s 53ms/step - loss: 0.2733 - acc: 0.9545 - val_loss: 0.4426 - val_acc: 0.9053

Epoch 00101: val_acc did not improve from 0.91570
Epoch 102/200
Learning rate: 0.0001

Epoch 00102: LearningRateScheduler reducing learning rate to 0.0001.
781/781 [==============================] - 42s 54ms/step - loss: 0.2711 - acc: 0.9541 - val_loss: 0.4265 - val_acc: 0.9111

Epoch 00102: val_acc did not improve from 0.91570
Epoch 103/200
Learning rate: 0.0001

Epoch 00103: LearningRateScheduler reducing learning rate to 0.0001.
781/781 [==============================] - 42s 53ms/step - loss: 0.2688 - acc: 0.9551 - val_loss: 0.4338 - val_acc: 0.9099

Epoch 00103: val_acc did not improve from 0.91570
Epoch 104/200
Learning rate: 0.0001

Epoch 00104: LearningRateScheduler reducing learning rate to 0.0001.
781/781 [==============================] - 42s 54ms/step - loss: 0.2661 - acc: 0.9559 - val_loss: 0.4702 - val_acc: 0.9010

Epoch 00104: val_acc did not improve from 0.91570
Epoch 105/200
Learning rate: 0.0001

Epoch 00105: LearningRateScheduler reducing learning rate to 0.0001.
781/781 [==============================] - 42s 54ms/step - loss: 0.2660 - acc: 0.9558 - val_loss: 0.4387 - val_acc: 0.9089

Epoch 00105: val_acc did not improve from 0.91570
Epoch 106/200
Learning rate: 0.0001

Epoch 00106: LearningRateScheduler reducing learning rate to 0.0001.
781/781 [==============================] - 42s 54ms/step - loss: 0.2598 - acc: 0.9571 - val_loss: 0.4486 - val_acc: 0.9060

Epoch 00106: val_acc did not improve from 0.91570
Epoch 107/200
Learning rate: 0.0001

Epoch 00107: LearningRateScheduler reducing learning rate to 0.0001.
781/781 [==============================] - 42s 54ms/step - loss: 0.2592 - acc: 0.9563 - val_loss: 0.4266 - val_acc: 0.9129

Epoch 00107: val_acc did not improve from 0.91570
Epoch 108/200
Learning rate: 0.0001

Epoch 00108: LearningRateScheduler reducing learning rate to 0.0001.
781/781 [==============================] - 41s 53ms/step - loss: 0.2592 - acc: 0.9565 - val_loss: 0.4195 - val_acc: 0.9141

Epoch 00108: val_acc did not improve from 0.91570
Epoch 109/200
Learning rate: 0.0001

Epoch 00109: LearningRateScheduler reducing learning rate to 0.0001.
781/781 [==============================] - 42s 54ms/step - loss: 0.2545 - acc: 0.9574 - val_loss: 0.4238 - val_acc: 0.9108

Epoch 00109: val_acc did not improve from 0.91570
Epoch 110/200
Learning rate: 0.0001

Epoch 00110: LearningRateScheduler reducing learning rate to 0.0001.
781/781 [==============================] - 41s 53ms/step - loss: 0.2514 - acc: 0.9585 - val_loss: 0.4396 - val_acc: 0.9069

Epoch 00110: val_acc did not improve from 0.91570
Epoch 111/200
Learning rate: 0.0001

Epoch 00111: LearningRateScheduler reducing learning rate to 0.0001.
781/781 [==============================] - 42s 53ms/step - loss: 0.2532 - acc: 0.9578 - val_loss: 0.4330 - val_acc: 0.9107

Epoch 00111: val_acc did not improve from 0.91570
Epoch 112/200
Learning rate: 0.0001

Epoch 00112: LearningRateScheduler reducing learning rate to 0.0001.
781/781 [==============================] - 42s 54ms/step - loss: 0.2504 - acc: 0.9587 - val_loss: 0.4408 - val_acc: 0.9068

Epoch 00112: val_acc did not improve from 0.91570
Epoch 113/200
Learning rate: 0.0001

Epoch 00113: LearningRateScheduler reducing learning rate to 0.0001.
781/781 [==============================] - 42s 54ms/step - loss: 0.2435 - acc: 0.9596 - val_loss: 0.4170 - val_acc: 0.9140

Epoch 00113: val_acc did not improve from 0.91570
Epoch 114/200
Learning rate: 0.0001

Epoch 00114: LearningRateScheduler reducing learning rate to 0.0001.
781/781 [==============================] - 41s 52ms/step - loss: 0.2411 - acc: 0.9608 - val_loss: 0.4295 - val_acc: 0.9100

Epoch 00114: val_acc did not improve from 0.91570
Epoch 115/200
Learning rate: 0.0001

Epoch 00115: LearningRateScheduler reducing learning rate to 0.0001.
781/781 [==============================] - 42s 53ms/step - loss: 0.2416 - acc: 0.9598 - val_loss: 0.4302 - val_acc: 0.9102

Epoch 00115: val_acc did not improve from 0.91570
Epoch 116/200
Learning rate: 0.0001

Epoch 00116: LearningRateScheduler reducing learning rate to 0.0001.
781/781 [==============================] - 41s 52ms/step - loss: 0.2394 - acc: 0.9615 - val_loss: 0.4268 - val_acc: 0.9122

Epoch 00116: val_acc did not improve from 0.91570
Epoch 117/200
Learning rate: 0.0001

Epoch 00117: LearningRateScheduler reducing learning rate to 0.0001.
781/781 [==============================] - 42s 54ms/step - loss: 0.2416 - acc: 0.9596 - val_loss: 0.4123 - val_acc: 0.9138

Epoch 00117: val_acc did not improve from 0.91570
Epoch 118/200
Learning rate: 0.0001

Epoch 00118: LearningRateScheduler reducing learning rate to 0.0001.
781/781 [==============================] - 41s 53ms/step - loss: 0.2373 - acc: 0.9604 - val_loss: 0.3971 - val_acc: 0.9190

Epoch 00118: val_acc improved from 0.91570 to 0.91900, saving model to /home/wqx/workspace/qpn/KD/./trained_model/cifar10/resnet26/resnet26.118.h5
Epoch 119/200
Learning rate: 0.0001

Epoch 00119: LearningRateScheduler reducing learning rate to 0.0001.
781/781 [==============================] - 41s 53ms/step - loss: 0.2372 - acc: 0.9612 - val_loss: 0.4179 - val_acc: 0.9132

Epoch 00119: val_acc did not improve from 0.91900
Epoch 120/200
Learning rate: 0.0001

Epoch 00120: LearningRateScheduler reducing learning rate to 0.0001.
781/781 [==============================] - 42s 53ms/step - loss: 0.2334 - acc: 0.9621 - val_loss: 0.4063 - val_acc: 0.9135

Epoch 00120: val_acc did not improve from 0.91900
Epoch 121/200
Learning rate: 0.0001

Epoch 00121: LearningRateScheduler reducing learning rate to 0.0001.
781/781 [==============================] - 42s 53ms/step - loss: 0.2338 - acc: 0.9621 - val_loss: 0.4177 - val_acc: 0.9133

Epoch 00121: val_acc did not improve from 0.91900
Epoch 122/200
Learning rate: 0.0001

Epoch 00122: LearningRateScheduler reducing learning rate to 0.0001.
781/781 [==============================] - 42s 54ms/step - loss: 0.2291 - acc: 0.9629 - val_loss: 0.4060 - val_acc: 0.9149

Epoch 00122: val_acc did not improve from 0.91900
Epoch 123/200
Learning rate: 0.0001

Epoch 00123: LearningRateScheduler reducing learning rate to 0.0001.
781/781 [==============================] - 41s 52ms/step - loss: 0.2317 - acc: 0.9618 - val_loss: 0.4235 - val_acc: 0.9123

Epoch 00123: val_acc did not improve from 0.91900
Epoch 124/200
Learning rate: 0.0001

Epoch 00124: LearningRateScheduler reducing learning rate to 0.0001.
781/781 [==============================] - 41s 52ms/step - loss: 0.2265 - acc: 0.9637 - val_loss: 0.4223 - val_acc: 0.9117

Epoch 00124: val_acc did not improve from 0.91900
Epoch 125/200
Learning rate: 0.0001

Epoch 00125: LearningRateScheduler reducing learning rate to 0.0001.
781/781 [==============================] - 42s 53ms/step - loss: 0.2260 - acc: 0.9639 - val_loss: 0.4145 - val_acc: 0.9133

Epoch 00125: val_acc did not improve from 0.91900
Epoch 126/200
Learning rate: 0.0001

Epoch 00126: LearningRateScheduler reducing learning rate to 0.0001.
781/781 [==============================] - 43s 55ms/step - loss: 0.2237 - acc: 0.9641 - val_loss: 0.4088 - val_acc: 0.9159

Epoch 00126: val_acc did not improve from 0.91900
Epoch 127/200
Learning rate: 0.0001

Epoch 00127: LearningRateScheduler reducing learning rate to 0.0001.
781/781 [==============================] - 42s 54ms/step - loss: 0.2248 - acc: 0.9640 - val_loss: 0.4359 - val_acc: 0.9078

Epoch 00127: val_acc did not improve from 0.91900
Epoch 128/200
Learning rate: 0.0001

Epoch 00128: LearningRateScheduler reducing learning rate to 0.0001.
781/781 [==============================] - 42s 53ms/step - loss: 0.2218 - acc: 0.9639 - val_loss: 0.4024 - val_acc: 0.9164

Epoch 00128: val_acc did not improve from 0.91900
Epoch 129/200
Learning rate: 0.0001

Epoch 00129: LearningRateScheduler reducing learning rate to 0.0001.
781/781 [==============================] - 41s 53ms/step - loss: 0.2201 - acc: 0.9640 - val_loss: 0.4336 - val_acc: 0.9087

Epoch 00129: val_acc did not improve from 0.91900
Epoch 130/200
Learning rate: 0.0001

Epoch 00130: LearningRateScheduler reducing learning rate to 0.0001.
781/781 [==============================] - 42s 54ms/step - loss: 0.2183 - acc: 0.9656 - val_loss: 0.4347 - val_acc: 0.9097

Epoch 00130: val_acc did not improve from 0.91900
Epoch 131/200
Learning rate: 0.0001

Epoch 00131: LearningRateScheduler reducing learning rate to 0.0001.
781/781 [==============================] - 42s 53ms/step - loss: 0.2170 - acc: 0.9652 - val_loss: 0.4207 - val_acc: 0.9127

Epoch 00131: val_acc did not improve from 0.91900
Epoch 132/200
Learning rate: 0.0001

Epoch 00132: LearningRateScheduler reducing learning rate to 0.0001.
781/781 [==============================] - 41s 53ms/step - loss: 0.2152 - acc: 0.9650 - val_loss: 0.4194 - val_acc: 0.9118

Epoch 00132: val_acc did not improve from 0.91900
Epoch 133/200
Learning rate: 0.0001

Epoch 00133: LearningRateScheduler reducing learning rate to 0.0001.
781/781 [==============================] - 41s 52ms/step - loss: 0.2174 - acc: 0.9642 - val_loss: 0.4144 - val_acc: 0.9134

Epoch 00133: val_acc did not improve from 0.91900
Epoch 134/200
Learning rate: 0.0001

Epoch 00134: LearningRateScheduler reducing learning rate to 0.0001.
781/781 [==============================] - 41s 53ms/step - loss: 0.2093 - acc: 0.9664 - val_loss: 0.4033 - val_acc: 0.9150

Epoch 00134: val_acc did not improve from 0.91900
Epoch 135/200
Learning rate: 0.0001

Epoch 00135: LearningRateScheduler reducing learning rate to 0.0001.
781/781 [==============================] - 42s 54ms/step - loss: 0.2107 - acc: 0.9660 - val_loss: 0.4037 - val_acc: 0.9181

Epoch 00135: val_acc did not improve from 0.91900
Epoch 136/200
Learning rate: 0.0001

Epoch 00136: LearningRateScheduler reducing learning rate to 0.0001.
781/781 [==============================] - 40s 51ms/step - loss: 0.2103 - acc: 0.9673 - val_loss: 0.4191 - val_acc: 0.9115

Epoch 00136: val_acc did not improve from 0.91900
Epoch 137/200
Learning rate: 0.0001

Epoch 00137: LearningRateScheduler reducing learning rate to 0.0001.
781/781 [==============================] - 42s 53ms/step - loss: 0.2142 - acc: 0.9637 - val_loss: 0.4276 - val_acc: 0.9105

Epoch 00137: val_acc did not improve from 0.91900
Epoch 138/200
Learning rate: 0.0001

Epoch 00138: LearningRateScheduler reducing learning rate to 0.0001.
781/781 [==============================] - 41s 52ms/step - loss: 0.2103 - acc: 0.9661 - val_loss: 0.4062 - val_acc: 0.9164

Epoch 00138: val_acc did not improve from 0.91900
Epoch 139/200
Learning rate: 0.0001

Epoch 00139: LearningRateScheduler reducing learning rate to 0.0001.
781/781 [==============================] - 41s 53ms/step - loss: 0.2068 - acc: 0.9675 - val_loss: 0.4137 - val_acc: 0.9136

Epoch 00139: val_acc did not improve from 0.91900
Epoch 140/200
Learning rate: 0.0001

Epoch 00140: LearningRateScheduler reducing learning rate to 0.0001.
781/781 [==============================] - 42s 53ms/step - loss: 0.2078 - acc: 0.9667 - val_loss: 0.4276 - val_acc: 0.9091

Epoch 00140: val_acc did not improve from 0.91900
Epoch 141/200
Learning rate: 0.0001

Epoch 00141: LearningRateScheduler reducing learning rate to 0.0001.
781/781 [==============================] - 42s 54ms/step - loss: 0.2091 - acc: 0.9671 - val_loss: 0.4345 - val_acc: 0.9067

Epoch 00142: val_acc did not improve from 0.91900
Epoch 143/200
Learning rate: 0.0001

Epoch 00143: LearningRateScheduler reducing learning rate to 0.0001.
781/781 [==============================] - 41s 52ms/step - loss: 0.2047 - acc: 0.9674 - val_loss: 0.4116 - val_acc: 0.9141

Epoch 00143: val_acc did not improve from 0.91900
Epoch 144/200
Learning rate: 0.0001

Epoch 00144: LearningRateScheduler reducing learning rate to 0.0001.
781/781 [==============================] - 42s 53ms/step - loss: 0.2002 - acc: 0.9683 - val_loss: 0.4152 - val_acc: 0.9131

Epoch 00144: val_acc did not improve from 0.91900
Epoch 145/200
Learning rate: 0.0001

Epoch 00145: LearningRateScheduler reducing learning rate to 0.0001.
781/781 [==============================] - 41s 53ms/step - loss: 0.2009 - acc: 0.9686 - val_loss: 0.3973 - val_acc: 0.9166

Epoch 00145: val_acc did not improve from 0.91900
Epoch 146/200
Learning rate: 0.0001

Epoch 00146: LearningRateScheduler reducing learning rate to 0.0001.
781/781 [==============================] - 41s 53ms/step - loss: 0.2017 - acc: 0.9684 - val_loss: 0.4168 - val_acc: 0.9131

Epoch 00146: val_acc did not improve from 0.91900
Epoch 147/200
Learning rate: 0.0001

Epoch 00147: LearningRateScheduler reducing learning rate to 0.0001.
781/781 [==============================] - 40s 51ms/step - loss: 0.1986 - acc: 0.9685 - val_loss: 0.4245 - val_acc: 0.9088

Epoch 00147: val_acc did not improve from 0.91900
Epoch 148/200
Learning rate: 0.0001

Epoch 00148: LearningRateScheduler reducing learning rate to 0.0001.
781/781 [==============================] - 41s 53ms/step - loss: 0.1958 - acc: 0.9691 - val_loss: 0.4277 - val_acc: 0.9107

Epoch 00148: val_acc did not improve from 0.91900
Epoch 149/200
Learning rate: 0.0001

Epoch 00149: LearningRateScheduler reducing learning rate to 0.0001.
781/781 [==============================] - 41s 52ms/step - loss: 0.1979 - acc: 0.9688 - val_loss: 0.4299 - val_acc: 0.9076

Epoch 00149: val_acc did not improve from 0.91900
Epoch 150/200
Learning rate: 0.0001

Epoch 00150: LearningRateScheduler reducing learning rate to 0.0001.
781/781 [==============================] - 42s 54ms/step - loss: 0.1979 - acc: 0.9678 - val_loss: 0.4254 - val_acc: 0.9117

Epoch 00150: val_acc did not improve from 0.91900
Epoch 151/200
Learning rate: 0.0001

Epoch 00151: LearningRateScheduler reducing learning rate to 0.0001.
781/781 [==============================] - 42s 53ms/step - loss: 0.1921 - acc: 0.9697 - val_loss: 0.4279 - val_acc: 0.9111

Epoch 00151: val_acc did not improve from 0.91900
Epoch 152/200
Learning rate: 1e-05

Epoch 00152: LearningRateScheduler reducing learning rate to 1e-05.
781/781 [==============================] - 42s 53ms/step - loss: 0.1895 - acc: 0.9721 - val_loss: 0.4117 - val_acc: 0.9142

Epoch 00152: val_acc did not improve from 0.91900
Epoch 153/200
Learning rate: 1e-05

Epoch 00153: LearningRateScheduler reducing learning rate to 1e-05.
781/781 [==============================] - 42s 53ms/step - loss: 0.1833 - acc: 0.9737 - val_loss: 0.4134 - val_acc: 0.9146

Epoch 00153: val_acc did not improve from 0.91900

Training curves

(Figure: training and validation accuracy/loss curves, shown as an image in the original post.)
