K同学 [365天深度学习训练营] Week 13 Record J4: Exploring a Fusion of ResNet and DenseNet

>- **🍨 This post is a learning-record blog entry for the [🔗365天深度学习训练营](https://mp.weixin.qq.com/s/rbOOmire8OocQ90QM78DRA)**
>- **🍖 Original author: [K同学啊 | 接辅导、项目定制](https://mtyjkh.blog.csdn.net/)**

- System environment: Win10 + WSL2 (Ubuntu 22.04)

- Language environment: Python 3.9.18

- Editor: VS Code + Jupyter Notebook

- Deep learning environment: PyTorch 2.1.2

- GPU: NVIDIA Tesla P40

This week's assignment is to fuse the ResNet50 and DenseNet models into a single network.

The difficulty is fairly high...

With ChatGPT's help I put together code for a fusion model that at least runs:

```python
import tensorflow as tf
from tensorflow.keras.layers import (Input, Conv2D, BatchNormalization, Activation,
                                     MaxPooling2D, Dense, GlobalAveragePooling2D,
                                     ZeroPadding2D, AveragePooling2D, Flatten, Add)
from tensorflow.keras.models import Model

def identity_block(input_tensor, kernel_size, filters, stage, block):
    filters1, filters2, filters3 = filters

    name_base = str(stage) + block + '_identity_block_'

    x = Conv2D(filters1, (1, 1), name=name_base + 'conv1')(input_tensor)
    x = BatchNormalization(name=name_base + 'bn1')(x)
    x = Activation('relu', name=name_base + 'relu1')(x)

    x = Conv2D(filters2, kernel_size, padding='same', name=name_base + 'conv2')(x)
    x = BatchNormalization(name=name_base + 'bn2')(x)
    x = Activation('relu', name=name_base + 'relu2')(x)

    x = Conv2D(filters3, (1, 1), name=name_base + 'conv3')(x)
    x = BatchNormalization(name=name_base + 'bn3')(x)

    x = Add(name=name_base + 'add')([x, input_tensor])
    x = Activation('relu', name=name_base + 'relu4')(x)
    return x
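
# Note (my addition): identity_block keeps the spatial size and channel count,
# so the input tensor can be added to the residual branch directly; conv_block
# below changes both, which is why its shortcut needs its own strided 1x1
# Conv + BatchNormalization.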

def conv_block(input_tensor, kernel_size, filters, stage, block, strides=(2, 2)):
    filters1, filters2, filters3 = filters

    res_name_base = str(stage) + block + '_conv_block_res_'
    name_base = str(stage) + block + '_conv_block_'

    x = Conv2D(filters1, (1, 1), strides=strides, name=name_base + 'conv1')(input_tensor)
    x = BatchNormalization(name=name_base + 'bn1')(x)
    x = Activation('relu', name=name_base + 'relu1')(x)

    x = Conv2D(filters2, kernel_size, padding='same', name=name_base + 'conv2')(x)
    x = BatchNormalization(name=name_base + 'bn2')(x)
    x = Activation('relu', name=name_base + 'relu2')(x)

    x = Conv2D(filters3, (1, 1), name=name_base + 'conv3')(x)
    x = BatchNormalization(name=name_base + 'bn3')(x)

    shortcut = Conv2D(filters3, (1, 1), strides=strides, name=res_name_base + 'conv')(input_tensor)
    shortcut = BatchNormalization(name=res_name_base + 'bn')(shortcut)

    x = Add(name=name_base + 'add')([x, shortcut])
    x = Activation('relu', name=name_base + 'relu4')(x)
    return x

def ResNet50(input_shape=[224, 224, 3], classes=1000):
    img_input = Input(shape=input_shape)
    x = ZeroPadding2D((3, 3))(img_input)

    x = Conv2D(64, (7, 7), strides=(2, 2), name='conv1')(x)
    x = BatchNormalization(name='bn_conv1')(x)
    x = Activation('relu')(x)
    x = MaxPooling2D((3, 3), strides=(2, 2))(x)

    x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1))
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='b')
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='c')

    x = conv_block(x, 3, [128, 128, 512], stage=3, block='a')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='b')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='c')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='d')

    x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='c')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='d')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='e')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='f')

    x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a')
    x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b')
    x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c')

    x = AveragePooling2D((7, 7), name='avg_pooling')(x)
    x = Flatten()(x)

    x = Dense(classes, activation='softmax', name='fc1000')(x)
    model = Model(img_input, x, name='resnet50')
    return model


class DenseLayer(Model):
    def __init__(self, bottleneck_size, growth_rate):
        super().__init__()
        self.b1 = BatchNormalization()
        self.a1 = Activation('relu')
        self.c1 = Conv2D(filters=bottleneck_size, kernel_size=(1, 1), strides=1)
        self.b2 = BatchNormalization()
        self.a2 = Activation('relu')
        self.c2 = Conv2D(filters=growth_rate, kernel_size=(3, 3), strides=1, padding='same')

    def call(self, x):
        x = self.b1(x)
        x = self.a1(x)
        x = self.c1(x)
        x = self.b2(x)
        x = self.a2(x)
        return self.c2(x)


class DenseBlock(Model):
    def __init__(self, num_layers, growth_rate):
        super().__init__()
        self.dense_layers = [DenseLayer(4 * growth_rate, growth_rate) for _ in range(num_layers)]

    def call(self, x):
        for layer in self.dense_layers:
            new_x = layer(x)
            x = tf.concat([x, new_x], axis=-1)
        return x
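
# Illustrative check (my addition, not in the original post): a DenseBlock grows
# the channel axis by growth_rate per layer, e.g. with 64 input channels and
# num_layers=6: 64 + 6 * 32 = 256 output channels.
# _demo = DenseBlock(num_layers=6, growth_rate=32)(tf.random.normal((1, 56, 56, 64)))
# assert _demo.shape == (1, 56, 56, 256)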

class Transition(Model):
    def __init__(self, filters, compression_rate=0.5):
        super().__init__()
        self.b = BatchNormalization()
        self.a = Activation('relu')
        self.c = Conv2D(int(filters * compression_rate), kernel_size=(1, 1), strides=1)
        self.p = AveragePooling2D(pool_size=(2, 2), strides=2)

    def call(self, x):
        x = self.b(x)
        x = self.a(x)
        x = self.c(x)
        return self.p(x)

class DenseNet(Model):
    def __init__(self, block_list=[6, 12, 24, 16], compression_rate=0.5, initial_filters=64):
        super().__init__()
        self.padding = ZeroPadding2D(((3, 3), (3, 3)))  # Adjusted padding for 224x224 input
        self.c1 = Conv2D(initial_filters, kernel_size=(7, 7), strides=2, padding='valid')
        self.b1 = BatchNormalization()
        self.a1 = Activation('relu')
        self.p1 = MaxPooling2D(pool_size=(3, 3), strides=2, padding='same')
        self.blocks = tf.keras.Sequential()
        filters = initial_filters
        for i, num_layers in enumerate(block_list):
            self.blocks.add(DenseBlock(num_layers, growth_rate=32))
            filters += 32 * num_layers
            if i != len(block_list) - 1:  # No transition layer after the last block
                self.blocks.add(Transition(filters, compression_rate))
                filters = int(filters * compression_rate)
        self.p2 = GlobalAveragePooling2D()
        self.d2 = Dense(1000, activation='softmax')

    def call(self, inputs):
        x = self.padding(inputs)
        x = self.c1(x)
        x = self.b1(x)
        x = self.a1(x)
        x = self.p1(x)
        x = self.blocks(x)
        x = self.p2(x)
        return self.d2(x)

class ResNet50DenseNet(Model):
    def __init__(self, classes=1000):
        super(ResNet50DenseNet, self).__init__()
        self.resnet50_base = ResNet50(input_shape=(224, 224, 3), classes=classes)
        self.densenet_base = DenseNet(block_list=[6, 12, 24, 16], compression_rate=0.5, initial_filters=64)

        # The base models already include a global average pooling layer,
        # so there is no need to add another one here
        # self.global_average_pooling = GlobalAveragePooling2D()
        self.dense = Dense(classes, activation='softmax')

    def call(self, inputs):
        # Features from the two backbones
        x1 = self.resnet50_base(inputs)
        x2 = self.densenet_base(inputs)

        # Make sure the features are pooled to 1-D vectors with
        # GlobalAveragePooling2D or a similar operation; if a base model did
        # not end in GlobalAveragePooling2D, it would have to be added here:
        # x1 = GlobalAveragePooling2D()(x1)
        # x2 = GlobalAveragePooling2D()(x2)

        # Concatenate the features
        x = tf.concat([x1, x2], axis=-1)

        # Pass straight to the Dense layer for classification
        return self.dense(x)

# Instantiate the model
model = ResNet50DenseNet(classes=1000)
# Build the model
model.build(input_shape=(None, 224, 224, 3))

model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])

# Print the model structure
model.summary()
```
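
As a quick sanity check (my addition, not in the original post), a dummy batch can be pushed through the fused model to confirm it builds and produces the expected output shape:

```python
import tensorflow as tf

# Two random 224x224 RGB images
dummy = tf.random.normal((2, 224, 224, 3))
print(model(dummy).shape)  # expected: (2, 1000)
```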

The model structure is shown below. The subclassed submodels report "multiple" as their output shape, which is normal for subclassed Keras models in `summary()`:

```

Model: "res_net50_dense_net"
_________________________________________________________________
 Layer (type)                Output Shape              Param #   
=================================================================
 resnet50 (Functional)       (None, 1000)              25636712  
                                                                 
 dense_net (DenseNet)        multiple                  8068648   
                                                                 
 dense_1 (Dense)             multiple                  2001000   
                                                                 
=================================================================
Total params: 35706360 (136.21 MB)
Trainable params: 35571640 (135.70 MB)
Non-trainable params: 134720 (526.25 KB)
_________________________________________________________________
```

The model converged very slowly, so I raised the initial learning rate to 0.01:

```python
import tensorflow as tf
import os

# Set the initial learning rate
initial_learning_rate = 1e-2
opt = tf.keras.optimizers.Adam(learning_rate=initial_learning_rate)
model.compile(optimizer=opt,
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])

# Set up early stopping and a checkpoint
early_stopping = tf.keras.callbacks.EarlyStopping(
    monitor='val_loss',
    patience=100,
    verbose=1,
    mode='min',
    restore_best_weights=True
)

checkpoint_path = "training_1/cp.ckpt"
checkpoint_dir = os.path.dirname(checkpoint_path)

model_checkpoint = tf.keras.callbacks.ModelCheckpoint(
    filepath=checkpoint_path, 
    save_weights_only=True,
    monitor='val_accuracy',
    mode='max',
    save_best_only=True,
    verbose=1)

# Reduce the learning rate when the validation loss plateaus
reduce_lr = tf.keras.callbacks.ReduceLROnPlateau(
    monitor='val_loss',  # watch the validation loss
    factor=0.2,          # new_lr = old_lr * factor
    patience=5,          # reduce if val_loss has not improved for 5 epochs
    min_lr=1e-6,         # lower bound on the learning rate
    verbose=1            # log each reduction
)

# Train the model
epochs = 1000
history = model.fit(
    train_ds,
    validation_data=val_ds,
    epochs=epochs,
    callbacks=[early_stopping, model_checkpoint, reduce_lr]  # register the callbacks
)
```
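
A small plotting sketch (my addition, assuming `matplotlib` is installed) makes the plateau in the log below easier to see:

```python
import matplotlib.pyplot as plt

# Pull the curves recorded by model.fit()
acc, val_acc = history.history['accuracy'], history.history['val_accuracy']
loss, val_loss = history.history['loss'], history.history['val_loss']
epochs_range = range(len(acc))

plt.figure(figsize=(12, 4))
plt.subplot(1, 2, 1)
plt.plot(epochs_range, acc, label='Training Accuracy')
plt.plot(epochs_range, val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.title('Accuracy')

plt.subplot(1, 2, 2)
plt.plot(epochs_range, loss, label='Training Loss')
plt.plot(epochs_range, val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.title('Loss')
plt.show()
```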

The training output:

```

Epoch 1/1000
2024-03-22 23:31:11.946198: I external/local_xla/xla/stream_executor/cuda/cuda_dnn.cc:454] Loaded cuDNN version 8904
2024-03-22 23:31:12.988269: I external/local_tsl/tsl/platform/default/subprocess.cc:304] Start cannot spawn child process: No such file or directory
2024-03-22 23:31:17.590245: I external/local_xla/xla/service/service.cc:168] XLA service 0x67681b0 initialized for platform CUDA (this does not guarantee that XLA will be used). Devices:
2024-03-22 23:31:17.590319: I external/local_xla/xla/service/service.cc:176]   StreamExecutor device (0): Tesla P40, Compute Capability 6.1
2024-03-22 23:31:17.598579: I tensorflow/compiler/mlir/tensorflow/utils/dump_mlir_util.cc:269] disabling MLIR crash reproducer, set env var `MLIR_CRASH_REPRODUCER_DIRECTORY` to enable.
WARNING: All log messages before absl::InitializeLog() is called are written to STDERR
I0000 00:00:1711121477.711542   97185 device_compiler.h:186] Compiled cluster using XLA!  This line is logged at most once for the lifetime of the process.
57/57 [==============================] - ETA: 0s - loss: 5.3783 - accuracy: 0.2544
Epoch 1: val_accuracy improved from -inf to 0.26549, saving model to training_1/cp.ckpt
57/57 [==============================] - 161s 656ms/step - loss: 5.3783 - accuracy: 0.2544 - val_loss: 3.8227 - val_accuracy: 0.2655 - lr: 0.0100
Epoch 2/1000
57/57 [==============================] - ETA: 0s - loss: 2.7167 - accuracy: 0.3009
Epoch 2: val_accuracy did not improve from 0.26549
57/57 [==============================] - 15s 271ms/step - loss: 2.7167 - accuracy: 0.3009 - val_loss: 1.9360 - val_accuracy: 0.2655 - lr: 0.0100
Epoch 3/1000
57/57 [==============================] - ETA: 0s - loss: 1.7028 - accuracy: 0.3009
Epoch 3: val_accuracy did not improve from 0.26549
57/57 [==============================] - 16s 289ms/step - loss: 1.7028 - accuracy: 0.3009 - val_loss: 1.5602 - val_accuracy: 0.2655 - lr: 0.0100
Epoch 4/1000
57/57 [==============================] - ETA: 0s - loss: 1.5155 - accuracy: 0.2965
Epoch 4: val_accuracy improved from 0.26549 to 0.31858, saving model to training_1/cp.ckpt
57/57 [==============================] - 21s 377ms/step - loss: 1.5155 - accuracy: 0.2965 - val_loss: 1.4728 - val_accuracy: 0.3186 - lr: 0.0100
Epoch 5/1000
57/57 [==============================] - ETA: 0s - loss: 1.4563 - accuracy: 0.2942
Epoch 5: val_accuracy did not improve from 0.31858
57/57 [==============================] - 15s 270ms/step - loss: 1.4563 - accuracy: 0.2942 - val_loss: 1.4425 - val_accuracy: 0.2655 - lr: 0.0100
Epoch 6/1000
57/57 [==============================] - ETA: 0s - loss: 1.4332 - accuracy: 0.2788
Epoch 6: val_accuracy did not improve from 0.31858
57/57 [==============================] - 16s 279ms/step - loss: 1.4332 - accuracy: 0.2788 - val_loss: 1.4213 - val_accuracy: 0.2655 - lr: 0.0100
Epoch 7/1000
57/57 [==============================] - ETA: 0s - loss: 1.4172 - accuracy: 0.3009
Epoch 7: val_accuracy did not improve from 0.31858
57/57 [==============================] - 16s 279ms/step - loss: 1.4172 - accuracy: 0.3009 - val_loss: 1.4097 - val_accuracy: 0.2655 - lr: 0.0100
Epoch 8/1000
57/57 [==============================] - ETA: 0s - loss: 1.4102 - accuracy: 0.3031
Epoch 8: val_accuracy did not improve from 0.31858
57/57 [==============================] - 17s 292ms/step - loss: 1.4102 - accuracy: 0.3031 - val_loss: 1.4063 - val_accuracy: 0.2655 - lr: 0.0100
Epoch 9/1000
57/57 [==============================] - ETA: 0s - loss: 1.4050 - accuracy: 0.3009
Epoch 9: val_accuracy did not improve from 0.31858
57/57 [==============================] - 15s 271ms/step - loss: 1.4050 - accuracy: 0.3009 - val_loss: 1.3994 - val_accuracy: 0.2655 - lr: 0.0100
Epoch 10/1000
57/57 [==============================] - ETA: 0s - loss: 1.3983 - accuracy: 0.2854
Epoch 10: val_accuracy did not improve from 0.31858
57/57 [==============================] - 17s 300ms/step - loss: 1.3983 - accuracy: 0.2854 - val_loss: 1.3931 - val_accuracy: 0.2655 - lr: 0.0100
Epoch 11/1000
57/57 [==============================] - ETA: 0s - loss: 1.3944 - accuracy: 0.2677
Epoch 11: val_accuracy did not improve from 0.31858
57/57 [==============================] - 17s 292ms/step - loss: 1.3944 - accuracy: 0.2677 - val_loss: 1.3927 - val_accuracy: 0.2655 - lr: 0.0100
Epoch 12/1000
57/57 [==============================] - ETA: 0s - loss: 1.3944 - accuracy: 0.2721
Epoch 12: val_accuracy did not improve from 0.31858
57/57 [==============================] - 16s 279ms/step - loss: 1.3944 - accuracy: 0.2721 - val_loss: 1.3858 - val_accuracy: 0.3186 - lr: 0.0100
Epoch 13/1000
57/57 [==============================] - ETA: 0s - loss: 1.3964 - accuracy: 0.2721
Epoch 13: val_accuracy did not improve from 0.31858
57/57 [==============================] - 18s 319ms/step - loss: 1.3964 - accuracy: 0.2721 - val_loss: 1.3837 - val_accuracy: 0.3186 - lr: 0.0100
Epoch 14/1000
57/57 [==============================] - ETA: 0s - loss: 1.3926 - accuracy: 0.2699
Epoch 14: val_accuracy did not improve from 0.31858
57/57 [==============================] - 16s 287ms/step - loss: 1.3926 - accuracy: 0.2699 - val_loss: 1.3831 - val_accuracy: 0.3186 - lr: 0.0100
Epoch 15/1000
57/57 [==============================] - ETA: 0s - loss: 1.3892 - accuracy: 0.2920
Epoch 15: val_accuracy did not improve from 0.31858
57/57 [==============================] - 16s 278ms/step - loss: 1.3892 - accuracy: 0.2920 - val_loss: 1.3900 - val_accuracy: 0.2655 - lr: 0.0100
Epoch 16/1000
57/57 [==============================] - ETA: 0s - loss: 1.3870 - accuracy: 0.2765
Epoch 16: val_accuracy did not improve from 0.31858
57/57 [==============================] - 16s 282ms/step - loss: 1.3870 - accuracy: 0.2765 - val_loss: 1.3827 - val_accuracy: 0.2655 - lr: 0.0100
Epoch 17/1000
57/57 [==============================] - ETA: 0s - loss: 1.3886 - accuracy: 0.2765
Epoch 17: val_accuracy did not improve from 0.31858
57/57 [==============================] - 16s 276ms/step - loss: 1.3886 - accuracy: 0.2765 - val_loss: 1.3856 - val_accuracy: 0.2655 - lr: 0.0100
Epoch 18/1000
57/57 [==============================] - ETA: 0s - loss: 1.3846 - accuracy: 0.3009
Epoch 18: val_accuracy did not improve from 0.31858
57/57 [==============================] - 15s 272ms/step - loss: 1.3846 - accuracy: 0.3009 - val_loss: 1.3827 - val_accuracy: 0.2655 - lr: 0.0100
Epoch 19/1000
57/57 [==============================] - ETA: 0s - loss: 1.3832 - accuracy: 0.2743
Epoch 19: val_accuracy did not improve from 0.31858
57/57 [==============================] - 17s 290ms/step - loss: 1.3832 - accuracy: 0.2743 - val_loss: 1.3795 - val_accuracy: 0.2655 - lr: 0.0100
Epoch 20/1000
57/57 [==============================] - ETA: 0s - loss: 1.3829 - accuracy: 0.2412
Epoch 20: val_accuracy did not improve from 0.31858
57/57 [==============================] - 16s 284ms/step - loss: 1.3829 - accuracy: 0.2412 - val_loss: 1.3793 - val_accuracy: 0.3186 - lr: 0.0100
Epoch 21/1000
57/57 [==============================] - ETA: 0s - loss: 1.3840 - accuracy: 0.2677
Epoch 21: val_accuracy did not improve from 0.31858
57/57 [==============================] - 16s 282ms/step - loss: 1.3840 - accuracy: 0.2677 - val_loss: 1.3857 - val_accuracy: 0.2655 - lr: 0.0100
Epoch 22/1000
57/57 [==============================] - ETA: 0s - loss: 1.3823 - accuracy: 0.2788
Epoch 22: val_accuracy did not improve from 0.31858
57/57 [==============================] - 16s 277ms/step - loss: 1.3823 - accuracy: 0.2788 - val_loss: 1.3815 - val_accuracy: 0.2655 - lr: 0.0100
Epoch 23/1000
57/57 [==============================] - ETA: 0s - loss: 1.3860 - accuracy: 0.2699
Epoch 23: val_accuracy did not improve from 0.31858
57/57 [==============================] - 16s 273ms/step - loss: 1.3860 - accuracy: 0.2699 - val_loss: 1.3849 - val_accuracy: 0.2655 - lr: 0.0100
Epoch 24/1000
57/57 [==============================] - ETA: 0s - loss: 1.3841 - accuracy: 0.2898
Epoch 24: val_accuracy did not improve from 0.31858
57/57 [==============================] - 16s 274ms/step - loss: 1.3841 - accuracy: 0.2898 - val_loss: 1.3827 - val_accuracy: 0.2655 - lr: 0.0100
Epoch 25/1000
57/57 [==============================] - ETA: 0s - loss: 1.3813 - accuracy: 0.2810
Epoch 25: val_accuracy did not improve from 0.31858
57/57 [==============================] - 16s 285ms/step - loss: 1.3813 - accuracy: 0.2810 - val_loss: 1.3769 - val_accuracy: 0.2655 - lr: 0.0100
Epoch 26/1000
57/57 [==============================] - ETA: 0s - loss: 1.3818 - accuracy: 0.3009
Epoch 26: val_accuracy did not improve from 0.31858
57/57 [==============================] - 16s 283ms/step - loss: 1.3818 - accuracy: 0.3009 - val_loss: 1.3759 - val_accuracy: 0.2655 - lr: 0.0100
Epoch 27/1000
57/57 [==============================] - ETA: 0s - loss: 1.3815 - accuracy: 0.2566
Epoch 27: val_accuracy did not improve from 0.31858
57/57 [==============================] - 16s 272ms/step - loss: 1.3815 - accuracy: 0.2566 - val_loss: 1.3761 - val_accuracy: 0.3186 - lr: 0.0100
Epoch 28/1000
57/57 [==============================] - ETA: 0s - loss: 1.3824 - accuracy: 0.2965
Epoch 28: val_accuracy did not improve from 0.31858
57/57 [==============================] - 16s 280ms/step - loss: 1.3824 - accuracy: 0.2965 - val_loss: 1.3746 - val_accuracy: 0.3186 - lr: 0.0100
Epoch 29/1000
57/57 [==============================] - ETA: 0s - loss: 1.3828 - accuracy: 0.2765
Epoch 29: val_accuracy did not improve from 0.31858
57/57 [==============================] - 16s 288ms/step - loss: 1.3828 - accuracy: 0.2765 - val_loss: 1.3728 - val_accuracy: 0.3186 - lr: 0.0100
Epoch 30/1000
57/57 [==============================] - ETA: 0s - loss: 1.3791 - accuracy: 0.2876
Epoch 30: val_accuracy did not improve from 0.31858
57/57 [==============================] - 16s 279ms/step - loss: 1.3791 - accuracy: 0.2876 - val_loss: 1.3751 - val_accuracy: 0.2655 - lr: 0.0100
Epoch 31/1000
57/57 [==============================] - ETA: 0s - loss: 1.3813 - accuracy: 0.2898
Epoch 31: val_accuracy did not improve from 0.31858
57/57 [==============================] - 15s 269ms/step - loss: 1.3813 - accuracy: 0.2898 - val_loss: 1.3762 - val_accuracy: 0.3186 - lr: 0.0100
Epoch 32/1000
57/57 [==============================] - ETA: 0s - loss: 1.3790 - accuracy: 0.2788
Epoch 32: val_accuracy did not improve from 0.31858
57/57 [==============================] - 15s 269ms/step - loss: 1.3790 - accuracy: 0.2788 - val_loss: 1.3776 - val_accuracy: 0.2655 - lr: 0.0100
Epoch 33/1000
57/57 [==============================] - ETA: 0s - loss: 1.3811 - accuracy: 0.2810
Epoch 33: val_accuracy did not improve from 0.31858
57/57 [==============================] - 16s 275ms/step - loss: 1.3811 - accuracy: 0.2810 - val_loss: 1.3757 - val_accuracy: 0.3186 - lr: 0.0100
Epoch 34/1000
57/57 [==============================] - ETA: 0s - loss: 1.3814 - accuracy: 0.2677
Epoch 34: val_accuracy did not improve from 0.31858

Epoch 34: ReduceLROnPlateau reducing learning rate to 0.0019999999552965165.
57/57 [==============================] - 16s 276ms/step - loss: 1.3814 - accuracy: 0.2677 - val_loss: 1.3756 - val_accuracy: 0.2655 - lr: 0.0100
Epoch 35/1000
57/57 [==============================] - ETA: 0s - loss: 1.3751 - accuracy: 0.3009
Epoch 35: val_accuracy did not improve from 0.31858
57/57 [==============================] - 16s 275ms/step - loss: 1.3751 - accuracy: 0.3009 - val_loss: 1.3756 - val_accuracy: 0.2655 - lr: 0.0020
Epoch 36/1000
57/57 [==============================] - ETA: 0s - loss: 1.3750 - accuracy: 0.3009
Epoch 36: val_accuracy did not improve from 0.31858
57/57 [==============================] - 15s 271ms/step - loss: 1.3750 - accuracy: 0.3009 - val_loss: 1.3765 - val_accuracy: 0.2655 - lr: 0.0020
Epoch 37/1000
57/57 [==============================] - ETA: 0s - loss: 1.3740 - accuracy: 0.3009
Epoch 37: val_accuracy did not improve from 0.31858
57/57 [==============================] - 16s 281ms/step - loss: 1.3740 - accuracy: 0.3009 - val_loss: 1.3752 - val_accuracy: 0.2655 - lr: 0.0020
Epoch 38/1000
57/57 [==============================] - ETA: 0s - loss: 1.3746 - accuracy: 0.2588
Epoch 38: val_accuracy did not improve from 0.31858
57/57 [==============================] - 15s 271ms/step - loss: 1.3746 - accuracy: 0.2588 - val_loss: 1.3751 - val_accuracy: 0.2655 - lr: 0.0020
Epoch 39/1000
57/57 [==============================] - ETA: 0s - loss: 1.3743 - accuracy: 0.3009
Epoch 39: val_accuracy did not improve from 0.31858

Epoch 39: ReduceLROnPlateau reducing learning rate to 0.0003999999724328518.
57/57 [==============================] - 16s 284ms/step - loss: 1.3743 - accuracy: 0.3009 - val_loss: 1.3760 - val_accuracy: 0.2655 - lr: 0.0020
Epoch 40/1000
57/57 [==============================] - ETA: 0s - loss: 1.3733 - accuracy: 0.3009
Epoch 40: val_accuracy did not improve from 0.31858
57/57 [==============================] - 16s 275ms/step - loss: 1.3733 - accuracy: 0.3009 - val_loss: 1.3759 - val_accuracy: 0.2655 - lr: 4.0000e-04
Epoch 41/1000
57/57 [==============================] - ETA: 0s - loss: 1.3736 - accuracy: 0.3009
Epoch 41: val_accuracy did not improve from 0.31858
57/57 [==============================] - 15s 270ms/step - loss: 1.3736 - accuracy: 0.3009 - val_loss: 1.3758 - val_accuracy: 0.2655 - lr: 4.0000e-04
Epoch 42/1000
57/57 [==============================] - ETA: 0s - loss: 1.3732 - accuracy: 0.3009
Epoch 42: val_accuracy did not improve from 0.31858
57/57 [==============================] - 16s 276ms/step - loss: 1.3732 - accuracy: 0.3009 - val_loss: 1.3757 - val_accuracy: 0.2655 - lr: 4.0000e-04
Epoch 43/1000
57/57 [==============================] - ETA: 0s - loss: 1.3733 - accuracy: 0.3009
Epoch 43: val_accuracy did not improve from 0.31858
57/57 [==============================] - 15s 270ms/step - loss: 1.3733 - accuracy: 0.3009 - val_loss: 1.3757 - val_accuracy: 0.2655 - lr: 4.0000e-04
Epoch 44/1000
57/57 [==============================] - ETA: 0s - loss: 1.3734 - accuracy: 0.3009
Epoch 44: val_accuracy did not improve from 0.31858

Epoch 44: ReduceLROnPlateau reducing learning rate to 7.999999215826393e-05.
57/57 [==============================] - 15s 271ms/step - loss: 1.3734 - accuracy: 0.3009 - val_loss: 1.3755 - val_accuracy: 0.2655 - lr: 4.0000e-04
Epoch 45/1000
57/57 [==============================] - ETA: 0s - loss: 1.3731 - accuracy: 0.3009
Epoch 45: val_accuracy did not improve from 0.31858
57/57 [==============================] - 15s 270ms/step - loss: 1.3731 - accuracy: 0.3009 - val_loss: 1.3755 - val_accuracy: 0.2655 - lr: 8.0000e-05
Epoch 46/1000
57/57 [==============================] - ETA: 0s - loss: 1.3731 - accuracy: 0.3009
Epoch 46: val_accuracy did not improve from 0.31858
57/57 [==============================] - 16s 284ms/step - loss: 1.3731 - accuracy: 0.3009 - val_loss: 1.3755 - val_accuracy: 0.2655 - lr: 8.0000e-05
Epoch 47/1000
57/57 [==============================] - ETA: 0s - loss: 1.3731 - accuracy: 0.3009
Epoch 47: val_accuracy did not improve from 0.31858
57/57 [==============================] - 15s 271ms/step - loss: 1.3731 - accuracy: 0.3009 - val_loss: 1.3755 - val_accuracy: 0.2655 - lr: 8.0000e-05
Epoch 48/1000
57/57 [==============================] - ETA: 0s - loss: 1.3731 - accuracy: 0.3009
Epoch 48: val_accuracy did not improve from 0.31858
57/57 [==============================] - 15s 270ms/step - loss: 1.3731 - accuracy: 0.3009 - val_loss: 1.3755 - val_accuracy: 0.2655 - lr: 8.0000e-05
Epoch 49/1000
57/57 [==============================] - ETA: 0s - loss: 1.3731 - accuracy: 0.3009
Epoch 49: val_accuracy did not improve from 0.31858

Epoch 49: ReduceLROnPlateau reducing learning rate to 1.599999814061448e-05.
57/57 [==============================] - 16s 277ms/step - loss: 1.3731 - accuracy: 0.3009 - val_loss: 1.3755 - val_accuracy: 0.2655 - lr: 8.0000e-05
Epoch 50/1000
57/57 [==============================] - ETA: 0s - loss: 1.3731 - accuracy: 0.3009
Epoch 50: val_accuracy did not improve from 0.31858
57/57 [==============================] - 16s 274ms/step - loss: 1.3731 - accuracy: 0.3009 - val_loss: 1.3755 - val_accuracy: 0.2655 - lr: 1.6000e-05
Epoch 51/1000
57/57 [==============================] - ETA: 0s - loss: 1.3731 - accuracy: 0.3009
Epoch 51: val_accuracy did not improve from 0.31858
57/57 [==============================] - 15s 266ms/step - loss: 1.3731 - accuracy: 0.3009 - val_loss: 1.3755 - val_accuracy: 0.2655 - lr: 1.6000e-05
Epoch 52/1000
57/57 [==============================] - ETA: 0s - loss: 1.3731 - accuracy: 0.3009
Epoch 52: val_accuracy did not improve from 0.31858
57/57 [==============================] - 16s 283ms/step - loss: 1.3731 - accuracy: 0.3009 - val_loss: 1.3755 - val_accuracy: 0.2655 - lr: 1.6000e-05
Epoch 53/1000
57/57 [==============================] - ETA: 0s - loss: 1.3731 - accuracy: 0.3009
Epoch 53: val_accuracy did not improve from 0.31858
57/57 [==============================] - 16s 278ms/step - loss: 1.3731 - accuracy: 0.3009 - val_loss: 1.3755 - val_accuracy: 0.2655 - lr: 1.6000e-05
Epoch 54/1000
57/57 [==============================] - ETA: 0s - loss: 1.3731 - accuracy: 0.3009
Epoch 54: val_accuracy did not improve from 0.31858

Epoch 54: ReduceLROnPlateau reducing learning rate to 3.199999628122896e-06.
57/57 [==============================] - 16s 275ms/step - loss: 1.3731 - accuracy: 0.3009 - val_loss: 1.3755 - val_accuracy: 0.2655 - lr: 1.6000e-05
Epoch 55/1000
57/57 [==============================] - ETA: 0s - loss: 1.3731 - accuracy: 0.3009
Epoch 55: val_accuracy did not improve from 0.31858
57/57 [==============================] - 15s 269ms/step - loss: 1.3731 - accuracy: 0.3009 - val_loss: 1.3755 - val_accuracy: 0.2655 - lr: 3.2000e-06
Epoch 56/1000
57/57 [==============================] - ETA: 0s - loss: 1.3731 - accuracy: 0.3009
Epoch 56: val_accuracy did not improve from 0.31858
57/57 [==============================] - 16s 273ms/step - loss: 1.3731 - accuracy: 0.3009 - val_loss: 1.3755 - val_accuracy: 0.2655 - lr: 3.2000e-06
Epoch 57/1000
57/57 [==============================] - ETA: 0s - loss: 1.3731 - accuracy: 0.3009
Epoch 57: val_accuracy did not improve from 0.31858
57/57 [==============================] - 16s 273ms/step - loss: 1.3731 - accuracy: 0.3009 - val_loss: 1.3755 - val_accuracy: 0.2655 - lr: 3.2000e-06
Epoch 58/1000
57/57 [==============================] - ETA: 0s - loss: 1.3731 - accuracy: 0.3009
Epoch 58: val_accuracy did not improve from 0.31858
57/57 [==============================] - 16s 277ms/step - loss: 1.3731 - accuracy: 0.3009 - val_loss: 1.3755 - val_accuracy: 0.2655 - lr: 3.2000e-06
Epoch 59/1000
57/57 [==============================] - ETA: 0s - loss: 1.3731 - accuracy: 0.3009
Epoch 59: val_accuracy did not improve from 0.31858

Epoch 59: ReduceLROnPlateau reducing learning rate to 1e-06.
57/57 [==============================] - 15s 269ms/step - loss: 1.3731 - accuracy: 0.3009 - val_loss: 1.3755 - val_accuracy: 0.2655 - lr: 3.2000e-06
[Epochs 60-128 omitted: every epoch identical apart from step timing - loss: 1.3731 - accuracy: 0.3009 - val_loss: 1.3755 - val_accuracy: 0.2655 - lr: 1.0000e-06]
Epoch 129/1000
57/57 [==============================] - ETA: 0s - loss: 1.3731 - accuracy: 0.3009
Restoring model weights from the end of the best epoch: 29.

Epoch 129: val_accuracy did not improve from 0.31858
57/57 [==============================] - 16s 277ms/step - loss: 1.3731 - accuracy: 0.3009 - val_loss: 1.3755 - val_accuracy: 0.2655 - lr: 1.0000e-06
Epoch 129: early stopping
```

A thoroughly unsuccessful result: training accuracy never gets past roughly 0.30, the best validation accuracy is the 0.3186 reached back at epoch 4, and the loss flatlines at about 1.373.

I don't know the cause, and I don't know how to tune it either.
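
Some hedged guesses at the cause (my analysis, not from the original post): the loss flatlines near ln(4) ≈ 1.386, which is exactly what a model predicting a near-uniform distribution over a 4-class dataset would score, so the network has effectively stopped discriminating. One plausible structural culprit is that `ResNet50DenseNet` concatenates the two backbones' 1000-way softmax outputs rather than their pooled features, then stacks yet another softmax head on top; two saturated probability vectors give the final `Dense` layer very little signal to work with. Training two ImageNet-scale backbones from scratch on a small dataset (57 batches per epoch) at lr = 0.01 also invites collapse onto the majority class. Below is a sketch of a feature-level fusion, reusing the classes defined above; `num_classes=4` is a guess inferred from the loss value, and both classification heads (ResNet50's `fc1000` and the DenseNet's `d2`) are bypassed:

```python
import tensorflow as tf
from tensorflow.keras.layers import Dense, Flatten
from tensorflow.keras.models import Model

class FeatureFusionNet(Model):
    """Fuse ResNet50 and DenseNet at the pooled-feature level (a sketch)."""
    def __init__(self, num_classes=4):
        super().__init__()
        resnet = ResNet50(input_shape=[224, 224, 3], classes=1000)
        # Slice ResNet50 at its global pooling layer ('avg_pooling' above),
        # dropping the 1000-way fc1000 head.
        self.resnet_features = Model(resnet.input,
                                     resnet.get_layer('avg_pooling').output)
        self.densenet = DenseNet()
        self.flatten = Flatten()
        self.head = Dense(num_classes, activation='softmax')

    def call(self, inputs):
        x1 = self.flatten(self.resnet_features(inputs))  # (None, 2048)
        d = self.densenet
        # Reuse the DenseNet layers but stop at global average pooling,
        # skipping its 1000-way softmax head (d.d2).
        x2 = d.p2(d.blocks(d.p1(d.a1(d.b1(d.c1(d.padding(inputs)))))))  # (None, 1024)
        return self.head(tf.concat([x1, x2], axis=-1))
```

With a fusion like this, a more conventional starting point for from-scratch training would be an initial learning rate of 1e-3 or 1e-4 rather than 1e-2, reducing on plateau as before.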
