Predicting skin image classes with an AlexNet network

import tensorflow as tf
import os
import datetime
import keras
from keras.preprocessing.image import ImageDataGenerator
from keras import layers
from keras import models
from keras import optimizers
import matplotlib.pyplot as plt

starttime = datetime.datetime.now()
# Build the network layer by layer
model = models.Sequential()
# Block 1: convolution -> batch normalization -> max pooling
model.add(layers.Conv2D(96, (11, 11), strides=(4, 4), padding='same', activation='relu',
                        input_shape=(224, 224, 3)))
# padding takes one of two values: 'same' pads the input so the output spatial size
# is ceil(input / stride) (equal to the input size when the stride is 1), while
# 'valid' applies no padding, so the feature map shrinks as the filter slides over it.
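# Quick sanity check of this first layer's output size (standard Keras shape
# arithmetic): 'same' gives ceil(224 / 4) = 56, while 'valid' would give
# floor((224 - 11) / 4) + 1 = 54. The (None, 56, 56, 96) row in the model
# summary below confirms the 'same' case.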
model.add(layers.BatchNormalization(axis=3))  # batch normalization (BN) over the channel axis
model.add(layers.MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))  # overlapping max pooling

# Block 2: convolution -> batch normalization -> max pooling
model.add(layers.Conv2D(256, (5, 5), strides=(1, 1), padding='same', activation='relu'))
model.add(layers.BatchNormalization(axis=3))
model.add(layers.MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))

# Block 3: convolution
model.add(layers.Conv2D(384, (3, 3),strides=(1, 1), padding='same', activation='relu'))
# Block 4: convolution
model.add(layers.Conv2D(384, (3, 3),strides=(1, 1), padding='same', activation='relu'))
# Block 5: convolution, then max pooling
model.add(layers.Conv2D(256, (3, 3),strides=(1, 1), padding='same', activation='relu'))
model.add(layers.MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))
# Fully connected classifier head
model.add(layers.Flatten())
model.add(layers.Dense(4096, activation='relu'))
model.add(layers.Dropout(0.5))
model.add(layers.Dense(4096, activation='relu'))
model.add(layers.Dropout(0.5))
model.add(layers.Dense(3, activation='softmax'))
model.summary()

model.compile(loss=keras.losses.categorical_crossentropy,  # cross-entropy loss for the 3-way softmax
              optimizer=optimizers.SGD(lr=0.0001, momentum=0.9, decay=0.0002, nesterov=True),
              # in Keras SGD, decay shrinks the rate as lr / (1 + decay * iterations)
              metrics=['accuracy'])  # report accuracy

train_datagen = ImageDataGenerator(
    rescale=1./255,               # rescale pixel values from [0, 255] to [0, 1]
    #rotation_range=10,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True)

test_datagen = ImageDataGenerator(rescale=1./255)
train_dir = 'E:/myjupyter/jupyterNotebook/data/train'
validation_dir = 'E:/myjupyter/jupyterNotebook/data/valid'
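# flow_from_directory infers one class per subfolder; the expected layout is
# (folder names below are placeholders -- the actual three class names are not
# shown in this post):
#   data/train/<class_1>/*.jpg
#   data/train/<class_2>/*.jpg
#   data/train/<class_3>/*.jpg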

# Stream batches of labelled images from the train/validation directories
train_generator = train_datagen.flow_from_directory(
        train_dir,
        target_size=(224, 224),
        batch_size=60,  # previously 48
        class_mode='categorical')

validation_generator = test_datagen.flow_from_directory(
        validation_dir,
        target_size=(224, 224),
        batch_size=60,  # previously 48
        class_mode='categorical')

# Save the best model seen so far. NOTE: this Keras version logs validation
# accuracy under the key 'val_acc', not 'val_accuracy'; the original run
# monitored 'val_accuracy', so both callbacks were skipped every epoch (see the
# RuntimeWarning in the log below). 'val_acc' is used here so they actually fire.
MC = keras.callbacks.ModelCheckpoint(filepath='E:/myjupyter/jupyterNotebook/AlexNet.h5',
                                     monitor='val_acc',
                                     verbose=1,
                                     save_best_only=True,
                                     save_weights_only=False,
                                     mode='auto',
                                     period=1)
# Divide the learning rate by 10 when val_acc has not improved for 10 epochs.
RL = keras.callbacks.ReduceLROnPlateau(monitor='val_acc',
                                       factor=0.1,
                                       patience=10,
                                       verbose=1,
                                       mode='auto',
                                       min_delta=0.000001,
                                       cooldown=0,
                                       min_lr=0)


history = model.fit_generator(
      train_generator,
      steps_per_epoch=8,   # NOTE: too small -- should be samples/batch_size (see log notes below)
      epochs=100,
      validation_data=validation_generator,
      validation_steps=8,
      callbacks=[MC, RL])
model.save('E:/myjupyter/jupyterNotebook/AlexNet.h5')
with open('E:/myjupyter/jupyterNotebook/AlexNet.txt','w') as f:
    f.write(str(history.history))

endtime = datetime.datetime.now()
print((endtime - starttime).seconds)  # elapsed training time in seconds (6350 s for this run)

# Plot the training curves (accuracy and loss)
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']

epochs = range(1, len(acc) + 1)

plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()

plt.figure()

plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
Using TensorFlow backend.


_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
conv2d_1 (Conv2D)            (None, 56, 56, 96)        34944     
_________________________________________________________________
batch_normalization_1 (Batch (None, 56, 56, 96)        384       
_________________________________________________________________
max_pooling2d_1 (MaxPooling2 (None, 27, 27, 96)        0         
_________________________________________________________________
conv2d_2 (Conv2D)            (None, 27, 27, 256)       614656    
_________________________________________________________________
batch_normalization_2 (Batch (None, 27, 27, 256)       1024      
_________________________________________________________________
max_pooling2d_2 (MaxPooling2 (None, 13, 13, 256)       0         
_________________________________________________________________
conv2d_3 (Conv2D)            (None, 13, 13, 384)       885120    
_________________________________________________________________
conv2d_4 (Conv2D)            (None, 13, 13, 384)       1327488   
_________________________________________________________________
conv2d_5 (Conv2D)            (None, 13, 13, 256)       884992    
_________________________________________________________________
max_pooling2d_3 (MaxPooling2 (None, 6, 6, 256)         0         
_________________________________________________________________
flatten_1 (Flatten)          (None, 9216)              0         
_________________________________________________________________
dense_1 (Dense)              (None, 4096)              37752832  
_________________________________________________________________
dropout_1 (Dropout)          (None, 4096)              0         
_________________________________________________________________
dense_2 (Dense)              (None, 4096)              16781312  
_________________________________________________________________
dropout_2 (Dropout)          (None, 4096)              0         
_________________________________________________________________
dense_3 (Dense)              (None, 3)                 12291     
=================================================================
Total params: 58,295,043
Trainable params: 58,294,339
Non-trainable params: 704
_________________________________________________________________
Found 3959 images belonging to 3 classes.
Found 902 images belonging to 3 classes.
Epoch 1/100
8/8 [==============================] - 80s 10s/step - loss: 1.3262 - acc: 0.3667 - val_loss: 1.2410 - val_acc: 0.2771
Epoch 2/100


E:\anaconda3\Anaconda\lib\site-packages\keras\callbacks.py:434: RuntimeWarning: Can save best model only with val_accuracy available, skipping.
  'skipping.' % (self.monitor), RuntimeWarning)
E:\anaconda3\Anaconda\lib\site-packages\keras\callbacks.py:1109: RuntimeWarning: Reduce LR on plateau conditioned on metric `val_accuracy` which is not available. Available metrics are: val_loss,val_acc,loss,acc,lr
  (self.monitor, ','.join(list(logs.keys()))), RuntimeWarning


8/8 [==============================] - 61s 8s/step - loss: 1.3059 - acc: 0.3667 - val_loss: 1.3179 - val_acc: 0.2915
Epoch 3/100
8/8 [==============================] - 73s 9s/step - loss: 1.1592 - acc: 0.4188 - val_loss: 1.1932 - val_acc: 0.3417
Epoch 4/100
8/8 [==============================] - 63s 8s/step - loss: 1.0890 - acc: 0.4562 - val_loss: 1.1376 - val_acc: 0.3720
Epoch 5/100
8/8 [==============================] - 71s 9s/step - loss: 1.0564 - acc: 0.4708 - val_loss: 1.2276 - val_acc: 0.3396
Epoch 6/100
8/8 [==============================] - 64s 8s/step - loss: 1.0599 - acc: 0.5083 - val_loss: 1.3890 - val_acc: 0.3057
Epoch 7/100
8/8 [==============================] - 71s 9s/step - loss: 1.0021 - acc: 0.5188 - val_loss: 1.3176 - val_acc: 0.3521
Epoch 8/100
8/8 [==============================] - 64s 8s/step - loss: 1.0191 - acc: 0.5146 - val_loss: 1.3755 - val_acc: 0.3981
Epoch 9/100
8/8 [==============================] - 75s 9s/step - loss: 1.0184 - acc: 0.5136 - val_loss: 1.4177 - val_acc: 0.3937
Epoch 10/100
8/8 [==============================] - 56s 7s/step - loss: 0.9456 - acc: 0.5729 - val_loss: 1.3378 - val_acc: 0.4076
Epoch 11/100
8/8 [==============================] - 65s 8s/step - loss: 0.9574 - acc: 0.5708 - val_loss: 1.2228 - val_acc: 0.3687
Epoch 12/100
8/8 [==============================] - 59s 7s/step - loss: 0.8748 - acc: 0.6146 - val_loss: 1.4480 - val_acc: 0.3768
Epoch 13/100
8/8 [==============================] - 70s 9s/step - loss: 0.9488 - acc: 0.5813 - val_loss: 1.8216 - val_acc: 0.4042
Epoch 14/100
8/8 [==============================] - 56s 7s/step - loss: 0.9003 - acc: 0.5896 - val_loss: 1.7359 - val_acc: 0.4123
Epoch 15/100
8/8 [==============================] - 70s 9s/step - loss: 0.8738 - acc: 0.6146 - val_loss: 1.2722 - val_acc: 0.4062
Epoch 16/100
8/8 [==============================] - 56s 7s/step - loss: 0.9112 - acc: 0.6250 - val_loss: 1.3083 - val_acc: 0.3318
Epoch 17/100
8/8 [==============================] - 68s 9s/step - loss: 0.8457 - acc: 0.6369 - val_loss: 1.5448 - val_acc: 0.4042
Epoch 18/100
8/8 [==============================] - 58s 7s/step - loss: 0.8579 - acc: 0.6437 - val_loss: 1.4720 - val_acc: 0.4052
Epoch 19/100
8/8 [==============================] - 64s 8s/step - loss: 0.8242 - acc: 0.6542 - val_loss: 1.2904 - val_acc: 0.3854
Epoch 20/100
8/8 [==============================] - 61s 8s/step - loss: 0.8679 - acc: 0.6229 - val_loss: 1.4399 - val_acc: 0.3531
Epoch 21/100
8/8 [==============================] - 64s 8s/step - loss: 0.8108 - acc: 0.6250 - val_loss: 1.4439 - val_acc: 0.3750
Epoch 22/100
8/8 [==============================] - 62s 8s/step - loss: 0.8234 - acc: 0.6688 - val_loss: 1.5132 - val_acc: 0.3934
Epoch 23/100
8/8 [==============================] - 69s 9s/step - loss: 0.7988 - acc: 0.6896 - val_loss: 1.5349 - val_acc: 0.3958
Epoch 24/100
8/8 [==============================] - 56s 7s/step - loss: 0.7888 - acc: 0.6563 - val_loss: 1.3845 - val_acc: 0.3626
Epoch 25/100
8/8 [==============================] - 64s 8s/step - loss: 0.8025 - acc: 0.6743 - val_loss: 1.6345 - val_acc: 0.3750
Epoch 26/100
8/8 [==============================] - 61s 8s/step - loss: 0.7978 - acc: 0.6521 - val_loss: 1.6255 - val_acc: 0.3886
Epoch 27/100
8/8 [==============================] - 65s 8s/step - loss: 0.8157 - acc: 0.6583 - val_loss: 1.3520 - val_acc: 0.3313
Epoch 28/100
8/8 [==============================] - 61s 8s/step - loss: 0.7979 - acc: 0.6667 - val_loss: 1.3040 - val_acc: 0.3720
Epoch 29/100
8/8 [==============================] - 64s 8s/step - loss: 0.7659 - acc: 0.6646 - val_loss: 1.3172 - val_acc: 0.3563
Epoch 30/100
8/8 [==============================] - 62s 8s/step - loss: 0.7652 - acc: 0.6687 - val_loss: 1.4346 - val_acc: 0.3673
Epoch 31/100
8/8 [==============================] - 68s 8s/step - loss: 0.7603 - acc: 0.6708 - val_loss: 1.5694 - val_acc: 0.3937
Epoch 32/100
8/8 [==============================] - 58s 7s/step - loss: 0.7509 - acc: 0.6750 - val_loss: 1.2972 - val_acc: 0.3744
Epoch 33/100
8/8 [==============================] - 65s 8s/step - loss: 0.6969 - acc: 0.7013 - val_loss: 1.4629 - val_acc: 0.3667
Epoch 34/100
8/8 [==============================] - 61s 8s/step - loss: 0.7584 - acc: 0.7125 - val_loss: 1.4420 - val_acc: 0.3957
Epoch 35/100
8/8 [==============================] - 63s 8s/step - loss: 0.7348 - acc: 0.6979 - val_loss: 1.4208 - val_acc: 0.3292
Epoch 36/100
8/8 [==============================] - 62s 8s/step - loss: 0.8385 - acc: 0.6396 - val_loss: 1.3859 - val_acc: 0.4573
Epoch 37/100
8/8 [==============================] - 63s 8s/step - loss: 0.7508 - acc: 0.6917 - val_loss: 1.6024 - val_acc: 0.4521
Epoch 38/100
8/8 [==============================] - 62s 8s/step - loss: 0.6852 - acc: 0.7250 - val_loss: 1.5598 - val_acc: 0.3744
Epoch 39/100
8/8 [==============================] - 66s 8s/step - loss: 0.6988 - acc: 0.7271 - val_loss: 1.6072 - val_acc: 0.3313
Epoch 40/100
8/8 [==============================] - 60s 8s/step - loss: 0.7219 - acc: 0.6708 - val_loss: 1.3861 - val_acc: 0.4005
Epoch 41/100
8/8 [==============================] - 66s 8s/step - loss: 0.7655 - acc: 0.6771 - val_loss: 1.4494 - val_acc: 0.3917
Epoch 42/100
8/8 [==============================] - 58s 7s/step - loss: 0.7119 - acc: 0.7097 - val_loss: 1.4462 - val_acc: 0.4265
Epoch 43/100
8/8 [==============================] - 68s 8s/step - loss: 0.7440 - acc: 0.7000 - val_loss: 1.4885 - val_acc: 0.3896
Epoch 44/100
8/8 [==============================] - 58s 7s/step - loss: 0.7091 - acc: 0.6958 - val_loss: 1.4368 - val_acc: 0.3957
Epoch 45/100
8/8 [==============================] - 66s 8s/step - loss: 0.7328 - acc: 0.6979 - val_loss: 1.3893 - val_acc: 0.3792
Epoch 46/100
8/8 [==============================] - 59s 7s/step - loss: 0.7143 - acc: 0.6958 - val_loss: 1.6773 - val_acc: 0.3673
Epoch 47/100
8/8 [==============================] - 67s 8s/step - loss: 0.7219 - acc: 0.7125 - val_loss: 2.1237 - val_acc: 0.4125
Epoch 48/100
8/8 [==============================] - 57s 7s/step - loss: 0.6654 - acc: 0.7563 - val_loss: 1.8897 - val_acc: 0.4147
Epoch 49/100
8/8 [==============================] - 65s 8s/step - loss: 0.7157 - acc: 0.7000 - val_loss: 1.6742 - val_acc: 0.4167
Epoch 50/100
8/8 [==============================] - 62s 8s/step - loss: 0.7031 - acc: 0.7286 - val_loss: 1.3098 - val_acc: 0.4194
Epoch 51/100
8/8 [==============================] - 72s 9s/step - loss: 0.6600 - acc: 0.7229 - val_loss: 1.5568 - val_acc: 0.4042
Epoch 52/100
8/8 [==============================] - 54s 7s/step - loss: 0.7195 - acc: 0.6958 - val_loss: 2.1551 - val_acc: 0.3863
Epoch 53/100
8/8 [==============================] - 66s 8s/step - loss: 0.6954 - acc: 0.6979 - val_loss: 1.7206 - val_acc: 0.4000
Epoch 54/100
8/8 [==============================] - 59s 7s/step - loss: 0.6964 - acc: 0.7104 - val_loss: 2.0470 - val_acc: 0.4076
Epoch 55/100
8/8 [==============================] - 70s 9s/step - loss: 0.7188 - acc: 0.7062 - val_loss: 2.5343 - val_acc: 0.4083
Epoch 56/100
8/8 [==============================] - 55s 7s/step - loss: 0.7193 - acc: 0.7125 - val_loss: 1.3505 - val_acc: 0.4621
Epoch 57/100
8/8 [==============================] - 65s 8s/step - loss: 0.6465 - acc: 0.7438 - val_loss: 1.3698 - val_acc: 0.4396
Epoch 58/100
8/8 [==============================] - 60s 7s/step - loss: 0.7329 - acc: 0.6994 - val_loss: 1.9122 - val_acc: 0.4289
Epoch 59/100
8/8 [==============================] - 73s 9s/step - loss: 0.7357 - acc: 0.6958 - val_loss: 1.8681 - val_acc: 0.4125
Epoch 60/100
8/8 [==============================] - 53s 7s/step - loss: 0.6683 - acc: 0.7062 - val_loss: 1.4742 - val_acc: 0.3791
Epoch 61/100
8/8 [==============================] - 68s 9s/step - loss: 0.7286 - acc: 0.7042 - val_loss: 1.3970 - val_acc: 0.3917
Epoch 62/100
8/8 [==============================] - 57s 7s/step - loss: 0.6585 - acc: 0.7208 - val_loss: 1.4254 - val_acc: 0.4313
Epoch 63/100
8/8 [==============================] - 67s 8s/step - loss: 0.6740 - acc: 0.7083 - val_loss: 2.1045 - val_acc: 0.4104
Epoch 64/100
8/8 [==============================] - 59s 7s/step - loss: 0.7183 - acc: 0.6896 - val_loss: 1.5051 - val_acc: 0.4455
Epoch 65/100
8/8 [==============================] - 63s 8s/step - loss: 0.6913 - acc: 0.7292 - val_loss: 1.2766 - val_acc: 0.4562
Epoch 66/100
8/8 [==============================] - 63s 8s/step - loss: 0.6995 - acc: 0.7056 - val_loss: 1.4460 - val_acc: 0.4147
Epoch 67/100
8/8 [==============================] - 68s 9s/step - loss: 0.6520 - acc: 0.7438 - val_loss: 1.2550 - val_acc: 0.4479
Epoch 68/100
8/8 [==============================] - 57s 7s/step - loss: 0.6869 - acc: 0.6979 - val_loss: 1.3837 - val_acc: 0.4242
Epoch 69/100
8/8 [==============================] - 63s 8s/step - loss: 0.7526 - acc: 0.6833 - val_loss: 1.2383 - val_acc: 0.4229
Epoch 70/100
8/8 [==============================] - 64s 8s/step - loss: 0.6915 - acc: 0.7187 - val_loss: 1.2248 - val_acc: 0.4218
Epoch 71/100
8/8 [==============================] - 77s 10s/step - loss: 0.6802 - acc: 0.7104 - val_loss: 1.2351 - val_acc: 0.4354
Epoch 72/100
8/8 [==============================] - 62s 8s/step - loss: 0.6869 - acc: 0.7125 - val_loss: 1.2425 - val_acc: 0.4005
Epoch 73/100
8/8 [==============================] - 65s 8s/step - loss: 0.6407 - acc: 0.7417 - val_loss: 1.2938 - val_acc: 0.4292
Epoch 74/100
8/8 [==============================] - 67s 8s/step - loss: 0.5785 - acc: 0.7625 - val_loss: 1.2878 - val_acc: 0.4289
Epoch 75/100
8/8 [==============================] - 71s 9s/step - loss: 0.6106 - acc: 0.7558 - val_loss: 1.5552 - val_acc: 0.4667
Epoch 76/100
8/8 [==============================] - 54s 7s/step - loss: 0.6606 - acc: 0.7229 - val_loss: 1.6769 - val_acc: 0.4621
Epoch 77/100
8/8 [==============================] - 68s 8s/step - loss: 0.6560 - acc: 0.7437 - val_loss: 1.1217 - val_acc: 0.4542
Epoch 78/100
8/8 [==============================] - 58s 7s/step - loss: 0.6295 - acc: 0.7604 - val_loss: 1.2108 - val_acc: 0.4834
Epoch 79/100
8/8 [==============================] - 69s 9s/step - loss: 0.6275 - acc: 0.7563 - val_loss: 1.5719 - val_acc: 0.4125
Epoch 80/100
8/8 [==============================] - 59s 7s/step - loss: 0.6608 - acc: 0.7375 - val_loss: 2.5935 - val_acc: 0.4313
Epoch 81/100
8/8 [==============================] - 68s 9s/step - loss: 0.6683 - acc: 0.7146 - val_loss: 1.4264 - val_acc: 0.4083
Epoch 82/100
8/8 [==============================] - 58s 7s/step - loss: 0.6117 - acc: 0.7625 - val_loss: 1.5997 - val_acc: 0.3389
Epoch 83/100
8/8 [==============================] - 68s 9s/step - loss: 0.6499 - acc: 0.7181 - val_loss: 1.6245 - val_acc: 0.3521
Epoch 84/100
8/8 [==============================] - 57s 7s/step - loss: 0.7113 - acc: 0.7187 - val_loss: 1.5597 - val_acc: 0.4123
Epoch 85/100
8/8 [==============================] - 68s 8s/step - loss: 0.6020 - acc: 0.7708 - val_loss: 1.6130 - val_acc: 0.4167
Epoch 86/100
8/8 [==============================] - 58s 7s/step - loss: 0.6359 - acc: 0.7375 - val_loss: 1.3920 - val_acc: 0.3531
Epoch 87/100
8/8 [==============================] - 67s 8s/step - loss: 0.6170 - acc: 0.7458 - val_loss: 1.4177 - val_acc: 0.4583
Epoch 88/100
8/8 [==============================] - 58s 7s/step - loss: 0.5951 - acc: 0.7688 - val_loss: 1.3512 - val_acc: 0.4147
Epoch 89/100
8/8 [==============================] - 63s 8s/step - loss: 0.6761 - acc: 0.7250 - val_loss: 1.4026 - val_acc: 0.4000
Epoch 90/100
8/8 [==============================] - 60s 8s/step - loss: 0.6443 - acc: 0.7458 - val_loss: 1.1686 - val_acc: 0.4716
Epoch 91/100
8/8 [==============================] - 66s 8s/step - loss: 0.6412 - acc: 0.7496 - val_loss: 1.4738 - val_acc: 0.4312
Epoch 92/100
8/8 [==============================] - 59s 7s/step - loss: 0.6523 - acc: 0.7292 - val_loss: 1.3379 - val_acc: 0.4455
Epoch 93/100
8/8 [==============================] - 68s 9s/step - loss: 0.6636 - acc: 0.7375 - val_loss: 1.3394 - val_acc: 0.4292
Epoch 94/100
8/8 [==============================] - 56s 7s/step - loss: 0.6127 - acc: 0.7562 - val_loss: 1.3043 - val_acc: 0.4194
Epoch 95/100
8/8 [==============================] - 61s 8s/step - loss: 0.6351 - acc: 0.7396 - val_loss: 1.7028 - val_acc: 0.4062
Epoch 96/100
8/8 [==============================] - 64s 8s/step - loss: 0.6415 - acc: 0.7542 - val_loss: 1.2958 - val_acc: 0.4100
Epoch 97/100
8/8 [==============================] - 65s 8s/step - loss: 0.5747 - acc: 0.7771 - val_loss: 1.3651 - val_acc: 0.3729
Epoch 98/100
8/8 [==============================] - 60s 7s/step - loss: 0.6334 - acc: 0.7250 - val_loss: 1.3782 - val_acc: 0.4479
Epoch 99/100
8/8 [==============================] - 66s 8s/step - loss: 0.6810 - acc: 0.7263 - val_loss: 1.5347 - val_acc: 0.4625
Epoch 100/100
8/8 [==============================] - 61s 8s/step - loss: 0.6420 - acc: 0.7125 - val_loss: 1.4338 - val_acc: 0.4526
6350




[Figure: Training and validation accuracy]

[Figure: Training and validation loss]
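The saved AlexNet.h5 is meant to be used for predicting new skin images; what follows is a minimal sketch of that inference step (the test image path is hypothetical, and preprocessing mirrors the training pipeline: a 224x224 input rescaled by 1/255):

import numpy as np
from keras.models import load_model
from keras.preprocessing import image

model = load_model('E:/myjupyter/jupyterNotebook/AlexNet.h5')

# Hypothetical test image; preprocess exactly as during training.
img = image.load_img('E:/myjupyter/jupyterNotebook/data/test/sample.jpg', target_size=(224, 224))
x = image.img_to_array(img) / 255.0
x = np.expand_dims(x, axis=0)                     # shape (1, 224, 224, 3)

probs = model.predict(x)[0]                       # softmax scores for the 3 classes
class_names = {v: k for k, v in train_generator.class_indices.items()}
print(class_names[int(np.argmax(probs))], probs)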

Log notes

This model's val_acc is quite low even though training accuracy reaches about 70%, and it is val_acc that the model should be judged by.
I made a mistake here: I had not worked out how steps_per_epoch in model.fit_generator() should be set. It should be steps_per_epoch = total number of samples / batch_size, so that each epoch traverses all of the training images (a corrected call is sketched below).
I also adjusted the learning rate and the decay value, following a senior student's advice. If this change brings the results close to acceptable, I will keep tuning the learning rate; if the gap is still large, I will switch to ResNet50 and try that instead.
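A minimal sketch of the steps_per_epoch fix described above, reusing the generator objects defined earlier (samples and batch_size are standard attributes of Keras directory iterators):

import math

# One epoch should make enough steps to cover every image once.
steps_per_epoch = math.ceil(train_generator.samples / train_generator.batch_size)             # ceil(3959 / 60) = 66
validation_steps = math.ceil(validation_generator.samples / validation_generator.batch_size)  # ceil(902 / 60) = 16

history = model.fit_generator(
      train_generator,
      steps_per_epoch=steps_per_epoch,
      epochs=100,
      validation_data=validation_generator,
      validation_steps=validation_steps,
      callbacks=[MC, RL])

With batch_size=60 this gives 66 training steps per epoch instead of the 8 used above, so every epoch sees all 3959 training images.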