tensorflow2.0建立tensor管道式输入

import tensorflow as tf
import numpy as np  # fixed: original read "import numpy as pd", which shadows the usual pandas alias
import matplotlib.pyplot as plt

# AlexNet (half-width variant) built with the tf.keras functional API.
im_height = 224   # input image height
im_width = 224    # input image width
class_num = 1000  # number of output classes

input_image = tf.keras.Input(shape=(im_height, im_width, 3), dtype="float32")      # output(None, 224, 224, 3)
# Asymmetric zero padding (1 top/left, 2 bottom/right) turns 224 into 227 so the
# 11x11 / stride-4 convolution below produces exactly 55x55 feature maps.
x1 = tf.keras.layers.ZeroPadding2D(((1, 2), (1, 2)))(input_image)                  # output(None, 227, 227, 3)
x2 = tf.keras.layers.Conv2D(48, kernel_size=11, strides=4, activation="relu")(x1)  # output(None, 55, 55, 48)
x3 = tf.keras.layers.MaxPool2D(pool_size=3, strides=2)(x2)                              # output(None, 27, 27, 48)
x4 = tf.keras.layers.Conv2D(128, kernel_size=5, padding="same", activation="relu")(x3)  # output(None, 27, 27, 128)
x5 = tf.keras.layers.MaxPool2D(pool_size=3, strides=2)(x4)                              # output(None, 13, 13, 128)
x6 = tf.keras.layers.Conv2D(192, kernel_size=3, padding="same", activation="relu")(x5)  # output(None, 13, 13, 192)
x7 = tf.keras.layers.Conv2D(192, kernel_size=3, padding="same", activation="relu")(x6)  # output(None, 13, 13, 192)
x8 = tf.keras.layers.Conv2D(128, kernel_size=3, padding="same", activation="relu")(x7)  # output(None, 13, 13, 128)
x9 = tf.keras.layers.MaxPool2D(pool_size=3, strides=2)(x8)                              # output(None, 6, 6, 128)
x10 = tf.keras.layers.Flatten()(x9)                          # output(None, 6*6*128)
x11 = tf.keras.layers.Dropout(0.2)(x10)                      # regularization before the dense head
x12 = tf.keras.layers.Dense(2048, activation="relu")(x11)    # output(None, 2048)
x13 = tf.keras.layers.Dropout(0.2)(x12)
x14 = tf.keras.layers.Dense(2048, activation="relu")(x13)    # output(None, 2048)
x15 = tf.keras.layers.Dense(class_num)(x14)                  # output(None, class_num) -- fixed stale comment that said (None, 5)
predict = tf.keras.layers.Softmax()(x15)                     # logits -> class probabilities
model = tf.keras.Model(inputs=input_image, outputs=predict)
tf.keras.utils.plot_model(model, show_shapes=True)           # render the architecture diagram

(Figure: model architecture diagram, output_3_0.png — the original externally hosted image link is broken; save and re-upload the image directly.)

from tensorflow.keras.preprocessing.image import ImageDataGenerator
import matplotlib.pyplot as plt
from model import AlexNet_v1, AlexNet_v2
import tensorflow as tf
import json
import os
# NOTE(review): this section is a notebook transcript -- the bare names and the
# quoted Windows-path string literals below are captured REPL echoes/outputs,
# not live logic. They are syntactically harmless expression statements.
data_root = os.path.abspath(os.path.join(os.getcwd(), "../.."))  # get data root path: os.getcwd() returns the current directory
data_root
'D:\\tf\\helodoger-deep-learning-for-image-processing-master\\deep-learning-for-image-processing'
data_root  # echoed again; the "../.." above drops the last two path components
'D:\\tf\\helodoger-deep-learning-for-image-processing-master\\deep-learning-for-image-processing'
image_path = data_root + "/data_set/flower_data/"  # flower data set path
image_path
'D:\\tf\\helodoger-deep-learning-for-image-processing-master\\deep-learning-for-image-processing/data_set/flower_data/'
train_dir = image_path + "train"  # training split directory
train_dir
'D:\\tf\\helodoger-deep-learning-for-image-processing-master\\deep-learning-for-image-processing/data_set/flower_data/train'
validation_dir = image_path + "val"  # validation split directory
validation_dir
'D:\\tf\\helodoger-deep-learning-for-image-processing-master\\deep-learning-for-image-processing/data_set/flower_data/val'
if not os.path.exists("save_weights"):
    os.makedirs("save_weights")  # checkpoint output directory for ModelCheckpoint below
im_height = 224  # input image height expected by the model
im_width = 224   # input image width
batch_size = 32
epochs = 30
train_image_generator = ImageDataGenerator(rescale=1. / 255,
                                           horizontal_flip=True)  # augment training data with random horizontal flips
validation_image_generator = ImageDataGenerator(rescale=1. / 255)  # validation: rescale only, no augmentation
train_data_gen = train_image_generator.flow_from_directory(directory=train_dir,
                                                           batch_size=batch_size,  # batch_size = 32
                                                           shuffle=True,
                                                           target_size=(im_height, im_width),
                                                           class_mode='categorical')
Found 3306 images belonging to 5 classes.
total_train = train_data_gen.n  # total number of training images found by the generator
total_train
3306
class_indices = train_data_gen.class_indices
class_indices  # mapping assigned by the generator: class-folder name -> integer label
{'daisy': 0, 'dandelion': 1, 'roses': 2, 'sunflowers': 3, 'tulips': 4}
inverse_dict = dict((val, key) for key, val in class_indices.items())  # invert to: integer label -> class name
inverse_dict
{0: 'daisy', 1: 'dandelion', 2: 'roses', 3: 'sunflowers', 4: 'tulips'}
json_str = json.dumps(inverse_dict, indent=4)  # dumps converts a dict to a JSON string; loads does the reverse
json_str
'{\n    "0": "daisy",\n    "1": "dandelion",\n    "2": "roses",\n    "3": "sunflowers",\n    "4": "tulips"\n}'
with open('class_indices.json', 'w') as json_file:
    json_file.write(json_str)  # persist the label mapping for use at inference time
val_data_gen = validation_image_generator.flow_from_directory(directory=validation_dir,
                                                              batch_size=batch_size,
                                                              shuffle=False,  # keep a stable order for evaluation
                                                              target_size=(im_height, im_width),
                                                              class_mode='categorical')
Found 364 images belonging to 5 classes.
total_val = val_data_gen.n  # total number of validation images found by the generator
total_val
364
sample_training_images, sample_training_labels = next(train_data_gen)  # label is one-hot coding


# Show up to five images side by side in a single row, without axes.
def plotImages(images_arr):
    """Render the given images in a 1x5 grid with axis ticks hidden."""
    _, axis_grid = plt.subplots(1, 5, figsize=(20, 20))
    for picture, panel in zip(images_arr, axis_grid.flatten()):
        panel.imshow(picture)
        panel.axis('off')
    plt.tight_layout()
    plt.show()


plotImages(sample_training_images[:5])

(Figure: grid of five sample training images.)

# Same visualization as plotImages, written inline for demonstration.
fig, axes = plt.subplots(1, 5, figsize=(20, 20))
axes = axes.flatten()  # flatten the axes grid into a 1-D array for pairing with images
for img, ax in zip(sample_training_images[:5], axes):
    ax.imshow(img)
    ax.axis('off')
plt.tight_layout()
plt.show()

# NOTE: fig, ax = plt.subplots() builds the figure and axes objects in one call,
# which is more concise than plt.figure() followed by fig.add_subplot(...).

(Figure: grid of five sample training images, same as above.)

model = AlexNet_v1(im_height=im_height, im_width=im_width, class_num=5)  # 5 flower classes
model.summary()  # print layer output shapes and parameter counts
Model: "model_1"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
input_2 (InputLayer)         [(None, 224, 224, 3)]     0         
_________________________________________________________________
zero_padding2d_1 (ZeroPaddin (None, 227, 227, 3)       0         
_________________________________________________________________
conv2d_5 (Conv2D)            (None, 55, 55, 48)        17472     
_________________________________________________________________
max_pooling2d_3 (MaxPooling2 (None, 27, 27, 48)        0         
_________________________________________________________________
conv2d_6 (Conv2D)            (None, 27, 27, 128)       153728    
_________________________________________________________________
max_pooling2d_4 (MaxPooling2 (None, 13, 13, 128)       0         
_________________________________________________________________
conv2d_7 (Conv2D)            (None, 13, 13, 192)       221376    
_________________________________________________________________
conv2d_8 (Conv2D)            (None, 13, 13, 192)       331968    
_________________________________________________________________
conv2d_9 (Conv2D)            (None, 13, 13, 128)       221312    
_________________________________________________________________
max_pooling2d_5 (MaxPooling2 (None, 6, 6, 128)         0         
_________________________________________________________________
flatten_1 (Flatten)          (None, 4608)              0         
_________________________________________________________________
dropout_2 (Dropout)          (None, 4608)              0         
_________________________________________________________________
dense_3 (Dense)              (None, 2048)              9439232   
_________________________________________________________________
dropout_3 (Dropout)          (None, 2048)              0         
_________________________________________________________________
dense_4 (Dense)              (None, 2048)              4196352   
_________________________________________________________________
dense_5 (Dense)              (None, 5)                 10245     
_________________________________________________________________
softmax_1 (Softmax)          (None, 5)                 0         
=================================================================
Total params: 14,591,685
Trainable params: 14,591,685
Non-trainable params: 0
_________________________________________________________________
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.0005),
              loss=tf.keras.losses.CategoricalCrossentropy(from_logits=False),  # model output is already softmax probabilities
              metrics=["accuracy"])
# Keep only the best weights (lowest validation loss) seen during training.
callbacks = [tf.keras.callbacks.ModelCheckpoint(filepath='./save_weights/myAlex.h5',
                                                save_best_only=True,
                                                save_weights_only=True,
                                                monitor='val_loss')]
history = model.fit(x=train_data_gen,
                    steps_per_epoch=total_train // batch_size,
                    epochs=epochs,
                    validation_data=val_data_gen,
                    validation_steps=total_val // batch_size,
                    callbacks=callbacks)
Train for 103 steps, validate for 11 steps
Epoch 1/30
103/103 [==============================] - 15s 149ms/step - loss: 1.4408 - accuracy: 0.3607 - val_loss: 1.3226 - val_accuracy: 0.4574
Epoch 2/30
103/103 [==============================] - 12s 121ms/step - loss: 1.1442 - accuracy: 0.5318 - val_loss: 1.0794 - val_accuracy: 0.5767
Epoch 3/30
103/103 [==============================] - 12s 116ms/step - loss: 1.0333 - accuracy: 0.5883 - val_loss: 0.9879 - val_accuracy: 0.6278
Epoch 4/30
103/103 [==============================] - 12s 120ms/step - loss: 0.9646 - accuracy: 0.6133 - val_loss: 0.9258 - val_accuracy: 0.6420
Epoch 5/30
103/103 [==============================] - 12s 118ms/step - loss: 0.8905 - accuracy: 0.6515 - val_loss: 0.8920 - val_accuracy: 0.6392
Epoch 6/30
103/103 [==============================] - 12s 116ms/step - loss: 0.8443 - accuracy: 0.6741 - val_loss: 0.9505 - val_accuracy: 0.6108
Epoch 7/30
103/103 [==============================] - 12s 115ms/step - loss: 0.8094 - accuracy: 0.6808 - val_loss: 0.9455 - val_accuracy: 0.6449
Epoch 8/30
103/103 [==============================] - 12s 116ms/step - loss: 0.7814 - accuracy: 0.6949 - val_loss: 0.9095 - val_accuracy: 0.6648
Epoch 9/30
103/103 [==============================] - 12s 119ms/step - loss: 0.7001 - accuracy: 0.7257 - val_loss: 0.8424 - val_accuracy: 0.6733
Epoch 10/30
103/103 [==============================] - 12s 119ms/step - loss: 0.7032 - accuracy: 0.7361 - val_loss: 0.8533 - val_accuracy: 0.6847
Epoch 11/30
103/103 [==============================] - 12s 117ms/step - loss: 0.6558 - accuracy: 0.7489 - val_loss: 0.8198 - val_accuracy: 0.6818
Epoch 12/30
103/103 [==============================] - 12s 115ms/step - loss: 0.6147 - accuracy: 0.7673 - val_loss: 0.7819 - val_accuracy: 0.6761
Epoch 13/30
103/103 [==============================] - 12s 114ms/step - loss: 0.5582 - accuracy: 0.7792 - val_loss: 0.8366 - val_accuracy: 0.7244
Epoch 14/30
103/103 [==============================] - 12s 116ms/step - loss: 0.5588 - accuracy: 0.7758 - val_loss: 0.7194 - val_accuracy: 0.7358
Epoch 15/30
103/103 [==============================] - 12s 116ms/step - loss: 0.5063 - accuracy: 0.8042 - val_loss: 0.7614 - val_accuracy: 0.7273
Epoch 16/30
103/103 [==============================] - 12s 114ms/step - loss: 0.4656 - accuracy: 0.8247 - val_loss: 0.8683 - val_accuracy: 0.7301
Epoch 17/30
103/103 [==============================] - 12s 115ms/step - loss: 0.4415 - accuracy: 0.8314 - val_loss: 0.9315 - val_accuracy: 0.6818
Epoch 18/30
103/103 [==============================] - 12s 121ms/step - loss: 0.4009 - accuracy: 0.8470 - val_loss: 0.7711 - val_accuracy: 0.7415
Epoch 19/30
103/103 [==============================] - 12s 114ms/step - loss: 0.3703 - accuracy: 0.8616 - val_loss: 0.9879 - val_accuracy: 0.6847
Epoch 20/30
103/103 [==============================] - 12s 114ms/step - loss: 0.3892 - accuracy: 0.8540 - val_loss: 0.8547 - val_accuracy: 0.7188
Epoch 21/30
103/103 [==============================] - 12s 114ms/step - loss: 0.3548 - accuracy: 0.8693 - val_loss: 0.8747 - val_accuracy: 0.7443
Epoch 22/30
103/103 [==============================] - 12s 114ms/step - loss: 0.3259 - accuracy: 0.8757 - val_loss: 1.0008 - val_accuracy: 0.7159
Epoch 23/30
103/103 [==============================] - 12s 115ms/step - loss: 0.3165 - accuracy: 0.8830 - val_loss: 0.8972 - val_accuracy: 0.7443
Epoch 24/30
103/103 [==============================] - 12s 114ms/step - loss: 0.2736 - accuracy: 0.9004 - val_loss: 0.8848 - val_accuracy: 0.7330
Epoch 25/30
103/103 [==============================] - 12s 117ms/step - loss: 0.2136 - accuracy: 0.9221 - val_loss: 0.8879 - val_accuracy: 0.7330
Epoch 26/30
103/103 [==============================] - 12s 120ms/step - loss: 0.2099 - accuracy: 0.9206 - val_loss: 1.0295 - val_accuracy: 0.7330
Epoch 27/30
103/103 [==============================] - 12s 118ms/step - loss: 0.2113 - accuracy: 0.9261 - val_loss: 1.1168 - val_accuracy: 0.7301
Epoch 28/30
103/103 [==============================] - 12s 117ms/step - loss: 0.1878 - accuracy: 0.9285 - val_loss: 0.9840 - val_accuracy: 0.7443
Epoch 29/30
103/103 [==============================] - 12s 119ms/step - loss: 0.1598 - accuracy: 0.9423 - val_loss: 1.0963 - val_accuracy: 0.7443
Epoch 30/30
103/103 [==============================] - 12s 117ms/step - loss: 0.1703 - accuracy: 0.9417 - val_loss: 1.1542 - val_accuracy: 0.6960
# Plot training vs. validation loss per epoch.
# Fixed: linewidth was passed as the string '2'; matplotlib expects a number.
plt.plot(history.epoch, history.history.get('loss'), label='loss', color='limegreen', linewidth=2)
plt.plot(history.epoch, history.history.get('val_loss'), label='val_loss', color='tomato', linewidth=2)
plt.xlabel('epochs')
plt.ylabel('loss')
plt.legend(loc='best')
<matplotlib.legend.Legend at 0x22387473788>

(Figure: training and validation loss curves over 30 epochs.)

# Plot training vs. validation accuracy per epoch.
# Fixed: linewidth was a string ('2'), and the y-axis label was misspelled 'acuracy'.
plt.plot(history.epoch, history.history.get('accuracy'), label='accuracy', color='lightsteelblue', linewidth=2)
plt.plot(history.epoch, history.history.get('val_accuracy'), label='val_accuracy', color='red', linewidth=2)
plt.xlabel('epochs')
plt.ylabel('accuracy')
plt.legend(loc='best')
<matplotlib.legend.Legend at 0x224ec5c1548>

(Figure: training and validation accuracy curves over 30 epochs.)

y_pre = model.predict(train_data_gen)  # class-probability predictions over the whole training generator
train_dir
'D:\\tf\\helodoger-deep-learning-for-image-processing-master\\deep-learning-for-image-processing/data_set/flower_data/train'
import glob
all_image_paths = glob.glob(r'../*')  # list all entries in the parent directory
all_image_paths
['..\\analyze_weights_featuremap',
 '..\\README.md',
 '..\\Test1_official_demo',
 '..\\Test2_alexnet',
 '..\\Test3_vgg',
 '..\\Test4_goolenet',
 '..\\Test5_resnet',
 '..\\Test6_mobilenet']
train_data_gen.class_indices
{'daisy': 0, 'dandelion': 1, 'roses': 2, 'sunflowers': 3, 'tulips': 4}
train_data_gen.batch_size
32
train_data_gen.labels
# NOTE(review): the next line is a captured NumPy repr (REPL output), not live code.
array([0, 0, 0, ..., 4, 4, 4])
# Experiments showing how os.path.join / abspath interpret ".." components.
data_1 = os.path.abspath(os.path.join(os.getcwd(), "../.."))  # get data root path: os.getcwd() returns the current directory
data_1
'D:\\tf\\helodoger-deep-learning-for-image-processing-master\\deep-learning-for-image-processing'
data_1 = os.path.abspath(os.path.join(os.getcwd() +"1"))  # plain string concatenation appends "1" to the last path component
data_1
'D:\\tf\\helodoger-deep-learning-for-image-processing-master\\deep-learning-for-image-processing\\tensorflow_learning\\Test2_alexnet1'
data_1 = os.path.abspath(os.path.join(os.getcwd() ))
data_1
'D:\\tf\\helodoger-deep-learning-for-image-processing-master\\deep-learning-for-image-processing\\tensorflow_learning\\Test2_alexnet'
data_1 = os.path.abspath(os.path.join(os.getcwd(),"../../..")) # each ".." steps up one directory level
data_1
'D:\\tf\\helodoger-deep-learning-for-image-processing-master'

  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 打赏
    打赏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包

打赏作者

潘诺西亚的火山

你的鼓励将是我创作的最大动力

¥1 ¥2 ¥4 ¥6 ¥10 ¥20
扫码支付:¥1
获取中
扫码支付

您的余额不足,请更换扫码支付或充值

打赏作者

实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值