Deep Learning: Dog Breed Recognition with TensorFlow 2 (DenseNet121 Implementation)

This post is mostly code and result screenshots with only brief notes; for detailed explanations, see the earlier post that uses ResNet50.

# Import the required libraries
import tensorflow as tf
from tensorflow.keras.preprocessing import image
import pathlib
import os
import numpy as np
import random
import glob
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import pandas as pd
import matplotlib.pyplot as plt
gpus = tf.config.experimental.list_physical_devices('GPU')
tf.config.experimental.set_visible_devices(gpus[0], 'GPU')
tf.config.experimental.set_virtual_device_configuration(
    gpus[0],
    [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=7000)])
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
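The GPU setup above assumes at least one visible GPU and pins a fixed 7000 MB cap. As an alternative sketch (my addition, not from the original post), memory growth lets TensorFlow allocate GPU memory on demand; it would replace the virtual-device configuration above, not run alongside it.
# Alternative GPU setup (sketch): allocate memory on demand instead of a fixed cap.
# Do not combine this with set_virtual_device_configuration in the same session.
for gpu in tf.config.experimental.list_physical_devices('GPU'):
    tf.config.experimental.set_memory_growth(gpu, True)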
# Image directory paths
train_root = '/home/hyb/狗狗识别/dogImages/train'
test_root = '/home/hyb/狗狗识别/dogImages/test'
valid_root = '/home/hyb/狗狗识别/dogImages/valid'
# Define the hyperparameters here instead of hard-coding them in the compile/fit calls, so they are easy to adjust
width = 224
height = 224
channels = 3
batch_size = 28
num_classes = 133
# Data augmentation
train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
    preprocessing_function = tf.keras.applications.densenet.preprocess_input,
    rotation_range= 40,
    width_shift_range= 0.2,
    height_shift_range= 0.2,
    shear_range= 0.2,
    zoom_range= 0.2,
    horizontal_flip= True,
    fill_mode= 'nearest'
)
# Read all dog breed subfolders and their images from the training directory and preprocess them
train_generator = train_datagen.flow_from_directory(
    train_root,
    target_size=(width, height),
    batch_size=batch_size,
    seed=7,
    shuffle=True,
    class_mode="categorical"
)
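As a quick sanity check (my own sketch, not in the original post), the snippet below pulls one batch from train_generator and shows a few augmented images; since densenet.preprocess_input has already normalized the pixel values, each image is rescaled to [0, 1] purely for display.
# Visual sanity check of the augmentation (sketch)
x_batch, y_batch = next(train_generator)
plt.figure(figsize=(10, 4))
for i in range(4):
    plt.subplot(1, 4, i + 1)
    img = x_batch[i]
    img = (img - img.min()) / (img.max() - img.min() + 1e-8)  # rescale for display only
    plt.imshow(img)
    plt.axis('off')
plt.show()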


valid_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
    preprocessing_function = tf.keras.applications.densenet.preprocess_input,
)
# Read all dog breed subfolders and their images from the validation directory and preprocess them
valid_generator = valid_datagen.flow_from_directory(
    valid_root,
    target_size=(width, height),
    batch_size=batch_size,
    seed=7,
    shuffle=False,
    class_mode="categorical"
)

train_num = train_generator.samples
valid_num = valid_generator.samples
print("训练集中的图片数量:",train_num)
print("验证集中的图片数量:",valid_num)
test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
    preprocessing_function = tf.keras.applications.densenet.preprocess_input,
)
test_generator = test_datagen.flow_from_directory(
    test_root,
    target_size=(width, height),
    batch_size=batch_size,
    seed=7,
    shuffle=False,
    class_mode="categorical"
)
test_num = test_generator.samples
print("Number of test images:", test_num)
# Check the shape of the training batches to make sure it matches the (batch_size, width, height, channels) hyperparameters
## Tip: if the printed label shape is not the expected (28, 133), the '/home/hyb/狗狗识别/dogImages/train' path is probably missing the train folder, so not all breed subdirectories were read
for i in range(2):
    x, y = next(train_generator)
    print(x.shape,y.shape)
    print(y)
densenet_fine_tune = tf.keras.models.Sequential()
densenet_fine_tune.add(tf.keras.applications.DenseNet121(include_top = False,pooling = 'avg',weights = 'imagenet'))
# Add an output layer whose number of neurons matches the number of dog breeds
densenet_fine_tune.add(tf.keras.layers.Dense(num_classes, activation = 'softmax'))
densenet_fine_tune.layers[0].trainable = False

# Compile the model
densenet_fine_tune.compile(loss="categorical_crossentropy",
                           optimizer="Adam", metrics=['accuracy'])
densenet_fine_tune.summary()
# Set up callbacks: a ModelCheckpoint to save the best weights, and EarlyStopping to end training once the model stops learning anything new
checkpoint_path = "/home/hyb/狗狗识别/dogImages/saved_models/checkpoint_densenet/cp.ckpt"
checkpoint_dir = os.path.dirname(checkpoint_path)
checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_path,
                                                         verbose=1,
                                                         save_weights_only=True,
                                                         save_best_only=True)
callback_dog = tf.keras.callbacks.EarlyStopping(patience=5, min_delta=1e-3)
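Side note (my addition, not in the original post): EarlyStopping also accepts restore_best_weights=True, which rolls the model back to its best epoch automatically, making the checkpoint reload below optional.
# Optional variant (sketch): let EarlyStopping restore the best weights itself
# callback_dog = tf.keras.callbacks.EarlyStopping(patience=5, min_delta=1e-3,
#                                                 restore_best_weights=True)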
history = densenet_fine_tune.fit(train_generator,
                                 steps_per_epoch=train_num // batch_size,
                                 epochs=50,
                                 validation_data=valid_generator,
                                 validation_steps=valid_num // batch_size,
                                 callbacks=[checkpoint_callback, callback_dog])
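An optional second training stage (my own sketch with illustrative values such as 10 extra epochs and a 1e-5 learning rate, not part of the original post): once the new classification head has converged, the DenseNet base can be unfrozen and the whole network fine-tuned slowly.
# Optional fine-tuning stage (sketch): unfreeze the DenseNet base and retrain with a small learning rate
# densenet_fine_tune.layers[0].trainable = True
# densenet_fine_tune.compile(loss="categorical_crossentropy",
#                            optimizer=tf.keras.optimizers.Adam(learning_rate=1e-5),
#                            metrics=['accuracy'])
# densenet_fine_tune.fit(train_generator,
#                        steps_per_epoch=train_num // batch_size,
#                        epochs=10,
#                        validation_data=valid_generator,
#                        validation_steps=valid_num // batch_size,
#                        callbacks=[checkpoint_callback, callback_dog])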

# Load the best weights
densenet_fine_tune.load_weights(checkpoint_path)

# Evaluate the model on the test set
loss,acc = densenet_fine_tune.evaluate(test_generator,verbose=1)
print("Restored model, accuracy: {:5.2f}%".format(100*acc))

(Screenshot: test-set evaluation output.)

# Save the model
densenet_fine_tune.save('/home/hyb/狗狗识别/dogImages/saved_models/model_h5/checkpoint_densenet/my_model.h5') 
# Recreate the exact same model, including its weights and optimizer state
new_model = tf.keras.models.load_model('/home/hyb/狗狗识别/dogImages/saved_models/model_h5/checkpoint_densenet/my_model.h5')

# Show the network architecture
new_model.summary()
# Get the list of dog breeds so that predictions can be mapped back to names
files = sorted(os.listdir('./dogImages/test/'))
print(files)
# The 133 dog breeds
files=['001.Affenpinscher', '002.Afghan_hound', '003.Airedale_terrier', '004.Akita', '005.Alaskan_malamute', '006.American_eskimo_dog', '007.American_foxhound', '008.American_staffordshire_terrier', '009.American_water_spaniel', '010.Anatolian_shepherd_dog', '011.Australian_cattle_dog', '012.Australian_shepherd', '013.Australian_terrier', '014.Basenji', '015.Basset_hound', '016.Beagle', '017.Bearded_collie', '018.Beauceron', '019.Bedlington_terrier', '020.Belgian_malinois', '021.Belgian_sheepdog', '022.Belgian_tervuren', '023.Bernese_mountain_dog', '024.Bichon_frise', '025.Black_and_tan_coonhound', '026.Black_russian_terrier', '027.Bloodhound', '028.Bluetick_coonhound', '029.Border_collie', '030.Border_terrier', '031.Borzoi', '032.Boston_terrier', '033.Bouvier_des_flandres', '034.Boxer', '035.Boykin_spaniel', '036.Briard', '037.Brittany', '038.Brussels_griffon', '039.Bull_terrier', '040.Bulldog', '041.Bullmastiff', '042.Cairn_terrier', '043.Canaan_dog', '044.Cane_corso', '045.Cardigan_welsh_corgi', '046.Cavalier_king_charles_spaniel', '047.Chesapeake_bay_retriever', '048.Chihuahua', '049.Chinese_crested', '050.Chinese_shar-pei', '051.Chow_chow', '052.Clumber_spaniel', '053.Cocker_spaniel', '054.Collie', '055.Curly-coated_retriever', '056.Dachshund', '057.Dalmatian', '058.Dandie_dinmont_terrier', '059.Doberman_pinscher', '060.Dogue_de_bordeaux', '061.English_cocker_spaniel', '062.English_setter', '063.English_springer_spaniel', '064.English_toy_spaniel', '065.Entlebucher_mountain_dog', '066.Field_spaniel', '067.Finnish_spitz', '068.Flat-coated_retriever', '069.French_bulldog', '070.German_pinscher', '071.German_shepherd_dog', '072.German_shorthaired_pointer', '073.German_wirehaired_pointer', '074.Giant_schnauzer', '075.Glen_of_imaal_terrier', '076.Golden_retriever', '077.Gordon_setter', '078.Great_dane', '079.Great_pyrenees', '080.Greater_swiss_mountain_dog', '081.Greyhound', '082.Havanese', '083.Ibizan_hound', '084.Icelandic_sheepdog', '085.Irish_red_and_white_setter', '086.Irish_setter', '087.Irish_terrier', '088.Irish_water_spaniel', '089.Irish_wolfhound', '090.Italian_greyhound', '091.Japanese_chin', '092.Keeshond', '093.Kerry_blue_terrier', '094.Komondor', '095.Kuvasz', '096.Labrador_retriever', '097.Lakeland_terrier', '098.Leonberger', '099.Lhasa_apso', '100.Lowchen', '101.Maltese', '102.Manchester_terrier', '103.Mastiff', '104.Miniature_schnauzer', '105.Neapolitan_mastiff', '106.Newfoundland', '107.Norfolk_terrier', '108.Norwegian_buhund', '109.Norwegian_elkhound', '110.Norwegian_lundehund', '111.Norwich_terrier', '112.Nova_scotia_duck_tolling_retriever', '113.Old_english_sheepdog', '114.Otterhound', '115.Papillon', '116.Parson_russell_terrier', '117.Pekingese', '118.Pembroke_welsh_corgi', '119.Petit_basset_griffon_vendeen', '120.Pharaoh_hound', '121.Plott', '122.Pointer', '123.Pomeranian', '124.Poodle', '125.Portuguese_water_dog', '126.Saint_bernard', '127.Silky_terrier', '128.Smooth_fox_terrier', '129.Tibetan_mastiff', '130.Welsh_springer_spaniel', '131.Wirehaired_pointing_griffon', '132.Xoloitzcuintli', '133.Yorkshire_terrier']
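Rather than hard-coding the 133 names, the same mapping can be recovered from the training generator (a small sketch of my own, assuming the breed folders keep the 001.-prefixed naming so alphabetical order matches the class indices):
# Alternative (sketch): rebuild the index-to-breed mapping from the generator
idx_to_class = {v: k for k, v in train_generator.class_indices.items()}
files = [idx_to_class[i] for i in range(num_classes)]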
# Predict the breed of a new image: mine is a Husky, which corresponds to number 5 in the list; the top score is only about 3%, but it is still higher than most of the other breeds
from tensorflow.keras.preprocessing.image import load_img, img_to_array
pic_dog = '/home/hyb/下载/6d97155392b885da3b55dd249c337433.jpg'
pic_dog = load_img(pic_dog, target_size=(224, 224))
pic_dog = img_to_array(pic_dog)
pic_dog = pic_dog.reshape(1, 224, 224, 3)
# Apply the same DenseNet preprocessing used during training (rather than a plain /255 rescale)
pic_dog = tf.keras.applications.densenet.preprocess_input(pic_dog)
result = densenet_fine_tune.predict(pic_dog)
# print(result)
for count, prob in enumerate(result[0]):
    print("Probability of {}: {:.3f}%".format(files[count], prob * 100))
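Printing all 133 probabilities is hard to read; here is a short sketch (my addition) that reports only the five most likely breeds.
# Top-5 predictions (sketch)
top5 = np.argsort(result[0])[::-1][:5]
for idx in top5:
    print("{}: {:.3f}%".format(files[idx], result[0][idx] * 100))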

(Screenshots: per-breed prediction probabilities for the test image.)

# Plot the model's learning curves
def plot_learning_curves(history, label, epochs, min_value, max_value):
    data = {}
    data[label] = history.history[label]
    data['val_' + label] = history.history['val_' + label]
    pd.DataFrame(data).plot(figsize=(8, 5))
    plt.grid(True)
    plt.axis([0, epochs, min_value, max_value])
    plt.show()

plot_learning_curves(history, 'accuracy', 20, 0, 1)
plot_learning_curves(history, 'loss', 20, 0, 2)