EfficientNet transfer learning with Keras: training a 3-class classifier on your own image dataset.

# Import the required libraries
from keras import optimizers, Input
from keras.applications import imagenet_utils

from keras.preprocessing.image import ImageDataGenerator
from keras.models import *
from keras.layers import *
from keras.optimizers import *
from keras.callbacks import *
from keras.applications import *

from sklearn.preprocessing import *
from sklearn.model_selection import *
from sklearn.metrics import *

# Import the EfficientNet implementation
from efficientnet.keras import EfficientNetB0
import keras.backend as K
import tensorflow as tf
import numpy as np  # needed later for np.ones() in the attention branch

# For plotting the loss and accuracy curves during training
import matplotlib.pyplot as plt



# Hyperparameters
batch_size = 128

width = 65
height = 65
epochs = 50
NUM_TRAIN = 3660
NUM_TEST = 1152
dropout_rate = 0.2
input_shape = (height, width, 3)

train_dir = 'C:\\Users\\SH\\Documents\\WeChat Files\\AV393146198\\FileStorage\\File\\2020-10\\Image-20201516\\train'
validation_dir = 'C:\\Users\\SH\\Documents\\WeChat Files\\AV393146198\\FileStorage\\File\\2020-10\\Image-20201516\\val'

train_datagen = ImageDataGenerator(          # on-the-fly data augmentation for training images
      rescale=1./255,
      rotation_range=40,
      width_shift_range=0.2,
      height_shift_range=0.2,
      shear_range=0.2,
      zoom_range=0.2,
      horizontal_flip=True,
      fill_mode='nearest')

# Note that the validation data should not be augmented!
# test_datagen = ImageDataGenerator(rescale=1./255)
# Here we use a validation set rather than a separate test set, and deliberately apply the same online augmentation to it as to the training set.
validation_datagen = ImageDataGenerator(
      rescale=1./255,
      rotation_range=40,
      width_shift_range=0.2,
      height_shift_range=0.2,
      shear_range=0.2,
      zoom_range=0.2,
      horizontal_flip=True,
      fill_mode='nearest')

train_generator = train_datagen.flow_from_directory(
        # This is the target directory
        train_dir,
        # All images will be resized to target height and width.
        target_size=(height, width),
        batch_size=batch_size,
        # Since we use categorical_crossentropy loss, we need categorical labels
        class_mode='categorical')

validation_generator = validation_datagen.flow_from_directory(
        validation_dir,
        target_size=(height, width),
        batch_size=batch_size,
        class_mode='categorical')
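
flow_from_directory infers the three class labels from the subdirectory names, so both train and val must contain one folder per class. A minimal sketch of the expected layout (the folder names here are illustrative) plus a runtime check:

# Expected layout (subfolder names are illustrative; classes are sorted
# alphabetically and mapped to indices 0..2):
#   train/
#     0/  000.bmp, 001.bmp, ...
#     1/  ...
#     2/  ...
#   val/
#     0/ ...  1/ ...  2/ ...
# The actual mapping can be verified at runtime:
print(train_generator.class_indices)  # e.g. {'0': 0, '1': 1, '2': 2}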



def history_plot(history_fit):
    plt.figure(figsize=(12, 6))

    # summarize history for accuracy
    plt.subplot(121)
    plt.plot(history_fit.history["accuracy"])
    plt.plot(history_fit.history["val_accuracy"])
    plt.title("model accuracy")
    plt.ylabel("accuracy")
    plt.xlabel("epoch")
    plt.legend(["train", "valid"], loc="upper left")

    # summarize history for loss
    plt.subplot(122)
    plt.plot(history_fit.history["loss"])
    plt.plot(history_fit.history["val_loss"])
    plt.title("model loss")
    plt.ylabel("loss")
    plt.xlabel("epoch")
    plt.legend(["train", "test"], loc="upper left")

    plt.show()

# Fine-tune the model in two stages
def fine_tune_model(model, optimizer, epochs, freeze_num):
    '''
    Description: fine-tune the given pre-trained model and save the best
    weights in .hdf5 format.

    model: the model to fine-tune (VGG16, ResNet50, ...)
    optimizer: optimizer used for both stages
    epochs: number of epochs for the second stage (fine-tune all layers)
    freeze_num: number of leading layers to freeze during the first stage
    '''


    for layer in model.layers[:freeze_num]:
        layer.trainable = False

    model.compile(optimizer=optimizer,
                  loss="categorical_crossentropy",
                  metrics=["accuracy"])


    # stage 1: train briefly with the leading layers frozen
    model.fit_generator(train_generator,
                        epochs=10,
                        shuffle=True,
                        verbose=1,
                        validation_data=validation_generator)
    print('Finished stage 1')

    # second: fine-tune all layers
    for layer in model.layers[:]:
        layer.trainable = True

    rc = ReduceLROnPlateau(monitor="val_accuracy",
                           factor=0.2,
                           patience=4,
                           verbose=1,
                           mode='max')

    model_name = model.name + ".hdf5"
    mc = ModelCheckpoint(model_name,
                         monitor="val_accuracy",
                         save_best_only=True,
                         verbose=1,
                         mode='max')
    el = EarlyStopping(monitor="val_accuracy",
                       min_delta=0,
                       patience=5,
                       verbose=1,
                       restore_best_weights=True)

    model.compile(optimizer=optimizer,
                  loss='categorical_crossentropy',
                  metrics=["accuracy"])


    history_fit = model.fit_generator(train_generator,
                                      epochs=epochs,
                                      shuffle=True,
                                      verbose=1,
                                      validation_data=validation_generator,
                                      callbacks=[mc, rc, el])

    print('Finish fine-tune')
    return history_fit
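
Note that NUM_TRAIN and NUM_TEST are defined above but never used: in this Keras generation, fit_generator infers the number of steps from the generator's length, which already covers each directory once per epoch. If you prefer to set the steps explicitly, a minimal sketch (assuming the counts match the folder contents):

# Optional: pass explicit step counts to fit_generator
steps_per_epoch = NUM_TRAIN // batch_size     # 3660 // 128 = 28
validation_steps = NUM_TEST // batch_size     # 1152 // 128 = 9
# model.fit_generator(train_generator,
#                     steps_per_epoch=steps_per_epoch,
#                     validation_data=validation_generator,
#                     validation_steps=validation_steps, ...)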


from keras.layers import GlobalAveragePooling2D, GlobalMaxPooling2D, Reshape, Dense, multiply, Permute, Concatenate, \
    Conv2D, Add, Activation, Lambda
from keras import backend as K

def bilinear_efficient_atten_model(img_rows, img_cols):
    K.clear_session()

    in_lay = Input(shape=(img_rows, img_cols, 3))
    base_model = EfficientNetB0(input_shape=(img_rows, img_cols, 3), weights="imagenet", include_top=False)

    pt_depth = base_model.get_output_shape_at(0)[-1]

    cnn_features_a = base_model(in_lay)
    cnn_bn_features_a = BatchNormalization()(cnn_features_a)

    # attention mechanism:
    # learn a per-pixel mask that turns locations in the GAP on and off
    atten_layer = Conv2D(64, kernel_size=(1, 1), padding="same", activation="relu")(Dropout(0.5)(cnn_bn_features_a))
    atten_layer = Conv2D(16, kernel_size=(1, 1), padding="same", activation="relu")(atten_layer)
    atten_layer = Conv2D(8, kernel_size=(1, 1), padding="same", activation="relu")(atten_layer)
    atten_layer = Conv2D(1, kernel_size=(1, 1), padding="valid", activation="sigmoid")(atten_layer)  # H,W,1
    # fan it out to all of the channels
    up_c2_w = np.ones((1, 1, 1, pt_depth))  # 1,1,C
    up_c2 = Conv2D(pt_depth, kernel_size=(1, 1), padding="same", activation="linear", use_bias=False, weights=[up_c2_w])
    up_c2.trainable = True
    atten_layer = up_c2(atten_layer)  # H,W,C

    cnn_atten_out_a = multiply([atten_layer, cnn_bn_features_a])  # H,W,C

    # "bilinear" pooling with two identical branches: multiplying the
    # attended features by themselves squares them element-wise
    cnn_atten_out_b = cnn_atten_out_a

    cnn_out_dot = multiply([cnn_atten_out_a, cnn_atten_out_b])
    gap_features = GlobalAveragePooling2D()(cnn_out_dot)
    gap_dr = Dropout(0.25)(gap_features)
    dr_steps = Dropout(0.25)(Dense(1000, activation="relu")(gap_dr))
    out_layer = Dense(3, activation="softmax")(dr_steps)

    b_eff_atten_model = Model(inputs=[in_lay], outputs=[out_layer], name="bilinear_efficient_atten")

    return b_eff_atten_model
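
To see why the all-ones 1x1 convolution "fans out" the single attention map, here is a small standalone NumPy sketch (not part of the pipeline): with one input channel, all-ones weights, no bias, and a linear activation, each of the pt_depth output channels is just a copy of the attention map.

# Standalone sanity check of the fan-out trick (illustrative only)
import numpy as np
attn = np.random.rand(4, 4, 1)                      # an H,W,1 attention map
ones_kernel = np.ones((1, 1, 1, 8))                 # 1x1 conv, 8 output channels
fanned = attn * ones_kernel[0, 0, 0, :]             # broadcasting == the conv's effect
assert fanned.shape == (4, 4, 8)
assert np.allclose(fanned[..., 0], fanned[..., 7])  # every channel is identical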


# Build the bilinear EfficientNet attention model
img_rows, img_cols = 65, 65
befficient_model = bilinear_efficient_atten_model(img_rows, img_cols)
befficient_model.summary()

optimizer = optimizers.Adam(lr=0.0001)
epochs = 30
freeze_num = 19
befficient_model_history = fine_tune_model(befficient_model, optimizer, epochs, freeze_num)
befficient_model.save("befficient_model.h5")
history_plot(befficient_model_history)
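
Since EarlyStopping was created with restore_best_weights=True, the in-memory model already holds the best weights after fine-tuning. A quick sanity check on the validation generator could look like this (a minimal sketch using the generator-based evaluate of this Keras generation):

# Optional: re-score the restored best weights on the validation data
val_loss, val_acc = befficient_model.evaluate_generator(validation_generator)
print("val_loss=%.4f  val_acc=%.4f" % (val_loss, val_acc))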

Adapted from https://github.com/CodingChaozhang/Deep-Learning/blob/master/13%20AI%E7%A0%94%E4%B9%A0%E7%A4%BE200%E7%A7%8D%E9%B8%9F%E7%B1%BB%E8%AF%86%E5%88%AB/ai%E9%B8%9F%E7%B1%BB%E8%AF%86%E5%88%AB.ipynb

Test code

# -*- coding: utf-8 -*-
from keras.preprocessing import image
from keras.models import load_model
from PIL import Image





def predict_image(img_path):
    # Read the image and resize it to the model's 65x65 input size
    img = image.load_img(img_path, target_size=(65, 65))
    # Convert it to a NumPy array with the target shape
    x = image.img_to_array(img)
    # Add the batch dimension
    x = x.reshape((1,) + x.shape)
    # Apply the same rescaling used during training
    x /= 255.
    result = model.predict(x)[0]

    return result

from tensorflow import nn
from keras.backend import shape
from keras.layers import Dropout

# The efficientnet package defines a custom FixedDropout layer and uses the
# swish activation; both must be passed to load_model as custom objects.
class FixedDropout(Dropout):
    def _get_noise_shape(self, inputs):
        if self.noise_shape is None:
            return self.noise_shape
        return tuple([shape(inputs)[i] if sh is None else sh
                      for i, sh in enumerate(self.noise_shape)])

customObjects = {
    'swish': nn.swish,
    'FixedDropout': FixedDropout
}

# Load the checkpointed model
# model = load_model('./output_model_file/my_model.h5')
model = load_model('bilinear_efficient_atten.hdf5', custom_objects=customObjects)

img = 'C:\\Users\\SH\\Documents\\WeChat Files\\AV393146198\\FileStorage\\File\\2020-10\\Image-20201516\\val\\0\\000.bmp'
# img_path = './12499.jpg'

print(predict_image(img))
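
The script prints the raw 3-way softmax vector; mapping it back to a class name needs the label order from training. A minimal sketch, assuming the classes were the folder names '0', '1', '2' (verify against train_generator.class_indices):

import numpy as np

class_names = ['0', '1', '2']  # assumed; must match train_generator.class_indices
probs = predict_image(img)
print("predicted class: %s (p=%.3f)" % (class_names[int(np.argmax(probs))], float(np.max(probs))))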

 
