Using AlexNet as an example, this post shows how to build, train, and run predictions with a network in Keras.

Building network 1

AlexNet.py

from tensorflow import keras
from tensorflow.keras import layers

def AlexNet(output_shape=2):
    model = keras.Sequential()
    model.add(keras.Input(shape=(227, 227, 3)))
    model.add(layers.Conv2D(96, (11, 11), strides=(4, 4), activation='relu', name='conv1'))
    model.add(layers.MaxPool2D((3, 3), strides=(2, 2), name='maxpool1'))
    model.add(layers.Conv2D(256, (5, 5), strides=(1, 1), padding='same', activation='relu', name='conv2'))
    model.add(layers.MaxPool2D((3, 3), strides=(2, 2), name='maxpool2'))
    model.add(layers.Conv2D(384, (3, 3), strides=(1, 1), padding='same', activation='relu', name='conv3'))
    model.add(layers.Conv2D(384, (3, 3), strides=(1, 1), padding='same', activation='relu', name='conv4'))
    model.add(layers.Conv2D(256, (3, 3), strides=(1, 1), padding='same', activation='relu', name='conv5'))
    model.add(layers.MaxPool2D((3, 3), strides=(2, 2), name='maxpool3'))
    model.add(layers.Flatten())
    model.add(layers.Dense(4096, activation='relu', name='dense1'))
    model.add(layers.Dense(4096, activation='relu', name='dense2'))
    model.add(layers.Dense(output_shape, activation='softmax', name='dense3'))
    #model.summary()

    return model

if __name__ == '__main__':
    model = AlexNet()
    model.summary()
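A quick sanity check of the geometry (a minimal sketch, not part of the original script): conv1 maps the 227x227 input to (227 - 11)/4 + 1 = 55, so its output is 55x55x96, and a dummy batch should come out of the network as one softmax vector per image.

import numpy as np
from AlexNet import AlexNet

model = AlexNet(output_shape=2)
dummy = np.random.rand(1, 227, 227, 3).astype('float32')  # one random NHWC image
out = model.predict(dummy)
print(out.shape)   # (1, 2): one softmax vector per image
print(out.sum())   # ~1.0, since dense3 is a softmax
for layer in model.layers:
    print(layer.name, layer.output_shape)  # per-layer output shapes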
Building network 2

AlexNetReduce.py

from tensorflow import keras
from tensorflow.keras import layers

def AlexNetReduce(reduce_scale=2, output_shape=2):
    model = keras.Sequential()
    model.add(keras.Input(shape=(227, 227, 3)))
    model.add(layers.Conv2D(96//reduce_scale, (11, 11), strides=(4, 4), activation='relu', name='conv1'))
    model.add(layers.BatchNormalization())
    model.add(layers.MaxPool2D((3, 3), strides=(2, 2), name='maxpool1'))
    model.add(layers.Conv2D(256//reduce_scale, (5, 5), strides=(1, 1), padding='same', activation='relu', name='conv2'))
    model.add(layers.BatchNormalization())
    model.add(layers.MaxPool2D((3, 3), strides=(2, 2), name='maxpool2'))
    model.add(layers.Conv2D(384//reduce_scale, (3, 3), strides=(1, 1), padding='same', activation='relu', name='conv3'))
    model.add(layers.Conv2D(384//reduce_scale, (3, 3), strides=(1, 1), padding='same', activation='relu', name='conv4'))
    model.add(layers.Conv2D(256//reduce_scale, (3, 3), strides=(1, 1), padding='same', activation='relu', name='conv5'))
    model.add(layers.MaxPool2D((3, 3), strides=(2, 2), name='maxpool3'))
    model.add(layers.Flatten())
    model.add(layers.Dense(4096//reduce_scale, activation='relu', name='dense1'))
    model.add(layers.Dense(4096//reduce_scale, activation='relu', name='dense2'))
    model.add(layers.Dense(output_shape, activation='softmax', name='dense3'))
    #model.summary()

    return model

if __name__ == '__main__':
    model = AlexNetReduce()
    model.summary()
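To see what reduce_scale buys, the two networks can be compared by trainable-parameter count (a sketch; model.count_params() is standard tf.keras):

from AlexNet import AlexNet
from AlexNetReduce import AlexNetReduce

full = AlexNet(output_shape=5)
for scale in (2, 4):
    small = AlexNetReduce(reduce_scale=scale, output_shape=5)
    print('scale={}: {:,} params ({:.1%} of full AlexNet)'.format(
        scale, small.count_params(), small.count_params() / full.count_params()))

Most of the savings come from the two 4096-unit dense layers, which dominate AlexNet's parameter budget.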
Training

train.py

import numpy as np
import cv2
from AlexNet import AlexNet
from AlexNetReduce import AlexNetReduce
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau
from tensorflow.keras import optimizers
import matplotlib.pyplot as plt

NUM_CLASSES = 5

def show_train_history(train_history, train, validation):
    plt.plot(train_history.history[train])
    plt.plot(train_history.history[validation])
    plt.title('Train history')
    plt.ylabel(train)
    plt.xlabel('Epoch')
    plt.legend(['train', 'validation'], loc='upper left')
    plt.show()

def generate_arrays_from_file(root_path, lines, num_classes, batch_size):
    num = len(lines)   # total number of samples
    i = 0
    while 1:
        x_images_normalize = []
        y_labels_onehot = []

        # assemble one batch of data
        for b in range(batch_size):
            if i == 0:
                np.random.shuffle(lines)
            # img_path = root_path + "\\image\\train\\" + lines[i].split(";")[0]  # cat dog datasets
            img_path = root_path + lines[i].split(" ")[0]
            img = cv2.imread(img_path)
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            img = cv2.resize(img, (227, 227))
            img = img / 255
            x_images_normalize.append(img)
            # label = to_categorical(lines[i].split(";")[1], num_classes=num_classes)  # cat dog datasets
            label = to_categorical(int(lines[i].split(" ")[1]), num_classes=num_classes)
            y_labels_onehot.append(label)
            i = (i + 1) % num  # wrap around at the end of an epoch; the data is reshuffled when i returns to 0
        x_images_normalize = np.array(x_images_normalize)
        #x_images_normalize = x_images_normalize.reshape(-1, 227, 227, 3)
        y_labels_onehot = np.array(y_labels_onehot)
        yield (x_images_normalize, y_labels_onehot)

if __name__ == "__main__":
    lines = []

    ### cat dog dataset
    # root_path = r"D:\CatDog"
    # with open(root_path + "\\dataset.txt") as f:
    #    lines = f.readlines()

    ### FlowerPhotos dataset
    root_path = r"D:\FlowerPhotos"
    with open(root_path + "\\train.txt") as f:
       lines = f.readlines()

    np.random.shuffle(lines)  # shuffle the data
    val_ratio = 0.1
    val_num = int(len(lines) * val_ratio)
    val_lines = lines[0:val_num]
    train_lines = lines[val_num:]

    # any of the networks defined above can be substituted here
    # model = AlexNet()
    model = AlexNetReduce(4, NUM_CLASSES)

    # checkpointing: save once per epoch
    # https://keras-zh.readthedocs.io/callbacks/#modelcheckpoint
    log_path = r"D:\AlexNet\log\\"
    checkpoint = ModelCheckpoint(log_path + "weights.epoch{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.hdf5",
                                 monitor='accuracy',  # one of loss, accuracy, val_loss, val_accuracy, lr
                                 verbose=0,
                                 save_best_only=False,  # set True to keep only the best checkpoint
                                 save_weights_only=False,
                                 save_freq='epoch')  # 'period' is deprecated in tf.keras

    # learning-rate schedule: if accuracy has not improved for 3 epochs, halve the learning rate and keep training
    # https://keras-zh.readthedocs.io/callbacks/#reducelronplateau
    reduce_lr = ReduceLROnPlateau(monitor='accuracy', factor=0.5, patience=3)  # monitor: loss, accuracy, val_loss, val_accuracy or lr

    # early stopping: when val_loss stops improving, the model is essentially trained and we can stop
    # https://keras-zh.readthedocs.io/callbacks/#earlystopping
    early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=10, verbose=0)

    # optimizer with categorical cross-entropy loss
    # opti = optimizers.SGD(learning_rate=0.001, decay=1e-6, momentum=0.9, nesterov=True)
    opti = optimizers.Adam(learning_rate=1e-3)  # 'lr' is a deprecated alias for learning_rate
    model.compile(optimizer=opti, loss='categorical_crossentropy', metrics=['accuracy'])

    batch_size = 128//2
    print('Train on {} samples, val on {} samples, with batch size {}.'.format(len(train_lines), len(val_lines), batch_size))

    # start training; model.fit accepts generators in tf.keras (fit_generator is deprecated)
    train_history = model.fit(generate_arrays_from_file(root_path, train_lines, NUM_CLASSES, batch_size),
                              steps_per_epoch=max(len(train_lines)//batch_size, 1),
                              epochs=10,
                              verbose=1,
                              callbacks=[checkpoint, reduce_lr, early_stopping],
                              validation_data=generate_arrays_from_file(root_path, val_lines, NUM_CLASSES, batch_size),
                              validation_steps=max(len(val_lines)//batch_size, 1),
                              validation_freq=1,
                              initial_epoch=0)

    # save the trained weights
    model.save_weights(log_path+'alexnet.h5')

    show_train_history(train_history, 'accuracy', 'val_accuracy')
    show_train_history(train_history, 'loss', 'val_loss')

    # evaluate model accuracy
    # scores = model.evaluate(x_img_test_normalize, y_label_test_onehot, verbose=0)
    # print(len(scores))

    # run predictions (predict_classes is removed in newer tf.keras; take the argmax of predict instead)
    # prediction = np.argmax(model.predict(x_test_image_normalize), axis=1)
    # prediction[:10]   # inspect the first 10 predictions
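train.py assumes an annotation file in which each line holds a relative image path and an integer class id separated by a single space; the example paths below are illustrative, not from the original post. A quick smoke test of the generator (a sketch reusing the names defined above):

# Assumed format of train.txt, one sample per line:
#   \daisy\0001.jpg 0
#   \roses\0042.jpg 2
gen = generate_arrays_from_file(root_path, train_lines, NUM_CLASSES, batch_size=4)
x_batch, y_batch = next(gen)
print(x_batch.shape)  # (4, 227, 227, 3), pixels scaled to [0, 1]
print(y_batch.shape)  # (4, 5), one-hot labels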
Prediction

predict.py

import matplotlib.pyplot as plt

from AlexNet import AlexNet
from AlexNetReduce import AlexNetReduce
import cv2
import numpy as np
from tensorflow.keras.utils import to_categorical

NUM_CLASSES = 5
label_dict = {0:'DAISY', 1:'DANDELION', 2:'ROSES', 3:'SUNFLOWERS', 4:'TULIPS'}

def show_predict_probability(y_gts, predictions, x_imgs, predict_probabilities, idx):
    for i in range(len(label_dict)):
        print(label_dict[i]+', Probability:%1.9f'%(predict_probabilities[idx][i]))
    print('label: ', label_dict[int(y_gts[idx])], ', predict: ', label_dict[predictions[idx]])
    plt.figure(figsize=(2, 2))
    plt.imshow(np.reshape(x_imgs[idx], (227, 227, 3)))
    plt.show()

def plot_images_labels_prediction(images, labels, prediction, idx, num):
    fig = plt.gcf()
    fig.set_size_inches(12, 14)
    if num > 10: num = 10  # the subplot grid below is 2 x 5
    for i in range(0, num):
        ax = plt.subplot(2, 5, 1+i)
        ax.imshow(images[idx], cmap='binary')
        title = 'labels='+str(labels[idx])
        if len(prediction) > 0:
            title += ", prediction="+str(prediction[idx])
        ax.set_title(title, fontsize=10)
        idx += 1
    plt.show()

if __name__ == '__main__':
    log_path = r"D:\AlexNet\log\\"
    # model = AlexNet()
    model = AlexNetReduce(reduce_scale=4, output_shape=NUM_CLASSES)  # reduce_scale must match the value used at training time
    model.load_weights(log_path+"alexnet.h5")

    ### FlowerPhotos dataset
    lines = []
    root_path = r"D:\FlowerPhotos"
    with open(root_path + "\\test.txt") as f:
       lines = f.readlines()

    x_images_normalize = []
    y_labels_onehot = []
    y_labels = []

    for i in range(len(lines)):
        img_path = root_path + lines[i].split(" ")[0]
        img = cv2.imread(img_path)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        img = cv2.resize(img, (227, 227))
        img = img / 255
        x_images_normalize.append(img)
        label = to_categorical(int(lines[i].split(" ")[1]), num_classes=NUM_CLASSES)
        y_labels_onehot.append(label)
        y_labels.append(int(lines[i].split(" ")[1]))
    x_images_normalize = np.array(x_images_normalize)
    # x_images_normalize = x_images_normalize.reshape(-1, 227, 227, 3)
    y_labels_onehot = np.array(y_labels_onehot)

    # predict_classes was removed from tf.keras; take the argmax of the softmax output instead
    predict_probability = model.predict(x_images_normalize, verbose=1)
    predict = np.argmax(predict_probability, axis=1)

    plot_images_labels_prediction(x_images_normalize, np.argmax(y_labels_onehot, axis=1), predict, 0, 10)
    show_predict_probability(y_labels, predict, x_images_normalize, predict_probability, 0)
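From here, overall test accuracy follows directly from the predicted class ids (a minimal follow-up sketch; y_labels holds the integer ground-truth ids read from test.txt):

y_true = np.array(y_labels)
accuracy = np.mean(predict == y_true)
print('test accuracy: {:.2%} on {} images'.format(accuracy, len(y_true)))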