Keras implementation of InceptionV3

InceptionV3.py

from tensorflow import keras
from tensorflow.keras import layers

def InceptionBlockA(input, pooling_channel, idx):
    x1 = layers.Conv2D(64, 1, strides=(1, 1), padding='same', use_bias=False, name='inception_a'+str(idx)+'_1x1')(input)
    x1 = layers.BatchNormalization(scale=False, name='inception_a'+str(idx)+'_1x1_bn')(x1)
    x1 = layers.Activation('relu', name='inception_a'+str(idx)+'_1x1_relu')(x1)

    x2 = layers.Conv2D(48, 1, strides=(1, 1), padding='same', use_bias=False, name='inception_a'+str(idx)+'_5x5_reduce')(input)
    x2 = layers.BatchNormalization(scale=False, name='inception_a'+str(idx)+'_5x5_reduce_bn')(x2)
    x2 = layers.Activation('relu', name='inception_a'+str(idx)+'_5x5_reduce_relu')(x2)
    x2 = layers.Conv2D(64, 5, strides=(1, 1), padding='same', use_bias=False, name='inception_a'+str(idx)+'_5x5')(x2)
    x2 = layers.BatchNormalization(scale=False, name='inception_a'+str(idx)+'_5x5_bn')(x2)
    x2 = layers.Activation('relu', name='inception_a'+str(idx)+'_5x5_relu')(x2)

    x3 = layers.Conv2D(64, 1, strides=(1, 1), padding='same', use_bias=False, name='inception_a'+str(idx)+'_3x3_reduce')(input)
    x3 = layers.BatchNormalization(scale=False, name='inception_a'+str(idx)+'_3x3_reduce_bn')(x3)
    x3 = layers.Activation('relu', name='inception_a'+str(idx)+'_3x3_reduce_relu')(x3)
    x3 = layers.Conv2D(96, 3, strides=(1, 1), padding='same', use_bias=False, name='inception_a'+str(idx)+'_3x3_1')(x3)
    x3 = layers.BatchNormalization(scale=False, name='inception_a'+str(idx)+'_3x3_1_bn')(x3)
    x3 = layers.Activation('relu', name='inception_a'+str(idx)+'_3x3_1_relu')(x3)
    x3 = layers.Conv2D(96, 3, strides=(1, 1), padding='same', use_bias=False, name='inception_a'+str(idx)+'_3x3_2')(x3)
    x3 = layers.BatchNormalization(scale=False, name='inception_a'+str(idx)+'_3x3_2_bn')(x3)
    x3 = layers.Activation('relu', name='inception_a'+str(idx)+'_3x3_2_relu')(x3)

    x4 = layers.AveragePooling2D((3, 3), strides=(1, 1), padding='same', name='inception_a'+str(idx)+'_pool')(input)
    x4 = layers.Conv2D(pooling_channel, 1, strides=(1, 1), padding='same', use_bias=False, name='inception_a'+str(idx)+'_pool_proj')(x4)
    x4 = layers.BatchNormalization(scale=False, name='inception_a'+str(idx)+'_pool_proj_bn')(x4)
    x4 = layers.Activation('relu', name='inception_a'+str(idx)+'_pool_proj_relu')(x4)

    return layers.concatenate([x1, x2, x3, x4], name='inception_a'+str(idx)+'_output')


def ReductionA(input):
    x1 = layers.Conv2D(384, 3, strides=(2, 2), padding='valid', use_bias=False, name='reduction_a_3x3')(input)
    x1 = layers.BatchNormalization(scale=False, name='reduction_a_3x3_bn')(x1)
    x1 = layers.Activation('relu', name='reduction_a_3x3_relu')(x1)

    x2 = layers.Conv2D(64, 1, strides=(1, 1), padding='valid', use_bias=False, name='reduction_a_3x3_2_reduce')(input)
    x2 = layers.BatchNormalization(scale=False, name='reduction_a_3x3_2_reduce_bn')(x2)
    x2 = layers.Activation('relu', name='reduction_a_3x3_2_reduce_relu')(x2)
    x2 = layers.Conv2D(96, 3, strides=(1, 1), padding='same', use_bias=False, name='reduction_a_3x3_2')(x2)
    x2 = layers.BatchNormalization(scale=False, name='reduction_a_3x3_2_bn')(x2)
    x2 = layers.Activation('relu', name='reduction_a_3x3_2_relu')(x2)
    x2 = layers.Conv2D(96, 3, strides=(2, 2), padding='valid', use_bias=False, name='reduction_a_3x3_3')(x2)
    x2 = layers.BatchNormalization(scale=False, name='reduction_a_3x3_3_bn')(x2)
    x2 = layers.Activation('relu', name='reduction_a_3x3_3_relu')(x2)

    x3 = layers.MaxPooling2D((3, 3), strides=(2, 2), padding='valid', name='reduction_a_pool')(input)

    return layers.concatenate([x1, x2, x3], name='reduction_a_concat')


def InceptionBlockB(input, x2_channel=[128, 128, 192], x3_channel=[128, 128, 128, 128, 192], idx=1):
    x1 = layers.Conv2D(192, 1, strides=(1, 1), padding='same', use_bias=False, name='inception_b'+str(idx)+'_1x1_2')(input)
    x1 = layers.BatchNormalization(scale=False, name='inception_b'+str(idx)+'_1x1_2_bn')(x1)
    x1 = layers.Activation('relu', name='inception_b'+str(idx)+'_1x1_2_relu')(x1)

    x2 = layers.Conv2D(x2_channel[0], 1, strides=(1, 1), padding='same', use_bias=False, name='inception_b'+str(idx)+'_1x7_reduce')(input)
    x2 = layers.BatchNormalization(scale=False, name='inception_b'+str(idx)+'_1x7_reduce_bn')(x2)
    x2 = layers.Activation('relu', name='inception_b'+str(idx)+'_1x7_reduce_relu')(x2)
    x2 = layers.Conv2D(x2_channel[1], (7, 1), strides=(1, 1), padding='same', use_bias=False, name='inception_b'+str(idx)+'_1x7')(x2)
    x2 = layers.BatchNormalization(scale=False, name='inception_b'+str(idx)+'_1x7_bn')(x2)
    x2 = layers.Activation('relu', name='inception_b'+str(idx)+'_1x7_relu')(x2)
    x2 = layers.Conv2D(x2_channel[2], (1, 7), strides=(1, 1), padding='same', use_bias=False, name='inception_b'+str(idx)+'_7x1')(x2)
    x2 = layers.BatchNormalization(scale=False, name='inception_b'+str(idx)+'_7x1_bn')(x2)
    x2 = layers.Activation('relu', name='inception_b'+str(idx)+'_7x1_relu')(x2)

    x3 = layers.Conv2D(x3_channel[0], 1, strides=(1, 1), padding='same', use_bias=False, name='inception_b'+str(idx)+'_7x1_reduce')(input)
    x3 = layers.BatchNormalization(scale=False, name='inception_b'+str(idx)+'_7x1_reduce_bn')(x3)
    x3 = layers.Activation('relu', name='inception_b'+str(idx)+'_7x1_reduce_relu')(x3)
    x3 = layers.Conv2D(x3_channel[1], (1, 7), strides=(1, 1), padding='same', use_bias=False, name='inception_b'+str(idx)+'_7x1_2')(x3)
    x3 = layers.BatchNormalization(scale=False, name='inception_b'+str(idx)+'_7x1_2_bn')(x3)
    x3 = layers.Activation('relu', name='inception_b'+str(idx)+'_7x1_2_relu')(x3)
    x3 = layers.Conv2D(x3_channel[2], (7, 1), strides=(1, 1), padding='same', use_bias=False, name='inception_b'+str(idx)+'_1x7_2')(x3)
    x3 = layers.BatchNormalization(scale=False, name='inception_b'+str(idx)+'_1x7_2_bn')(x3)
    x3 = layers.Activation('relu', name='inception_b'+str(idx)+'_1x7_2_relu')(x3)
    x3 = layers.Conv2D(x3_channel[3], (1, 7), strides=(1, 1), padding='same', use_bias=False, name='inception_b'+str(idx)+'_7x1_3')(x3)
    x3 = layers.BatchNormalization(scale=False, name='inception_b'+str(idx)+'_7x1_3_bn')(x3)
    x3 = layers.Activation('relu', name='inception_b'+str(idx)+'_7x1_3_relu')(x3)
    x3 = layers.Conv2D(x3_channel[4], (7, 1), strides=(1, 1), padding='same', use_bias=False, name='inception_b'+str(idx)+'_1x7_3')(x3)
    x3 = layers.BatchNormalization(scale=False, name='inception_b'+str(idx)+'_1x7_3_bn')(x3)
    x3 = layers.Activation('relu', name='inception_b'+str(idx)+'_1x7_3_relu')(x3)

    x4 = layers.AveragePooling2D((3, 3), strides=(1, 1), padding='same', name='inception_b'+str(idx)+'_pool_ave')(input)
    x4 = layers.Conv2D(192, 1, strides=(1, 1), padding='same', use_bias=False, name='inception_b'+str(idx)+'_1x1')(x4)
    x4 = layers.BatchNormalization(scale=False, name='inception_b'+str(idx)+'_1x1_bn')(x4)
    x4 = layers.Activation('relu', name='inception_b'+str(idx)+'_1x1_relu')(x4)

    return layers.concatenate([x1, x2, x3, x4], name='inception_b'+str(idx)+'_concat')


def ReductionB(input):
    x1 = layers.Conv2D(192, 1, strides=(1, 1), padding='valid', use_bias=False, name='reduction_b_3x3_reduce')(input)
    x1 = layers.BatchNormalization(scale=False, name='reduction_b_3x3_reduce_bn')(x1)
    x1 = layers.Activation('relu', name='reduction_b_3x3_reduce_relu')(x1)
    x1 = layers.Conv2D(320, 3, strides=(2, 2), padding='valid', use_bias=False, name='reduction_b_3x3')(x1)
    x1 = layers.BatchNormalization(scale=False, name='reduction_b_3x3_bn')(x1)
    x1 = layers.Activation('relu', name='reduction_b_3x3_relu')(x1)

    x2 = layers.Conv2D(192, 1, strides=(1, 1), padding='valid', use_bias=False, name='reduction_b_1x7_reduce')(input)
    x2 = layers.BatchNormalization(scale=False, name='reduction_b_1x7_reduce_bn')(x2)
    x2 = layers.Activation('relu', name='reduction_b_1x7_reduce_relu')(x2)
    x2 = layers.Conv2D(192, (7, 1), strides=(1, 1), padding='same', use_bias=False, name='reduction_b_1x7')(x2)
    x2 = layers.BatchNormalization(scale=False, name='reduction_b_1x7_bn')(x2)
    x2 = layers.Activation('relu', name='reduction_b_1x7_relu')(x2)
    x2 = layers.Conv2D(192, (1, 7), strides=(1, 1), padding='same', use_bias=False, name='reduction_b_7x1')(x2)
    x2 = layers.BatchNormalization(scale=False, name='reduction_b_7x1_bn')(x2)
    x2 = layers.Activation('relu', name='reduction_b_7x1_relu')(x2)
    x2 = layers.Conv2D(192, 3, strides=(2, 2), padding='valid', use_bias=False, name='reduction_b_3x3_2')(x2)
    x2 = layers.BatchNormalization(scale=False, name='reduction_b_3x3_2_bn')(x2)
    x2 = layers.Activation('relu', name='reduction_b_3x3_2_relu')(x2)

    x3 = layers.MaxPooling2D((3, 3), strides=(2, 2), padding='valid', name='reduction_b_pool')(input)

    return layers.concatenate([x1, x2, x3], name='reduction_b_concat')


def InceptionBlockC(input, idx):
    x1 = layers.Conv2D(320, 1, strides=(1, 1), padding='same', use_bias=False, name='inception_c'+str(idx)+'_1x1_2')(input)
    x1 = layers.BatchNormalization(scale=False, name='inception_c'+str(idx)+'_1x1_2_bn')(x1)
    x1 = layers.Activation('relu', name='inception_c'+str(idx)+'_1x1_2_relu')(x1)

    tmp1 = layers.Conv2D(384, 1, strides=(1, 1), padding='same', use_bias=False, name='inception_c'+str(idx)+'_1x3_reduce')(input)
    tmp1 = layers.BatchNormalization(scale=False, name='inception_c'+str(idx)+'_1x3_reduce_bn')(tmp1)
    tmp1 = layers.Activation('relu', name='inception_c'+str(idx)+'_1x3_reduce_relu')(tmp1)

    x2 = layers.Conv2D(384, (3, 1), strides=(1, 1), padding='same', use_bias=False, name='inception_c'+str(idx)+'_1x3')(tmp1)
    x2 = layers.BatchNormalization(scale=False, name='inception_c'+str(idx)+'_1x3_bn')(x2)
    x2 = layers.Activation('relu', name='inception_c'+str(idx)+'_1x3_relu')(x2)

    x3 = layers.Conv2D(384, (1, 3), strides=(1, 1), padding='same', use_bias=False, name='inception_c'+str(idx)+'_3x1')(tmp1)
    x3 = layers.BatchNormalization(scale=False, name='inception_c'+str(idx)+'_3x1_bn')(x3)
    x3 = layers.Activation('relu', name='inception_c'+str(idx)+'_3x1_relu')(x3)

    tmp2 = layers.Conv2D(448, 1, strides=(1, 1), padding='same', use_bias=False, name='inception_c'+str(idx)+'_3x3_reduce')(input)
    tmp2 = layers.BatchNormalization(scale=False, name='inception_c'+str(idx)+'_3x3_reduce_bn')(tmp2)
    tmp2 = layers.Activation('relu', name='inception_c'+str(idx)+'_3x3_reduce_relu')(tmp2)
    tmp2 = layers.Conv2D(384, 3, strides=(1, 1), padding='same', use_bias=False, name='inception_c'+str(idx)+'_3x3')(tmp2)
    tmp2 = layers.BatchNormalization(scale=False, name='inception_c'+str(idx)+'_3x3_bn')(tmp2)
    tmp2 = layers.Activation('relu', name='inception_c'+str(idx)+'_3x3_relu')(tmp2)

    x4 = layers.Conv2D(384, (3, 1), strides=(1, 1), padding='same', use_bias=False, name='inception_c'+str(idx)+'_1x3_2')(tmp2)
    x4 = layers.BatchNormalization(scale=False, name='inception_c'+str(idx)+'_1x3_2_bn')(x4)
    x4 = layers.Activation('relu', name='inception_c'+str(idx)+'_1x3_2_relu')(x4)

    x5 = layers.Conv2D(384, (1, 3), strides=(1, 1), padding='same', use_bias=False, name='inception_c'+str(idx)+'_3x1_2')(tmp2)
    x5 = layers.BatchNormalization(scale=False, name='inception_c'+str(idx)+'_3x1_2_bn')(x5)
    x5 = layers.Activation('relu', name='inception_c'+str(idx)+'_3x1_2_relu')(x5)

    x6 = layers.AveragePooling2D((3, 3), strides=(1, 1), padding='same', name='inception_c'+str(idx)+'_pool')(input)
    x6 = layers.Conv2D(192, 1, strides=(1, 1), padding='same', use_bias=False, name='inception_c'+str(idx)+'_1x1')(x6)
    x6 = layers.BatchNormalization(scale=False, name='inception_c'+str(idx)+'_1x1_bn')(x6)
    x6 = layers.Activation('relu', name='inception_c'+str(idx)+'_1x1_relu')(x6)

    return layers.concatenate([x1, x2, x3, x4, x5, x6], name='inception_c'+str(idx)+'_concat')


def InceptionV3(input_shape, output_class):
    input = keras.Input(shape=input_shape, name='input')
    x = layers.Conv2D(32, 3, strides=(2, 2), padding='valid', use_bias=False, name='conv1_3x3_s2')(input)
    x = layers.BatchNormalization(scale=False, name='conv1_3x3_s2_bn')(x)
    x = layers.Activation('relu', name='conv1_3x3_relu')(x)

    x = layers.Conv2D(32, 3, strides=(1, 1), padding='valid', use_bias=False, name='conv2_3x3_s1')(x)
    x = layers.BatchNormalization(scale=False, name='conv2_3x3_s1_bn')(x)
    x = layers.Activation('relu', name='conv2_3x3_relu')(x)

    x = layers.Conv2D(64, 3, strides=(1, 1), padding='same', use_bias=False, name='conv3_3x3_s1')(x)
    x = layers.BatchNormalization(scale=False, name='conv3_3x3_s1_bn')(x)
    x = layers.Activation('relu', name='conv3_3x3_relu')(x)

    x = layers.MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding='valid', name='pool1_3x3_s2')(x)

    x = layers.Conv2D(80, 1, strides=(1, 1), padding='valid', use_bias=False, name='conv4_3x3_reduce')(x)
    x = layers.BatchNormalization(scale=False, name='conv4_3x3_reduce_bn')(x)
    x = layers.Activation('relu', name='conv4_relu_3x3_reduce')(x)

    x = layers.Conv2D(192, 3, strides=(1, 1), padding='valid', use_bias=False, name='conv4_3x3')(x)
    x = layers.BatchNormalization(scale=False, name='conv4_3x3_bn')(x)
    x = layers.Activation('relu', name='conv4_relu_3x3')(x)

    x = layers.MaxPooling2D((3, 3), strides=(2, 2), padding='valid', name='pool2_3x3_s2')(x)

    x = InceptionBlockA(x, 32, 1)
    x = InceptionBlockA(x, 64, 2)
    x = InceptionBlockA(x, 64, 3)

    x = ReductionA(x)

    x = InceptionBlockB(x, x2_channel=[128, 128, 192], x3_channel=[128, 128, 128, 128, 192], idx=1)
    x = InceptionBlockB(x, x2_channel=[160, 160, 192], x3_channel=[160, 160, 160, 160, 192], idx=2)
    x = InceptionBlockB(x, x2_channel=[160, 160, 192], x3_channel=[160, 160, 160, 160, 192], idx=3)
    x = InceptionBlockB(x, x2_channel=[192, 192, 192], x3_channel=[192, 192, 192, 192, 192], idx=4)

    x = ReductionB(x)

    x = InceptionBlockC(x, 1)
    x = InceptionBlockC(x, 2)

    # x = layers.AveragePooling2D((8, 8), name='pool_8x8_s1')(x)
    # x = layers.Flatten(name='flatten')(x)
    x = layers.GlobalAveragePooling2D(name='pool_8x8_s1')(x)
    x = layers.Dense(output_class, activation='softmax', name='dense')(x)

    model = keras.Model(input, x, name='inceptionv3')

    model.summary()
    return model

if __name__ == '__main__':
    model = InceptionV3((299, 299, 3), 1000)
    print('Done.')
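
A quick way to sanity-check the model definition above is to push one random image through a freshly built network and confirm the output shape and the softmax normalization. The snippet below is a minimal sketch; it assumes the file above is saved as InceptionV3.py in the working directory.

import numpy as np
from InceptionV3 import InceptionV3

# Build the model for 299x299 RGB inputs and 1000 classes, as in the __main__ block above.
model = InceptionV3((299, 299, 3), 1000)

# One random image is enough to verify that the graph is wired correctly.
dummy = np.random.rand(1, 299, 299, 3).astype('float32')
probs = model.predict(dummy)

print(probs.shape)          # expected: (1, 1000)
print(float(probs.sum()))   # softmax outputs should sum to ~1.0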

train_inceptionv3.py

import numpy as np
import cv2
from InceptionV3 import InceptionV3
from tensorflow.keras import backend as K   # K.set_image_dim_ordering('tf')
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping, LearningRateScheduler, ReduceLROnPlateau
from tensorflow.keras import optimizers
import matplotlib.pyplot as plt

NUM_CLASSES = 2
INPUT_IMG_SIZE = 299

def show_train_history(train_history, train, validation):
    plt.plot(train_history.history[train])
    plt.plot(train_history.history[validation])
    plt.title('Train history')
    plt.ylabel(train)
    plt.xlabel('Epoch')
    plt.legend(['train', 'validation'], loc='upper left')
    plt.show()

def generate_arrays_from_file(root_path, lines, num_classes, batch_size):
    num = len(lines)   # total number of samples
    i = 0
    while 1:
        x_images_normalize = []
        y_labels_onehot = []

        # collect one batch of samples
        for b in range(batch_size):
            if i == 0:
                np.random.shuffle(lines)
            img_path = root_path + "\\image\\train\\" + lines[i].split(";")[0]  # cat dog datasets
            # img_path = root_path + lines[i].split(" ")[0]                     # FlowerPhotos datasets
            img = cv2.imread(img_path)
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            img = cv2.resize(img, (INPUT_IMG_SIZE, INPUT_IMG_SIZE))
            img = img / 255
            x_images_normalize.append(img)
            label = to_categorical(int(lines[i].split(";")[1]), num_classes=num_classes)    # cat dog datasets
            # label = to_categorical(lines[i].split(" ")[1], num_classes=num_classes)  # FlowerPhotos datasets
            y_labels_onehot.append(label)
            i = (i+1) % num  # wrap around; the data is reshuffled at the start of each epoch
        x_images_normalize = np.array(x_images_normalize)
        #x_images_normalize = x_images_normalize.reshape(-1, 224, 224, 3)
        y_labels_onehot = np.array(y_labels_onehot)
        yield (x_images_normalize, y_labels_onehot)

if __name__ == "__main__":
    lines = []

    ### cat dog dataset
    root_path = r"D:\03.Data\01.CatDog"
    with open(root_path + "\\dataset.txt") as f:
       lines = f.readlines()

    ### FlowerPhotos dataset
    # root_path = r"D:\03.Data\02.FlowerPhotos"
    # with open(root_path + "\\train.txt") as f:
    #   lines = f.readlines()

    np.random.shuffle(lines)  # shuffle the data randomly
    val_ratio = 0.1
    val_num = int(len(lines) * val_ratio)
    val_lines = lines[0:val_num]
    train_lines = lines[val_num:]

    model = InceptionV3((INPUT_IMG_SIZE, INPUT_IMG_SIZE, 3), NUM_CLASSES)

    # Checkpoint saving: write a checkpoint every epoch
    # https://keras-zh.readthedocs.io/callbacks/#modelcheckpoint
    log_path = r"D:\02.Work\00.LearnML\003.Net\InceptionV3\log\\"
    checkpoint = ModelCheckpoint(log_path + "weights.epoch{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.hdf5",
                                 monitor='accuracy',  # monitor: loss / accuracy / val_loss / val_accuracy / lr
                                 verbose=0,
                                 save_best_only=False,  # set to True to keep only the best checkpoint
                                 save_weights_only=False,
                                 period=1)

    # Learning-rate schedule: if the monitored accuracy has not improved for 3 epochs, halve the learning rate and keep training
    # https://keras-zh.readthedocs.io/callbacks/#reducelronplateau
    reduce_lr = ReduceLROnPlateau(monitor='accuracy', factor=0.5, patience=3)  # monitor: loss / accuracy / val_loss / val_accuracy / lr

    # When val_loss stops improving, the model is essentially trained and training can stop early
    # https://keras-zh.readthedocs.io/callbacks/#earlystopping
    early_stopping = EarlyStopping(monitor='val_loss', min_delta=0.0001, patience=10, verbose=0)

    # Cross-entropy loss with the Adam optimizer
    # opti = optimizers.SGD(lr=0.001, decay=1e-6, momentum=0.9, nesterov=True)
    opti = optimizers.Adam(learning_rate=1e-3)
    model.compile(optimizer=opti, loss='categorical_crossentropy', metrics=['accuracy'])

    batch_size = 128//8
    print('Train on {} samples, val on {} samples, with batch size {}.'.format(len(train_lines), len(val_lines), batch_size))

    # Start training (model.fit accepts Python generators in TF2; fit_generator is deprecated)
    train_history = model.fit(generate_arrays_from_file(root_path, train_lines, NUM_CLASSES, batch_size),
                              steps_per_epoch=max(len(train_lines)//batch_size, 1),
                              epochs=10,
                              verbose=1,
                              callbacks=[checkpoint, reduce_lr, early_stopping],
                              validation_data=generate_arrays_from_file(root_path, val_lines, NUM_CLASSES, batch_size),
                              validation_steps=max(len(val_lines)//batch_size, 1),
                              validation_freq=1,
                              initial_epoch=0)

    # Save the trained weights
    model.save_weights(log_path+'inceptionv3.h5')

    show_train_history(train_history, 'accuracy', 'val_accuracy')
    show_train_history(train_history, 'loss', 'val_loss')
    print('Done.')

    # Evaluate model accuracy
    # scores = model.evaluate(x_img_test_normalize, y_label_test_onehot, verbose=0)
    # print(len(scores))

    # Run prediction
    # prediction = model.predict_classes(x_test_image_normalize)
    # prediction[:10]   # inspect the first 10 predictions
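
For reference, generate_arrays_from_file only assumes that each line of dataset.txt holds an image file name and an integer class index separated by a semicolon, with the images stored under root_path\image\train. A hypothetical dataset.txt for the cat/dog case would therefore look like the lines below (the file names are made up for illustration):

cat.0.jpg;0
cat.1.jpg;0
dog.0.jpg;1
dog.1.jpg;1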

predict_inceptionv3.py

import matplotlib.pyplot as plt

from InceptionV3 import InceptionV3
import cv2
import numpy as np
from tensorflow.keras import backend as K   # K.set_image_dim_ordering('tf')
from tensorflow.keras.utils import to_categorical

INPUT_IMG_SIZE = 299
NUM_CLASSES = 2
label_dict = {0:'CAT', 1:'DOG'}

def show_predict_probability(y_gts, predictions, x_imgs, predict_probabilitys, idx):
    for i in range(len(label_dict)):
        print(label_dict[i]+', Probability:%1.9f'%(predict_probabilitys[idx][i]))
    print('label: ', label_dict[int(y_gts[idx])], ', predict: ', label_dict[predictions[idx]])
    plt.figure(figsize=(2, 2))
    plt.imshow(np.reshape(x_imgs[idx], (INPUT_IMG_SIZE, INPUT_IMG_SIZE, 3)))
    plt.show()

def plot_images_labels_prediction(images, labels, prediction, idx, num):
    fig = plt.gcf()
    fig.set_size_inches(12, 14)
    if num > 10: num = 10   # the 2x5 subplot grid below holds at most 10 images
    for i in range(0, num):
        ax = plt.subplot(2, 5, 1+i)
        ax.imshow(images[idx], cmap='binary')
        title = 'labels='+str(labels[idx])
        if len(prediction) > 0:
            title += "prediction="+str(prediction[idx])
        ax.set_title(title, fontsize=10)
        idx += 1
    plt.show()

if __name__ == '__main__':
    log_path = r"D:\02.Work\00.LearnML\003.Net\InceptionV3\log\\"
    model = InceptionV3((299, 299, 3), NUM_CLASSES)
    #model.load_weights(log_path+"inceptionv3.h5")
    model.load_weights(log_path+"weights.epoch008-loss0.125-val_loss0.164.hdf5")

    ### cat dog dataset
    lines = []
    root_path = r"D:\03.Data\01.CatDog"
    with open(root_path + "\\test.txt") as f:
       lines = f.readlines()

    x_images_normalize = []
    y_labels_onehot = []
    y_labels = []

    for i in range(len(lines)):
        img_path = lines[i].split(";")[0]
        img = cv2.imread(img_path)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        img = cv2.resize(img, (INPUT_IMG_SIZE, INPUT_IMG_SIZE))
        img = img / 255
        x_images_normalize.append(img)
        label = to_categorical(int(lines[i].split(";")[1]), num_classes=NUM_CLASSES)
        y_labels_onehot.append(label)
        y_labels.append(lines[i].split(";")[1].strip())
    x_images_normalize = np.array(x_images_normalize)
    # x_images_normalize = x_images_normalize.reshape(-1, INPUT_IMG_SIZE, INPUT_IMG_SIZE, 3)
    y_labels_onehot = np.array(y_labels_onehot)

    predict_probability = model.predict(x_images_normalize, verbose=1)
    predict = np.argmax(predict_probability, axis=1)

    plot_images_labels_prediction(x_images_normalize, y_labels, predict, 0, 10)
    show_predict_probability(y_labels, predict, x_images_normalize, predict_probability, 0)
    print('done')
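
The script above only visualizes individual predictions. Reusing the predict and y_labels arrays already built in __main__, a small optional addition (a sketch, not part of the original script) placed at the end of the __main__ block would also report the overall accuracy on the test list:

    # Overall test accuracy: compare the argmax predictions with the ground-truth class indices.
    y_true = np.array([int(label) for label in y_labels])
    accuracy = np.mean(predict == y_true)
    print('test accuracy: %.4f' % accuracy)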