慧眼识珠: 10-Class Classification of Medicinal Plants with a CNN
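This post is the full training and inference script for the project: a VGG-style convolutional network built with TensorFlow/Keras that classifies photos of ten medicinal herbs, together with an exponential learning-rate decay callback, training-history plots, and a small prediction helper.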

import os
import glob
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import backend as K
from matplotlib import pyplot

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # suppress TensorFlow INFO and WARNING log output

# Exponential learning-rate decay, floored at min_learn_rate
def exponent(global_epoch, learning_rate_base, decay_rate, min_learn_rate):
    learning_rate = learning_rate_base * pow(decay_rate, global_epoch)
    learning_rate = max(learning_rate, min_learn_rate)
    return learning_rate
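# For example, with learning_rate_base=1e-3 and decay_rate=0.9 (the values used
# below), epoch 0 trains at 1.0e-3, epoch 10 at ~3.5e-4, and epoch 50 at ~5.2e-6;
# from roughly epoch 66 onward the rate is clamped at min_learn_rate=1e-6.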

class EDS(keras.callbacks.Callback):
    # Callback that applies exponential learning-rate decay at the start of
    # each epoch and records the rate actually used at the end of each epoch.
    def __init__(self,
                 learning_rate_base: float,
                 decay_rate: float,
                 global_epoch_init: int = 0,
                 min_learn_rate: float = 0,
                 verbose: int = 0):
        super(EDS, self).__init__()
        self.learning_rate_base = learning_rate_base
        self.global_epoch = global_epoch_init
        self.decay_rate = decay_rate
        self.verbose = verbose
        self.min_learn_rate = min_learn_rate
        self.learning_rates = []

    def on_epoch_begin(self, epoch, logs=None):
        lr = exponent(global_epoch=self.global_epoch,
                      learning_rate_base=self.learning_rate_base,
                      decay_rate=self.decay_rate,
                      min_learn_rate=self.min_learn_rate)
        K.set_value(self.model.optimizer.lr, lr)
        if self.verbose > 0:
            print('\nEpoch %05d: setting learning '
                  'rate to %s.' % (self.global_epoch + 1, lr))

    def on_epoch_end(self, epoch, logs=None):
        self.global_epoch = self.global_epoch + 1
        lr = K.get_value(self.model.optimizer.lr)
        self.learning_rates.append(lr)

def visu_train_history(train_history, train_metric, validation_metric):
    # Plot one metric's training and validation curves over epochs
    pyplot.plot(train_history.history[train_metric])
    pyplot.plot(train_history.history[validation_metric])
    pyplot.title('Train History')
    pyplot.ylabel(train_metric)
    pyplot.xlabel('epoch')
    pyplot.legend(['train', 'validation'], loc='upper left')
    pyplot.show()


def load_img(path, label):
    # Decode a JPEG, resize to the model's 256x256 input, and scale pixels to [0, 1]
    image = tf.io.read_file(path)
    image = tf.image.decode_jpeg(image, channels=3)
    image = tf.image.resize(image, [256, 256])
    image = tf.cast(image, tf.float32)
    image = image / 255
    return image, label
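# Prediction-time preprocessing must match this pipeline exactly (same 256x256
# resize and /255 scaling); see load_and_preprocess_image below.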


def test_model(model_name, epoch, BATCH_SIZE, train_num, picture_name):
    # Build, train, and save the model (skipped if a saved model already exists)
    imgs_path = glob.glob('{}/*/*.jpg'.format(picture_name))

    # The class name is the parent folder of each image (portable across
    # operating systems, unlike splitting on '\\')
    all_labels_name = [os.path.basename(os.path.dirname(img_p))
                       for img_p in imgs_path]

    label_names = np.unique(all_labels_name)

    label_to_index = dict((name, i) for i, name in enumerate(label_names))

    index_to_label = dict((v, k) for k, v in label_to_index.items())

    # --------- Build the tf.data datasets and train ---------
    model_path = 'model/{}.h5'.format(model_name)

    if not os.path.exists(model_path):
        all_labels = [label_to_index.get(name) for name in all_labels_name]
        # Shuffle images and labels together with one permutation
        random_index = np.random.permutation(len(imgs_path))
        imgs_path = np.array(imgs_path)[random_index]
        all_labels = np.array(all_labels)[random_index]
        # 80/20 train/test split
        i = int(len(imgs_path) * 0.8)

        train_path = imgs_path[:i]
        train_labels = all_labels[:i]
        train_dataset = tf.data.Dataset.from_tensor_slices(
            (train_path, train_labels))
        AUTOTUNE = tf.data.experimental.AUTOTUNE
        train_ds = train_dataset.map(load_img, num_parallel_calls=AUTOTUNE)
        # repeat() makes the stream infinite; the small shuffle(50) buffer adds
        # only light re-shuffling on top of the global permutation above
        train_ds = train_ds.repeat().shuffle(50).batch(BATCH_SIZE)

        test_path = imgs_path[i:]
        test_labels = all_labels[i:]
        test_dataset = tf.data.Dataset.from_tensor_slices(
            (test_path, test_labels))
        test_ds = test_dataset.map(load_img, num_parallel_calls=AUTOTUNE)
        test_ds = test_ds.batch(BATCH_SIZE)
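        # test_ds is finite (no repeat); validation_steps below is derived
        # from its size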

        # <BatchDataset shapes: ((None, 256, 256, 3), (None,)), types: (tf.float32, tf.int32)>
        # Build the model: a VGG-style stack of Conv-BatchNorm blocks with max pooling
        model = tf.keras.Sequential([
            tf.keras.layers.Conv2D(64, (3, 3), input_shape=(256, 256, 3),
                                   activation='relu'),
            tf.keras.layers.BatchNormalization(),
            tf.keras.layers.Conv2D(64, (3, 3), activation='relu'),
            tf.keras.layers.BatchNormalization(),
            tf.keras.layers.MaxPooling2D(),
            tf.keras.layers.Conv2D(128, (3, 3), activation='relu'),
            tf.keras.layers.BatchNormalization(),
            tf.keras.layers.Conv2D(128, (3, 3), activation='relu'),
            tf.keras.layers.BatchNormalization(),
            tf.keras.layers.MaxPooling2D(),
            tf.keras.layers.Conv2D(256, (3, 3), activation='relu'),
            tf.keras.layers.BatchNormalization(),
            tf.keras.layers.Conv2D(256, (3, 3), activation='relu'),
            tf.keras.layers.BatchNormalization(),
            tf.keras.layers.MaxPooling2D(),
            tf.keras.layers.Conv2D(512, (3, 3), activation='relu'),
            tf.keras.layers.BatchNormalization(),
            tf.keras.layers.Conv2D(512, (3, 3), activation='relu'),
            tf.keras.layers.BatchNormalization(),
            tf.keras.layers.MaxPooling2D(),
            tf.keras.layers.Conv2D(512, (3, 3), activation='relu'),
            tf.keras.layers.BatchNormalization(),
            tf.keras.layers.Conv2D(512, (3, 3), activation='relu'),
            tf.keras.layers.BatchNormalization(),
            tf.keras.layers.Conv2D(512, (3, 3), activation='relu'),
            tf.keras.layers.BatchNormalization(),
            tf.keras.layers.GlobalAveragePooling2D(),
            tf.keras.layers.Dropout(0.3),
            tf.keras.layers.Dense(1024, activation='relu'),
            tf.keras.layers.BatchNormalization(),
            tf.keras.layers.Dense(train_num)
        ])
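        # The final Dense layer has no activation, so the model outputs raw
        # logits; the SparseCategoricalCrossentropy(from_logits=True) loss
        # below expects exactly that.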
        model.summary()

        model.compile(
            optimizer=tf.keras.optimizers.Adam(0.001),
            loss=tf.keras.losses.SparseCategoricalCrossentropy(
                from_logits=True),
            metrics=['acc'])

        train_count = len(train_path)
        test_count = len(test_path)

        steps_per_epoch = train_count // BATCH_SIZE
        validation_steps = test_count // BATCH_SIZE
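        # steps_per_epoch is required because train_ds.repeat() yields an
        # infinite stream; without it model.fit could not tell where an
        # epoch ends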

        init_epoch = 0
        learning_rate_base = 1e-3

        exponent_lr = EDS(learning_rate_base=learning_rate_base,
                          global_epoch_init=init_epoch,
                          decay_rate=0.9,
                          min_learn_rate=0.000001)
        history = model.fit(train_ds, epochs=epoch,
                            validation_data=test_ds,
                            validation_steps=validation_steps,
                            steps_per_epoch=steps_per_epoch,
                            verbose=1,
                            callbacks=[exponent_lr])

        visu_train_history(history, 'acc', 'val_acc')
        visu_train_history(history, 'loss', 'val_loss')
        model.save(model_path)
    return index_to_label

def load_and_preprocess_image(path):
    # Same preprocessing as training: decode, resize to 256x256, scale to [0, 1]
    image = tf.io.read_file(path)
    image = tf.image.decode_jpeg(image, channels=3)
    # Must match the 256x256 input the model was trained on (the original
    # script resized to 128x128 here, which breaks prediction)
    image = tf.image.resize(image, [256, 256])
    image = tf.cast(image, tf.float32)
    image = image / 255
    # Add a batch dimension: (256, 256, 3) -> (1, 256, 256, 3)
    image = tf.expand_dims(image, axis=0)
    return image

def yuce(test_img, model_name):
    # Prediction helper. test_img: image path (absolute or relative);
    # model_name: name of the saved .h5 model (without the .h5 extension).
    index_to_label = {
        0: '人参',
        1: '八角',
        2: '千日红',
        3: '射干',
        4: '枸杞',
        5: '栀子花',
        6: '芦荟',
        7: '苍耳子',
        8: '蒲公英',
        9: '藏红花'}
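    # Assumption: this hard-coded mapping mirrors the alphabetical folder
    # order that np.unique produced during training; if the data folders
    # change, regenerate it from test_model's index_to_label instead.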
    model = tf.keras.models.load_model(
        r'E:\PyCharm\Flask\慧眼识株\static\model\{}.h5'.format(model_name))
    image = load_and_preprocess_image(test_img)

    # Predict once and reuse the logits for both the label and the confidence
    pred_logits = model.predict(image)[0]
    pre_name = index_to_label.get(int(np.argmax(pred_logits)))

    # The model outputs raw logits, so apply softmax to get probabilities;
    # the value reported is the model's confidence, not a measured accuracy
    probs = tf.nn.softmax(tf.constant(pred_logits)).numpy()
    acc = round(float(probs[int(np.argmax(pred_logits))]), 4)

    print('Prediction:', pre_name)
    print('Confidence:', acc)
    return pre_name, acc

#%%
def pre_path(img_r_path):
    model_name = '10味药99%准确率'  # filled into the .h5 filename template in yuce
    pre_name, acc = yuce(img_r_path, model_name)
    return pre_name, acc
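
# A minimal batch-prediction sketch (not part of the original script): it runs
# pre_path over every image stored in the training-style layout data/<class>/<file>.jpg.
def predict_folder(folder='data'):
    results = {}
    for path in glob.glob('{}/*/*.jpg'.format(folder)):
        results[path] = pre_path(path)  # (predicted class name, confidence)
    return results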


if __name__ == '__main__':
    BATCH_SIZE = 8  # samples per gradient update
    epoch = 200  # number of training epochs
    learning_rate_base = 1e-3  # base learning rate (test_model hard-codes the same value)
    model_name = 'model9.26'  # saved as model/{model_name}.h5
    train_num = 10  # number of classes (size of the output layer), not sample count
    picture_name = 'data'  # root folder laid out as data/<class_name>/*.jpg
    test_model(
            model_name,
            epoch,
            BATCH_SIZE,
            train_num,
            picture_name)

    pre_path(r'C:/Users/Administrator/Desktop/666.jpg')

慧眼识珠 (A Discerning Eye)
Qiu Qiang, Pharmacy Class of 2020, Shanxi University of Chinese Medicine
