# CNN Mask Detection

import os, shutil, random, glob
# TensorFlow
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
# matplotlib for plotting the training curves
import matplotlib.pyplot as plt

# Settings
resize = 224      # input image size (images are resized to 224 x 224)
total_num = 100   # total number of images in the dataset
train_num = 90    # images actually used for training; the remaining 10 form the validation set
epochs = 5        # number of training epochs
batch_size = 8    # images per batch

# -----------------------------------------------------------------------------
dir_data = r'C:\Users\1\Desktop\Dataset1'       # dataset root
dir_mask = os.path.join(dir_data, 'mask1')      # folder with masked faces
dir_nomask = os.path.join(dir_data, 'nomask1')  # folder with unmasked faces

# Sanity checks on the folder layout
assert os.path.exists(dir_mask), 'Could not find ' + dir_mask
assert os.path.exists(dir_nomask), 'Could not find ' + dir_nomask

# Collect the absolute path of every image in each folder
fpath_mask = [os.path.abspath(fp) for fp in glob.glob(os.path.join(dir_mask, '*.png'))]
fpath_nomask = [os.path.abspath(fp) for fp in glob.glob(os.path.join(dir_nomask, '*.png'))]

# File counts
num_mask = len(fpath_mask)
num_nomask = len(fpath_nomask)

# Labels: 0 = mask, 1 = no mask
label_mask = [0] * num_mask
label_nomask = [1] * num_nomask

print('#mask: ', num_mask)
print('#nomask: ', num_nomask)
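# NOTE (optional refinement, not in the original post): glob.glob returns files
# in directory order, so the slices below would always hold out the same images.
# Shuffling each per-class list first (with a fixed seed for reproducibility)
# gives an unbiased train/validation split; `random` is already imported.
random.seed(42)
random.shuffle(fpath_mask)
random.shuffle(fpath_nomask)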
# Hold out 10% of each class for validation
RATIO_TEST = 0.1

num_mask_test = int(num_mask * RATIO_TEST)
num_nomask_test = int(num_nomask * RATIO_TEST)

# Training split
fpath_train = fpath_mask[num_mask_test:] + fpath_nomask[num_nomask_test:]
label_train = label_mask[num_mask_test:] + label_nomask[num_nomask_test:]

# Validation split
fpath_vali = fpath_mask[:num_mask_test] + fpath_nomask[:num_nomask_test]
label_vali = label_mask[:num_mask_test] + label_nomask[:num_nomask_test]

num_train = len(fpath_train)
num_vali = len(fpath_vali)
print(num_train)
print(num_vali)

# Preprocessing: read, decode, pad-resize to 224 x 224, normalize to [0, 1],
# and one-hot encode the label (2 classes)
def preproc(fpath, label):
    image_byte = tf.io.read_file(fpath)
    # channels=3 drops a possible PNG alpha channel so the shape matches the
    # model input; expand_animations=False guarantees a rank-3 tensor
    image = tf.io.decode_image(image_byte, channels=3, expand_animations=False)
    image_resize = tf.image.resize_with_pad(image, resize, resize)
    image_norm = tf.cast(image_resize, tf.float32) / 255.
    label_onehot = tf.one_hot(label, 2)
    return image_norm, label_onehot

# Training pipeline: shuffle, repeat, preprocess in parallel, batch, prefetch
dataset_train = tf.data.Dataset.from_tensor_slices((fpath_train, label_train))
dataset_train = dataset_train.shuffle(num_train).repeat()
dataset_train = dataset_train.map(preproc, num_parallel_calls=tf.data.experimental.AUTOTUNE)
dataset_train = dataset_train.batch(batch_size).prefetch(tf.data.experimental.AUTOTUNE)

# Validation pipeline
dataset_vali = tf.data.Dataset.from_tensor_slices((fpath_vali, label_vali))
dataset_vali = dataset_vali.shuffle(num_vali).repeat()
dataset_vali = dataset_vali.map(preproc, num_parallel_calls=tf.data.experimental.AUTOTUNE)
dataset_vali = dataset_vali.batch(batch_size).prefetch(tf.data.experimental.AUTOTUNE)

# -----------------------------------------------------------------------------
# AlexNet model
model = keras.Sequential(name='Alexnet')
# Layer 1
model.add(layers.Conv2D(filters=96, kernel_size=(11, 11),
                        strides=(4, 4), padding='valid',
                        input_shape=(resize, resize, 3),
                        activation='relu'))
model.add(layers.BatchNormalization())
# Pooling 1
model.add(layers.MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding='valid'))
# Layer 2
model.add(layers.Conv2D(filters=256, kernel_size=(5, 5), strides=(1, 1),
                        padding='same', activation='relu'))
model.add(layers.BatchNormalization())
# Pooling 2
model.add(layers.MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding='valid'))
# Layer 3
model.add(layers.Conv2D(filters=384, kernel_size=(3, 3), strides=(1, 1),
                        padding='same', activation='relu'))
# Layer 4
model.add(layers.Conv2D(filters=384, kernel_size=(3, 3), strides=(1, 1),
                        padding='same', activation='relu'))
# Layer 5
model.add(layers.Conv2D(filters=256, kernel_size=(3, 3), strides=(1, 1),
                        padding='same', activation='relu'))
model.add(layers.MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding='valid'))
# Layers 6-8: fully connected head
model.add(layers.Flatten())
model.add(layers.Dense(4096, activation='relu'))
model.add(layers.Dropout(0.5))
model.add(layers.Dense(4096, activation='relu'))
model.add(layers.Dropout(0.5))
model.add(layers.Dense(1000, activation='relu'))
model.add(layers.Dropout(0.5))
# Output layer: 2-way softmax
model.add(layers.Dense(2, activation='softmax'))

# Compile: categorical cross-entropy loss, SGD (gradient descent) optimizer
model.compile(loss='categorical_crossentropy',
              optimizer='sgd',
              metrics=['accuracy'])

history = model.fit(dataset_train,
                    steps_per_epoch=num_train // batch_size,
                    epochs=epochs,
                    validation_data=dataset_vali,
                    validation_steps=num_vali // batch_size,
                    verbose=1)

# Final scores on the training and validation pipelines
scores = model.evaluate(dataset_train, steps=num_train // batch_size, verbose=1)
print(scores)
scores = model.evaluate(dataset_vali, steps=num_vali // batch_size, verbose=1)
print(scores)

# Save the trained model
model.save(r'C:\Users\1\Desktop\mode_save')

# Record loss and accuracy
history_dict = history.history
train_loss = history_dict['loss']
train_accuracy = history_dict['accuracy']
val_loss = history_dict['val_loss']
val_accuracy = history_dict['val_accuracy']

# Draw loss
plt.figure()
plt.plot(range(epochs), train_loss, label='train_loss')
plt.plot(range(epochs), val_loss, label='val_loss')
plt.legend()
plt.xlabel('epochs')
plt.ylabel('loss')

# Draw accuracy
plt.figure()
plt.plot(range(epochs), train_accuracy, label='train_accuracy')
plt.plot(range(epochs), val_accuracy, label='val_accuracy')
plt.legend()
plt.xlabel('epochs')
plt.ylabel('accuracy')

# Display
plt.show()
print('Task finished')
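# NOTE (hedged sketch, not in the original post): overall accuracy can hide a
# per-class imbalance. This loop reuses preproc, fpath_vali, label_vali and the
# trained model from above to report accuracy separately for each class.
correct = {0: 0, 1: 0}
total = {0: 0, 1: 0}
for fp, lab in zip(fpath_vali, label_vali):
    img, _ = preproc(tf.constant(fp), tf.constant(lab))
    pred = int(model.predict(img[tf.newaxis, ...], verbose=0).argmax(axis=1)[0])
    total[lab] += 1
    correct[lab] += int(pred == lab)
print('mask accuracy:   ', correct[0] / max(total[0], 1))
print('nomask accuracy: ', correct[1] / max(total[1], 1))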
# -----------------------------------------------------------------------------
# Inference on a single image
import cv2
from tensorflow.keras.models import load_model

resize = 224
label = ('Mask on!', 'No mask')

image = cv2.imread(r'C:\Users\1\Desktop\69.png')
# cv2 loads BGR, but the training images were decoded as RGB, so convert.
# (Training also used pad-resize rather than a plain stretch; cv2.resize is
# kept here to stay close to the original post.)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image = cv2.resize(image, (resize, resize))
image = image.astype('float32') / 255.0
image = image.reshape((1, image.shape[0], image.shape[1], image.shape[2]))

model = load_model(r'C:\Users\1\Desktop\mode_save')
predict = model.predict(image)
i = predict.argmax(axis=1)[0]
print(label[i])
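# NOTE (hedged sketch, not in the original post): the same inference steps
# wrapped in a reusable helper; classify_image is a hypothetical name.
import numpy as np

def classify_image(path, model, size=224):
    """Return the predicted class index (0 = mask, 1 = no mask) for one file."""
    img = cv2.imread(path)
    if img is None:
        raise FileNotFoundError(path)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # BGR -> RGB to match training
    img = cv2.resize(img, (size, size)).astype('float32') / 255.0
    return int(model.predict(img[np.newaxis, ...], verbose=0).argmax(axis=1)[0])

# Example usage:
# print(label[classify_image(r'C:\Users\1\Desktop\69.png', model)])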