构建基于卷积神经网络的口罩识别模型
一、人脸图像特征提取的各种方法
1. HOG
方向梯度直方图(Histogram of Oriented Gradient, HOG)特征是一种在计算机视觉和图像处理中用来进行物体检测的特征描述子。HOG特征通过计算和统计图像局部区域的梯度方向直方图来构成特征。
HOG特征的总结:把样本图像分割为若干个像素的单元,把梯度方向平均划分为多个区间,在每个单元里面对所有像素的梯度方向在各个方向区间进行直方图统计,得到一个多维的特征向量,每相邻的单元构成一个区间,把一个区间内的特征向量联起来得到多维的特征向量,用区间对样本图像进行扫描,扫描步长为一个单元。最后将所有块的特征串联起来,就得到了人体的特征。至今虽然有很多行人检测算法,但基本都是以HOG+SVM的思路为主。
2. 卷积神经网络特征
卷积神经网络(Convolutional Neural Networks, CNN)是一类包含卷积计算且具有深度结构的前馈神经网络,是深度学习的代表算法之一。卷积神经网络具有表征学习能力,能够按其阶层结构对输入信息进行平移不变分类,因此也被称为“平移不变人工神经网络(Shift-Invariant Artificial Neural Networks, SIANN)”。
卷积神经网络中,第一步一般用卷积核去提取特征,这些初始化的卷积核会在反向传播的过程中,在迭代中被一次又一次的更新,无限地逼近我们的真实解。其实本质没有对图像矩阵求解,而是初始化了一个符合某种分布的特征向量集,然后在反向传播中无限更新这个特征集,让它能无限逼近数学中的那个概念上的特征向量,以致于我们能用特征向量的数学方法对矩阵进行特征提取。
准备工作
①在 Anaconda 中创建 python3.6 版本的TensorFlow环境
②激活 python3.6 的 tensorflow 环境
③使用豆瓣镜像安装 tensorflow
pip install tensorflow -i https://pypi.douban.com/simple
④激活对应的conda环境
conda activate tensorflow
⑤安装ipykernel
pip install ipykernel -i https://pypi.douban.com/simple
⑥将环境写入notebook的kernel中
python -m ipykernel install --user --name tensorflow --display-name "Python (tensorflow)"
⑦在系统内切换
1. 划分测试集、训练集以及验证集
(1)运行tensorflow环境,导包
import keras
import os
import shutil
(2)读取训练集的图片,将训练数据和测试数据放入自己创建的文件夹
riginal_dataset_dir = 'E:\mask' base_dir = 'mask' os.mkdir(base_dir) train_dir = os.path.join(base_dir, 'train') os.mkdir(train_dir) validation_dir = os.path.join(base_dir, 'validation') os.mkdir(validation_dir) test_dir = os.path.join(base_dir, 'test') os.mkdir(test_dir) train_mask_dir = os.path.join(train_dir, 'mask') os.mkdir(train_mask_dir) train_unmask_dir = os.path.join(train_dir, 'unmask') os.mkdir(train_unmask_dir) validation_mask_dir = os.path.join(validation_dir, 'mask') os.mkdir(validation_mask_dir) validation_unmask_dir = os.path.join(validation_dir, 'unmask') os.mkdir(validation_unmask_dir) test_mask_dir = os.path.join(test_dir, 'mask') os.mkdir(test_mask_dir) test_unmask_dir = os.path.join(test_dir, 'unmask') os.mkdir(test_unmask_dir)
(3)复制图片到文件夹
(4)在jupyter中将文件夹的路径引入
train_mask_dir="E:/mask/train/mask"
train_unmask_dir="E:/mask/train/unmask"
test_mask_dir="E:/mask/train/mask"
test_unmask_dir="E:/mask/train/unmask"
validation_mask_dir="E:/mask/validation/mask/"
validation_unmask_dir="E:/mask/validation/unmask/"
train_dir="E:/mask/train"
test_dir="E:/mask/test"
validation_dir="E:/mask/validation"
(5)打印文件夹下的图片数量
print('total training mask images:', len(os.listdir(train_mask_dir)))
print('total training unmask images:', len(os.listdir(train_unmask_dir)))
print('total testing mask images:', len(os.listdir(test_mask_dir)))
print('total testing unmask images:', len(os.listdir(test_unmask_dir)))
print('total validation mask images:', len(os.listdir(validation_mask_dir)))
print('total validation unmask images:', len(os.listdir(validation_unmask_dir)))
3.创建模型
#创建模型
from keras import layers
from keras import models
model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu',
input_shape=(150, 150, 3)))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(128, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(128, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Flatten())
model.add(layers.Dense(512, activation='relu'))
model.add(layers.Dense(1, activation='sigmoid'))
查看模型
model.summary()
4.归一化处理
from keras import optimizers
model.compile(loss='binary_crossentropy',
optimizer=optimizers.RMSprop(lr=1e-4),
metrics=['acc'])
from keras.preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(rescale=1./255)
validation_datagen=ImageDataGenerator(rescale=1./255)
test_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(
# 目标文件目录
train_dir,
#所有图片的size必须是150x150
target_size=(150, 150),
batch_size=20,
class_mode='binary')
validation_generator = test_datagen.flow_from_directory(
validation_dir,
target_size=(150, 150),
batch_size=20,
class_mode='binary')
test_generator = test_datagen.flow_from_directory(test_dir,
target_size=(150, 150),
batch_size=20,
class_mode='binary')
for data_batch, labels_batch in train_generator:
print('data batch shape:', data_batch.shape)
print('labels batch shape:', labels_batch)
break
train_generator.class_indices
5. 训练模型
训练模型:
history = model.fit_generator(
train_generator,
steps_per_epoch=100,
epochs=10,
validation_data=validation_generator,
validation_steps=50)
保存模型:
#保存模型
model.save('E:/mask/maskORunmask_1.h5')
6.数据增强
import matplotlib.pyplot as plt
from keras.preprocessing import image
fnames = [os.path.join(train_mask_dir, fname) for fname in os.listdir(train_mask_dir)]
img_path = fnames[3]
img = image.load_img(img_path, target_size=(150, 150))
x = image.img_to_array(img)
x = x.reshape((1,) + x.shape)
i = 0
for batch in datagen.flow(x, batch_size=1):
plt.figure(i)
imgplot = plt.imshow(image.array_to_img(batch[0]))
i += 1
if i % 4 == 0:
break
plt.show()
7.创建网络
model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu',
input_shape=(150, 150, 3)))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(128, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(128, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Flatten())
model.add(layers.Dropout(0.5))
model.add(layers.Dense(512, activation='relu'))
model.add(layers.Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer=optimizers.RMSprop(lr=1e-4),
metrics=['acc'])
8.训练模型并保存
#归一化处理
train_datagen = ImageDataGenerator(
rescale=1./255,
rotation_range=40,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True,)
test_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(
train_dir,
target_size=(150, 150),
batch_size=32,
class_mode='binary')
validation_generator = test_datagen.flow_from_directory(
validation_dir,
target_size=(150, 150),
batch_size=32,
class_mode='binary')
history = model.fit_generator(
train_generator,
steps_per_epoch=100,
epochs=60,
validation_data=validation_generator,
validation_steps=50)
保存模型
model.save('E:/mask/maskORunmask_2.h5')
五、口罩识别
1. 单张图片判别
# 单张图片进行判断 是否戴口罩
import cv2
from keras.preprocessing import image
from keras.models import load_model
import numpy as np
model = load_model('E:/mask/maskORunmask_2.h5')
img_path='E:/mask/test/unmask/test0.jpg'
img = image.load_img(img_path, target_size=(150, 150))
#print(img.size)
img_tensor = image.img_to_array(img)/255.0
img_tensor = np.expand_dims(img_tensor, axis=0)
prediction =model.predict(img_tensor)
print(prediction)
if prediction[0][0]>0.5:
result='未戴口罩'
else:
result='戴口罩'
print(result)
import cv2
from keras.preprocessing import image
from keras.models import load_model
import numpy as np
model = load_model('E:/mask/maskORunmask_2.h5')
img_path='E:/mask/test/mask/test1.jpg'
img = image.load_img(img_path, target_size=(150, 150))
#print(img.size)
img_tensor = image.img_to_array(img)/255.0
img_tensor = np.expand_dims(img_tensor, axis=0)
prediction =model.predict(img_tensor)
print(prediction)
if prediction[0][0]>0.5:
result='未戴口罩'
else:
result='戴口罩'
print(result)
import cv2
from keras.preprocessing import image
from keras.models import load_model
import numpy as np
model = load_model('E:/mask/maskORunmask_2.h5')
img_path='E:/mask/train/mask/15.jpg'
img = image.load_img(img_path, target_size=(150, 150))
#print(img.size)
img_tensor = image.img_to_array(img)/255.0
img_tensor = np.expand_dims(img_tensor, axis=0)
prediction =model.predict(img_tensor)
print(prediction)
if prediction[0][0]>0.5:
result='未戴口罩'
else:
result='戴口罩'
print(result)
import cv2
from keras.preprocessing import image
from keras.models import load_model
import numpy as np
import dlib
from PIL import Image
model = load_model('E:/mask/maskORunmask_2.h5')
detector = dlib.get_frontal_face_detector()
video=cv2.VideoCapture(0)
font = cv2.FONT_HERSHEY_SIMPLEX
def rec(img):
gray=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
dets=detector(gray,1)
if dets is not None:
for face in dets:
left=face.left()
top=face.top()
right=face.right()
bottom=face.bottom()
cv2.rectangle(img,(left,top),(right,bottom),(0,255,0),2)
img1=cv2.resize(img[top:bottom,left:right],dsize=(150,150))
img1=cv2.cvtColor(img1,cv2.COLOR_BGR2RGB)
img1 = np.array(img1)/255.
img_tensor = img1.reshape(-1,150,150,3)
prediction =model.predict(img_tensor)
print(prediction)
if prediction[0][0]>0.5:
result='unmask'
else:
result='mask'
cv2.putText(img, result, (left,top), font, 2, (0, 255, 0), 2, cv2.LINE_AA)
cv2.imshow('Video', img)
while video.isOpened():
res, img_rd = video.read()
if not res:
break
rec(img_rd)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
video.release()
cv2.destroyAllWindows()