Dataset: http://vision.ucsd.edu/content/yale-face-database
Image pre-processing
step 1: convert GIF images to JPG images
# Step 1: convert the GIF images to JPEG.
# The Yale faces are GIF data, often WITHOUT a .gif extension (e.g.
# "subject01.glasses"), so the real type is detected from file content.
import os
from PIL import Image
import filetype

images_dir = "./yalefaces/"
for file in os.listdir(images_dir):
    path = images_dir + file
    if not os.path.isfile(path):
        continue  # skip anything that is not a regular file
    if filetype.guess_extension(path) == 'gif':
        # Keep the base name when the file already ends in .gif;
        # otherwise the whole name (e.g. "subject01.glasses") is the stem.
        if os.path.splitext(file)[-1] == '.gif':
            stem = os.path.splitext(file)[0]
        else:
            stem = file
        # JPEG cannot store palette ('P') images, which is what GIF frames
        # decode to -- convert to grayscale first (the dataset is grayscale).
        # The context manager also closes the file handle Pillow keeps open.
        with Image.open(path) as im:
            im.convert('L').save(images_dir + stem + '.jpg')
        os.remove(path)
step 2: move the images into directories named after each person
# Step 2: group the converted JPEGs into one sub-directory per person.
import os
import shutil
import filetype

images_dir = "./yalefaces/"
for entry in os.listdir(images_dir):
    src = images_dir + entry
    if filetype.guess_extension(src) != 'jpg':
        continue
    # Names look like "subject01.glasses.jpg"; the leading token is the person.
    person_name = entry.split('.')[0]
    dest = images_dir + person_name
    if not os.path.isdir(dest):
        os.mkdir(dest)
    shutil.move(src, dest)
step 3: select the face ROIs from the images and resize them
# Step 3a: enumerate the person directories (created in step 2) and
# persist a name -> integer-label map for the recognizer.
import os
import pickle

images_dir = "./yalefaces/"
persons = [entry for entry in os.listdir(images_dir)
           if os.path.isdir(images_dir + entry)]
# enumerate() replaces the zip(persons, range(len(persons))) anti-idiom.
persons_dict = {person: ind for ind, person in enumerate(persons)}
with open("persons_dict.pickle", 'wb') as f:
    pickle.dump(persons_dict, f)
# Step 3b: detect the face ROI in every image; collect (face, label) pairs.
import cv2

# Use the cascade bundled with OpenCV instead of a machine-specific
# Homebrew install path, so the script runs on any machine.
faceCascade = cv2.CascadeClassifier(
    cv2.data.haarcascades + "haarcascade_frontalface_default.xml")
labels = []   # integer label per detected face (index into persons_dict)
faces = []    # 150x150 grayscale face crops
for person in persons:
    person_dir = os.path.join(images_dir, person)
    for file in os.listdir(person_dir):
        file_path = os.path.join(person_dir, file)
        image = cv2.imread(file_path)
        if image is None:
            continue  # not readable as an image by OpenCV
        image_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        fcs = faceCascade.detectMultiScale(
            image_gray,
            scaleFactor=1.1,   # image-pyramid scale step
            minNeighbors=5,    # detection stability threshold
            minSize=(30, 30),
            flags=0
        )
        # One image can yield zero or several detections; keep them all,
        # each tagged with the person's integer label.
        for (x, y, w, h) in fcs:
            faces.append(cv2.resize(image_gray[y:y+h, x:x+w], (150, 150)))
            labels.append(persons_dict[person])
import os
import filetype
import matplotlib.pyplot as plt
%matplotlib inline
def plot_images(images, labels, rows=11, cols=15, size=(20, 20)):
    """Show up to rows*cols grayscale images in a grid, titled by label.

    Stops at len(images) when there are fewer images than grid cells,
    instead of raising IndexError as the unclamped loop would.
    """
    plt.figure(figsize=size)
    for i in range(min(rows * cols, len(images))):
        plt.subplot(rows, cols, i + 1)
        plt.title(labels[i])
        plt.imshow(images[i], cmap=plt.cm.gray)
        plt.xticks(())  # hide the axis ticks
        plt.yticks(())
# Reload the saved name -> label map and invert it so integer labels can
# be rendered as person names in the plot titles.
with open('persons_dict.pickle', 'rb') as f:
    persons_dict = pickle.load(f)
persons_dict = dict(zip(persons_dict.values(), persons_dict.keys()))
plot_images(faces, [persons_dict[label] for label in labels])
step 4: split the faces and labels into training and test sets
# Step 4: hold out 15 random faces as a test set; the rest are training data.
import random

# random.sample already returns a plain list of distinct indices, so the
# original np.array(...) wrapper was unnecessary -- and broken: numpy is
# only imported later in this script, so `np` was undefined here.
test_ind = random.sample(range(len(labels)), 15)
labels_test = [labels[i] for i in test_ind]
faces_test = [faces[i] for i in test_ind]
test_ind_set = set(test_ind)  # O(1) membership tests for the split below
labels_trainning = []
faces_trainning = []
for i in range(len(labels)):
    if i not in test_ind_set:
        labels_trainning.append(labels[i])
        faces_trainning.append(faces[i])
Face Recognition with OpenCV built-in recognizer
step 1: train and save the model
# Train OpenCV's LBPH (Local Binary Patterns Histograms) recognizer on the
# training faces and persist the fitted model to disk.
import numpy as np
recognizer = cv2.face.LBPHFaceRecognizer_create()
# train() expects the labels as a numpy integer array; the faces stay a
# plain list of equal-sized grayscale images.
recognizer.train(faces_trainning, np.array(labels_trainning))
recognizer.save("trainner.yml")  # NOTE(review): filename typo ("trainer"?) kept as-is
step 2: predict faces
# Predict a label for each held-out face.  predict() returns the label and
# a confidence distance (lower means a closer match).
labels_predict = []
confds = []
for face in faces_test:
    predicted_label, distance = recognizer.predict(face)
    labels_predict.append(predicted_label)
    confds.append(distance)

# Reload the saved name -> label map and invert it for display.
with open('persons_dict.pickle', 'rb') as f:
    persons_dict = pickle.load(f)
persons_dict = {label: name for name, label in persons_dict.items()}

titles = []
for label, label_predict, confd in zip(labels_test, labels_predict, confds):
    titles.append('true label: ' + persons_dict[label]
                  + '\npredict label: ' + persons_dict[label_predict]
                  + '\nconfidence: ' + str(confd))
plot_images(faces_test, titles, rows=3, cols=5, size=(20, 15))