1.环境配置(Anaconda下)
pip install opencv-python (--upgrade)
pip install opencv-contrib-python (--upgrade)
# 根据情况可能还需要升级numpy
pip install numpy --upgrade
2.源码
(1)文件内容:
data中存放“haarcascade_frontalface_default.xml”分类器文件,文件来源于OpenCV 3源代码中的data/haarcascades。
自己新建dataset和trainer文件夹。
dataset用来存放训练数据集;
trainer用来存放生成的模型。
(2)源代码:
i. face_datasets.py
# face_datasets.py
# Capture face samples from the default webcam and save them as grayscale
# crops into ./dataset for later LBPH training. Press 'q' to stop early;
# capture ends automatically once 100 samples have been saved.
import cv2
import os

# Open the default camera (device 0).
vid_cam = cv2.VideoCapture(0)

# Haar cascade for frontal-face detection (from OpenCV's data/haarcascades).
face_detector = cv2.CascadeClassifier('./data/haarcascade_frontalface_default.xml')

# One numeric id per person; encoded into each saved file name.
face_id = 2

# Number of face samples captured so far.
count = 0

# Ensure the output folder exists — cv2.imwrite fails silently otherwise.
os.makedirs('./dataset', exist_ok=True)

while True:
    # Grab one frame; skip the iteration if the camera failed to deliver,
    # otherwise cvtColor would crash on a None frame.
    ret, image_frame = vid_cam.read()
    if not ret:
        continue

    # Haar cascades operate on grayscale images.
    gray = cv2.cvtColor(image_frame, cv2.COLOR_BGR2GRAY)

    # scaleFactor=1.3, minNeighbors=5: common trade-off between detection
    # speed and false positives.
    faces = face_detector.detectMultiScale(gray, 1.3, 5)

    for (x, y, w, h) in faces:
        # Blue rectangle around the detected face, for visual feedback.
        cv2.rectangle(image_frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
        count += 1
        # File name pattern "User.<face_id>.<count>.jpg" — training.py
        # parses the id back out of the second dot-separated field.
        cv2.imwrite("./dataset/User." + str(face_id) + '.' + str(count) + ".jpg",
                    gray[y:y + h, x:x + w])

    # Show the annotated frame.
    cv2.imshow('frame', image_frame)

    # Stop on 'q' (polled every 100 ms), or once 100 samples are stored
    # (was `count > 100`, which saved 101 images — off by one).
    if cv2.waitKey(100) & 0xFF == ord('q'):
        break
    elif count >= 100:
        break

# Release the camera and close the preview window.
vid_cam.release()
cv2.destroyAllWindows()
# training.py
# Train an LBPH face recognizer on the samples in ./dataset and write the
# resulting model to ./trainer/trainer.yml.
import cv2
import os

import numpy as np
from PIL import Image

# LBPH (Local Binary Patterns Histograms) face recognizer.
recognizer = cv2.face.LBPHFaceRecognizer_create()

# Haar cascade used to locate the face region inside each training image.
detector = cv2.CascadeClassifier("./data/haarcascade_frontalface_default.xml")


def getImagesAndLabels(path):
    """Load every sample image under *path* and return (faceSamples, ids).

    faceSamples is a list of grayscale face crops (uint8 numpy arrays);
    ids is the parallel list of integer person ids parsed from the file
    name pattern "User.<id>.<count>.jpg" produced by face_datasets.py.
    """
    imagePaths = [os.path.join(path, f) for f in os.listdir(path)]
    faceSamples = []
    ids = []
    for imagePath in imagePaths:
        # Skip non-image entries (e.g. .DS_Store, Thumbs.db) that would
        # make Image.open raise.
        if not imagePath.lower().endswith(('.jpg', '.jpeg', '.png')):
            continue
        # 'L' mode = 8-bit grayscale.
        PIL_img = Image.open(imagePath).convert('L')
        img_numpy = np.array(PIL_img, 'uint8')
        # Second dot-separated field of the file name is the person id.
        # (Named face_id rather than `id` to avoid shadowing the builtin.)
        face_id = int(os.path.split(imagePath)[-1].split(".")[1])
        print(face_id)
        # Re-detect the face so only the face region is used for training.
        faces = detector.detectMultiScale(img_numpy)
        for (x, y, w, h) in faces:
            faceSamples.append(img_numpy[y:y + h, x:x + w])
            ids.append(face_id)
    return faceSamples, ids


# Collect the training data.
faces, ids = getImagesAndLabels('./dataset')
if not faces:
    # Fail with a clear message instead of an opaque cv2.error from train().
    raise RuntimeError("No faces found in ./dataset - run face_datasets.py first")

# Train and persist the model.
recognizer.train(faces, np.array(ids))
# Ensure the output folder exists before writing the model file.
os.makedirs('./trainer', exist_ok=True)
# write() is the documented method; save() is its legacy alias.
recognizer.write('./trainer/trainer.yml')
print("Training Completed")
# face_recognition.py
# Recognize faces from the webcam using the LBPH model produced by
# training.py, drawing a labelled rectangle around each detected face.
import cv2

# LBPH recognizer, loaded from the trained model file.
recognizer = cv2.face.LBPHFaceRecognizer_create()
recognizer.read('./trainer/trainer.yml')

# Haar cascade for locating faces in each frame.
cascadePath = "./data/haarcascade_frontalface_default.xml"
faceCascade = cv2.CascadeClassifier(cascadePath)

# Font used for the name overlay.
font = cv2.FONT_HERSHEY_SIMPLEX

# Map trained numeric ids to display names; anything else is "Unknown"
# (was misspelled "Unknow"). Add new people here after retraining.
names = {5: "Gz", 2: "Lzy"}

# Start capturing video.
cam = cv2.VideoCapture(0)

while True:
    # Grab a frame; skip the iteration if the camera failed to deliver,
    # otherwise cvtColor would crash on a None frame.
    ret, im = cam.read()
    if not ret:
        continue

    # Detection and recognition both operate on grayscale.
    gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(gray, 1.2, 5)

    for (x, y, w, h) in faces:
        # Green rectangle, padded 20 px around the detected face.
        cv2.rectangle(im, (x - 20, y - 20), (x + w + 20, y + h + 20), (0, 255, 0), 4)

        # predict() returns (label, confidence); lower confidence = closer match.
        Id = recognizer.predict(gray[y:y + h, x:x + w])
        print(Id)

        # Translate the numeric label into a display name.
        label = names.get(Id[0], "Unknown")

        # Filled banner above the face, then the name drawn on top of it.
        cv2.rectangle(im, (x - 22, y - 90), (x + w + 22, y - 22), (0, 255, 0), -1)
        cv2.putText(im, str(label), (x, y - 40), font, 2, (255, 255, 255), 3)

    # Show the annotated frame.
    cv2.imshow('im', im)

    # Quit on 'q'.
    if cv2.waitKey(10) & 0xFF == ord('q'):
        break

# Release the camera and close all windows.
cam.release()
cv2.destroyAllWindows()
参考资料: