#!/usr/bin/env python
# coding: utf-8
"""Capture face samples from the webcam for one user and save grayscale
crops into ./dataset as User.<id>.<count>.jpg, for later LBPH training."""
import cv2
import os
import numpy as np
from PIL import Image

cam = cv2.VideoCapture(1)
cam.set(3, 640)  # set video width
cam.set(4, 480)  # set video height
face_detector = cv2.CascadeClassifier(r'./haarcascade_frontalface_default.xml')

# For each person, enter one numeric face id
face_id = input('\n 输入enter user id end press <return> ==> ')
print("\n [INFO] Initializing face capture. Look the camera and wait ...")

# Make sure the output folder exists — cv2.imwrite fails silently otherwise.
os.makedirs('./dataset', exist_ok=True)

# Initialize individual sampling face count
count = 0
while True:
    ret, img = cam.read()
    if not ret:  # camera read failed / disconnected — stop instead of crashing
        break
    # img = cv2.flip(img, -1)  # Flip vertically
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_detector.detectMultiScale(gray, 1.3, 5)
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
        count += 1
        # Save the captured grayscale face crop into the datasets folder
        cv2.imwrite(r"./dataset/User." + str(face_id) + '.' + str(count) + ".jpg",
                    gray[y:y + h, x:x + w])
    cv2.imshow('image', img)
    k = cv2.waitKey(100) & 0xff
    if k == 27:       # Press 'ESC' to exit video
        break
    elif count >= 250:  # Take 250 face samples and stop video
        break

cam.release()
cv2.destroyAllWindows()
# Path for face image database
path = r'./dataset'
recognizer = cv2.face.LBPHFaceRecognizer_create()
detector = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")


def getImagesAndLabels(path):
    """Load every captured sample image under *path* and extract faces.

    Filenames are expected to look like ``User.<id>.<count>.jpg``; the
    numeric ``<id>`` (second dot-separated field) becomes the label.

    Returns:
        (faceSamples, ids): parallel lists of grayscale face crops
        (uint8 numpy arrays) and their integer user ids.
    """
    # Only consider .jpg files — the capture script writes .jpg, and stray
    # files (e.g. .DS_Store) would otherwise crash Image.open / int().
    imagePaths = [os.path.join(path, f) for f in os.listdir(path)
                  if f.lower().endswith('.jpg')]
    faceSamples = []
    ids = []
    for imagePath in imagePaths:
        PIL_img = Image.open(imagePath).convert('L')  # convert it to grayscale
        img_numpy = np.array(PIL_img, 'uint8')
        # user id is the second dot-separated field of the filename
        user_id = int(os.path.split(imagePath)[-1].split(".")[1])
        faces = detector.detectMultiScale(img_numpy)
        for (x, y, w, h) in faces:
            faceSamples.append(img_numpy[y:y + h, x:x + w])
            ids.append(user_id)
    return faceSamples, ids
print("\n [INFO] Training faces. It will take a few seconds. Wait ...")
faces, ids = getImagesAndLabels(path)
# Fail with a clear message instead of an opaque cv2.error when no face
# was detected in any of the sample images.
if not faces:
    raise SystemExit(" [ERROR] No faces found in '%s'; run the capture step first." % path)
recognizer.train(faces, np.array(ids))
# Save the model into trainer.yml
recognizer.write(r'./trainer.yml')  # recognizer.save() worked on Mac, but not on Pi
# Print the number of faces trained and end program
print("\n [INFO] {0} faces trained. Exiting Program".format(len(np.unique(ids))))
# 2.2. Recognition (识别)
#!/usr/bin/env python
# coding: utf-8
"""Real-time face recognition: load the LBPH model trained in step 2.1
and label faces from the webcam with a name and a match percentage."""
import cv2
import numpy as np
import os

# Use the cv2.face factory, same as the training script — the
# `from cv2 import face_LBPHFaceRecognizer` form is not portable
# across OpenCV builds.
recognizer = cv2.face.LBPHFaceRecognizer_create()
recognizer.read(r'./trainer.yml')  # path of face model
cascadePath = r"./haarcascade_frontalface_default.xml"  # path of face classifier
faceCascade = cv2.CascadeClassifier(cascadePath)
font = cv2.FONT_HERSHEY_SIMPLEX

# names indexed by user id: example ==> Marcelo: id=1, etc
names = ['null', 'Micheal']

# Initialize and start realtime video capture
cam = cv2.VideoCapture(0)
cam.set(3, 640)  # set video width
cam.set(4, 480)  # set video height
# Define min window size to be recognized as a face (10% of the frame)
minW = 0.1 * cam.get(3)
minH = 0.1 * cam.get(4)

while True:
    ret, img = cam.read()
    if not ret:  # camera read failed — stop instead of crashing in cvtColor
        break
    # img = cv2.flip(img, -1)  # Flip vertically
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.2,
        minNeighbors=5,
        minSize=(int(minW), int(minH)),
    )
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
        label_id, confidence = recognizer.predict(gray[y:y + h, x:x + w])
        # LBPH "confidence" is a distance: 0 is a perfect match.  Guard the
        # names lookup so an id outside the list can't raise IndexError.
        if confidence < 100 and 0 <= label_id < len(names):
            label = names[label_id]
        else:
            label = "unknown"
        # Clamp so the displayed match percentage is never negative
        # (the original printed e.g. "-23%" for distances above 100).
        confidence = " {0}%".format(max(0, round(100 - confidence)))
        cv2.putText(img, str(label), (x + 5, y - 5), font, 1, (255, 255, 255), 2)
        cv2.putText(img, str(confidence), (x + 5, y + h - 5), font, 1, (255, 255, 0), 1)
    cv2.imshow('camera', img)
    k = cv2.waitKey(10) & 0xff
    if k == 27:  # Press 'ESC' to exit video
        break

cam.release()
cv2.destroyAllWindows()