- 项目源码包:face_recognition源码,dlib源码包
- 安装源码包步骤:
- pip3 install cmake(dlib安装需要编译,依赖cmake)
- pip3 install dlib
- pip3 install face_recognition
- pip3 install opencv-python
- 源码使用案例:(更多案例可参考 face_recognition 源码自带的 examples 示例目录)
- 人脸检测
import face_recognition
import cv2
import numpy as np
import argparse
def arg_parse():
    """Parse command-line options selecting which demo case to run.

    Returns an argparse.Namespace with:
      function -- name of the demo branch to execute (default 'single_pic')
      video    -- path of the video file for the video-based demos
    """
    ap = argparse.ArgumentParser(description='face_recognition test')
    ap.add_argument('--function', dest='function',
                    help="which case to run", default='single_pic', type=str)
    ap.add_argument('--video', dest='video',
                    help='video to run', default='test.mp4', type=str)
    return ap.parse_args()
if __name__ == '__main__':
    args = arg_parse()
    function = args.function
    video = args.video
    if function == 'single_pic':
        # Face detection on a still image: draw a box per detected face and
        # display until the user presses 'e'.
        image = face_recognition.load_image_file("test.jpg")
        face_locations = face_recognition.face_locations(image)
        red = (255, 0, 0)
        cv_image = cv2.imread("test.jpg")
        for face_location in face_locations:
            # face_recognition returns (top, right, bottom, left) tuples.
            top, right, bottom, left = face_location
            cv2.rectangle(cv_image, (left, top), (right, bottom), red, 5)
        while True:
            cv2.imshow("ustczhng2012", cv_image)
            if cv2.waitKey(1) == ord('e'):
                break
        cv2.destroyAllWindows()
    elif function == 'camera':
        # Live face detection from the default webcam.
        cap = cv2.VideoCapture(0)
        if not cap.isOpened():
            # BUG FIX: open() requires the device index; the original bare
            # cap.open() raised a TypeError.
            cap.open(0)
        red = (255, 0, 0)
        while True:
            ret, frame = cap.read()
            # BUG FIX: guard against a failed read — the original passed a
            # None frame straight into face_locations() and crashed.
            if not ret or frame is None:
                break
            # NOTE(review): frame is BGR while face_recognition documents RGB
            # input — detection still works but may be degraded; consider
            # cv2.cvtColor(frame, cv2.COLOR_BGR2RGB). TODO confirm.
            face_locations = face_recognition.face_locations(frame)
            for face_location in face_locations:
                top, right, bottom, left = face_location
                cv2.rectangle(frame, (left, top), (right, bottom), red, 5)
            cv2.imshow("image", frame)
            if cv2.waitKey(1) == ord('e'):
                break
        cap.release()
        cv2.destroyAllWindows()
    elif function == 'video':
        # Face detection over a video file, frame by frame.
        cap = cv2.VideoCapture(video)
        if not cap.isOpened():
            # BUG FIX: open() requires the source; bare cap.open() is a TypeError.
            cap.open(video)
        red = (255, 0, 0)
        while True:
            ret, frame = cap.read()
            # BUG FIX: the original `ret == None or frame == None` raised
            # "truth value of an array is ambiguous" on every valid frame,
            # because `frame == None` on a numpy array is elementwise.
            if not ret or frame is None:
                break
            face_locations = face_recognition.face_locations(frame)
            for face_location in face_locations:
                top, right, bottom, left = face_location
                cv2.rectangle(frame, (left, top), (right, bottom), red, 5)
            cv2.imshow("image", frame)
            if cv2.waitKey(1) == ord('e'):
                break
        cap.release()
        cv2.destroyAllWindows()
- 人脸对齐
# 人脸landmark demo
elif function == 'landmark':
image = face_recognition.load_image_file('test.jpg')
face_landmarks_list = face_recognition.face_landmarks(image)
pil_image = Image.fromarray(image)
d = ImageDraw.Draw(pil_image)
for face_landmarks in face_landmarks_list:
for facial_feature in face_landmarks.keys():
d.line(face_landmarks[facial_feature], width=5)
pil_image.show()
- 人脸识别
# 人脸识别功能
elif function == 'recognition':
eureca_image = face_recognition.load_image_file("test.jpg")
miao_image = face_recognition.load_image_file("miao.jpg")
eureca_face_encoding = face_recognition.face_encodings(eureca_image)[0]
miao_face_encoding = face_recognition.face_encodings(miao_image)[0]
known_face_encoding =[eureca_face_encoding, miao_face_encoding]
known_face_names = ["master1", "master2"]
cap = cv2.VideoCapture(video)
while True:
ret, frame = cap.read()
face_locations = face_recognition.face_locations(frame)
face_encodings = face_recognition.face_encodings(frame, face_locations)
for (top, right, bottom, left), face_encoding in zip(face_locations, face_encodings):
matches = face_recognition.compare_faces(known_face_encoding, face_encoding)
name = 'Unknown'
face_distances = face_recognition.face_distance(known_face_encoding, face_encoding)
best_match_index = np.argmin(face_distances)
if matches[best_match_index]:
name = known_face_names[best_match_index]
cv2.rectangle(frame, (left, top),(right, bottom), (255, 0, 0), 5)
cv2.putText(frame, name, (left+6, bottom-7), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2)
cv2.imshow('image',frame)
if cv2.waitKey(1) == ord('e'):
break
cv2.destroyAllWindows()