# facelocation.py — face-angle estimation adapted from open-source code.
# The returned result includes the face distance and the face angle; the
# specific thresholds can be adjusted as needed.
import face_recognition as fr
import cv2
import numpy as np
import dlib
import time
import math
# Parts of this code were adapted from open-source code found online.
# HOG-based frontal face detector shipped with dlib.
detector = dlib.get_frontal_face_detector()
# 68-point facial landmark model; the .dat file must exist in the working directory.
predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
# Expected number of landmarks produced by the predictor above.
POINTS_NUM_LANDMARK = 68
def _largest_face(dets):
if len(dets) == 1:
return 0
face_areas = [(det.right() - det.left()) * (det.bottom() - det.top()) for det in dets]
largest_area = face_areas[0]
largest_index = 0
for index in range(1, len(dets)):
if face_areas[index] > largest_area:
largest_index = index
largest_area = face_areas[index]
print("largest_face index is {} in {} faces".format(largest_index, len(dets)))
return largest_index
def get_image_points_from_landmark_shape(landmark_shape):
if landmark_shape.num_parts != POINTS_NUM_LANDMARK:
print("ERROR:landmark_shape.num_parts-{}".format(landmark_shape.num_parts))
return -1, None
image_points = np.array([
(landmark_shape.part(30).x, landmark_shape.part(30).y),
(landmark_shape.part(8).x, landmark_shape.part(8).y),
(landmark_shape.part(36).x, landmark_shape.part(36).y),
(landmark_shape.part(45).x, landmark_shape.part(45).y),
(landmark_shape.