def return_pitch_yaw_roll(self, image, radians=False):
    """Estimate the head pose from a single face image.

    Solves the Perspective-n-Point problem between a fixed 3D anthropometric
    face model and the 2D facial landmarks detected in *image*.

    Parameters
    ----------
    image : numpy.ndarray
        Input image containing a face, passed to the landmark detector.
    radians : bool
        Present for interface compatibility.
        NOTE(review): this flag is never used — the angles returned by
        rotationMatrixToEulerAngles are always in radians; confirm intent.

    Returns
    -------
    numpy.ndarray or None
        Array ``[pitch, yaw, roll]`` (radians), or ``None`` when no
        landmarks could be detected in the image.
    """
    # dlib's shape predictor yields 68 landmarks; only these 14 stable
    # points (eyebrow/eye corners, nose wings, mouth corners, lower lip,
    # chin) are used for the PnP pose estimate.
    TRACKED_POINTS = [17, 21, 22, 26, 36, 39, 42, 45, 31, 35, 48, 54, 57, 8]

    # 3D coordinates of the same points on a generic anthropometric head
    # model (arbitrary metric units, origin near the sellion). The order
    # must match TRACKED_POINTS one-to-one.
    LEFT_EYEBROW_LEFT = [6.825897, 6.760612, 4.402142]
    LEFT_EYEBROW_RIGHT = [1.330353, 7.122144, 6.903745]
    RIGHT_EYEBROW_LEFT = [-1.330353, 7.122144, 6.903745]
    RIGHT_EYEBROW_RIGHT = [-6.825897, 6.760612, 4.402142]
    LEFT_EYE_LEFT = [5.311432, 5.485328, 3.987654]
    LEFT_EYE_RIGHT = [1.789930, 5.393625, 4.413414]
    RIGHT_EYE_LEFT = [-1.789930, 5.393625, 4.413414]
    RIGHT_EYE_RIGHT = [-5.311432, 5.485328, 3.987654]
    NOSE_LEFT = [2.005628, 1.409845, 6.165652]
    NOSE_RIGHT = [-2.005628, 1.409845, 6.165652]
    MOUTH_LEFT = [2.774015, -2.080775, 5.048531]
    MOUTH_RIGHT = [-2.774015, -2.080775, 5.048531]
    LOWER_LIP = [0.000000, -3.116408, 6.097667]
    CHIN = [0.000000, -7.415691, 4.070434]

    landmarks_3D = np.float32([LEFT_EYEBROW_LEFT,
                               LEFT_EYEBROW_RIGHT,
                               RIGHT_EYEBROW_LEFT,
                               RIGHT_EYEBROW_RIGHT,
                               LEFT_EYE_LEFT,
                               LEFT_EYE_RIGHT,
                               RIGHT_EYE_LEFT,    # FIX: was RIGHT_EYEBROW_LEFT (2D points 42/45 are eye corners)
                               RIGHT_EYE_RIGHT,   # FIX: was RIGHT_EYEBROW_RIGHT
                               NOSE_LEFT,
                               NOSE_RIGHT,
                               MOUTH_LEFT,
                               MOUTH_RIGHT,
                               LOWER_LIP,
                               CHIN])

    # 2D pixel positions of the tracked landmarks in the input image.
    landmarks_2D = self._return_landmarks(inputImg=image,
                                          points_to_return=TRACKED_POINTS)
    if landmarks_2D is None:
        # No face / landmarks found: nothing to estimate.
        return None

    # solvePnP finds the rotation (rvec) and translation (tvec) that map
    # the 3D model points into the camera frame so that they project onto
    # the observed 2D landmarks.
    retval, rvec, tvec = cv2.solvePnP(landmarks_3D,
                                      landmarks_2D,
                                      self.camera_matrix,
                                      self.camera_distortion)

    # Convert the compact rotation vector into a full 3x3 rotation matrix.
    rmat, _ = cv2.Rodrigues(rvec)

    # Decompose the rotation matrix into Euler angles (pitch, yaw, roll).
    return self.rotationMatrixToEulerAngles(rmat)
# Calculates rotation matrix to euler angles
# The result is the same as MATLAB except the order
# of the euler angles ( x and z are swapped ).
def rotationMatrixToEulerAngles(self, R):
    """Convert a 3x3 rotation matrix into Euler angles.

    Parameters
    ----------
    R : numpy.ndarray
        3x3 rotation matrix (assumed orthonormal; not verified here).

    Returns
    -------
    numpy.ndarray
        Angles ``[x, y, z]`` in radians.
    """
    # sy = sqrt(R00^2 + R10^2); when it approaches zero the matrix is in
    # a gimbal-lock configuration, so a 1e-6 threshold selects the
    # degenerate formulas below.
    sy = math.sqrt(R[0, 0] * R[0, 0] + R[1, 0] * R[1, 0])
    singular = sy < 1e-6
    if not singular:
        x = math.atan2(R[2, 1], R[2, 2])
        y = math.atan2(-R[2, 0], sy)
        z = math.atan2(R[1, 0], R[0, 0])
    else:
        # Gimbal lock: x and z are coupled; fix z = 0 and recover x from
        # the remaining independent entries.
        x = math.atan2(-R[1, 2], R[1, 1])
        y = math.atan2(-R[2, 0], sy)
        z = 0
    return np.array([x, y, z])
# NOTE(review): orphaned fragment — `shape`, `lStart`, `lEnd`, `rStart`,
# `rEnd` and `eye_aspect_ratio` are not defined anywhere in this view, and
# these statements sit outside any visible function body (they follow a
# `return`). Presumably this belongs in a blink-detection routine that
# slices dlib landmarks into the left/right eye point ranges — confirm
# against the original file before relying on it.
# coordinates to compute the eye aspect ratio for both eyes
leftEye = shape[lStart:lEnd]
rightEye = shape[rStart:rEnd]
leftEAR =eye_aspect_ratio(leftEye)
rightEAR =eye_aspect_ratio(rightEye)