Functional style
import cv2
import mediapipe as mp
def video_ope(file):
    frame = 0
    cap = cv2.VideoCapture(file)
    myface_mesh = mp.solutions.face_mesh
    myDraw = mp.solutions.drawing_utils
    face_mesh = myface_mesh.FaceMesh(max_num_faces=2)
    while True:
        ret, img = cap.read()
        if not ret:  # stop when the video ends or a frame cannot be read
            break
        # MediaPipe expects RGB input; OpenCV delivers BGR
        img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        results = face_mesh.process(img_rgb)
        if results.multi_face_landmarks:
            for faceLms in results.multi_face_landmarks:
                drawing_spec = myDraw.DrawingSpec(thickness=2, circle_radius=1, color=(255, 255, 255))
                # note: FACE_CONNECTIONS was renamed to FACEMESH_TESSELATION in newer mediapipe releases
                myDraw.draw_landmarks(img, faceLms, myface_mesh.FACE_CONNECTIONS,
                                      landmark_drawing_spec=drawing_spec)
                for id, lm in enumerate(faceLms.landmark):
                    ih, iw, ic = img.shape
                    # landmark coordinates are normalized to [0, 1]; convert to pixels
                    x, y = int(lm.x * iw), int(lm.y * ih)
                    # print(id, x, y)
        frame += 1
        cv2.putText(img, str(frame), (10, 70), cv2.FONT_HERSHEY_PLAIN, 3, (255, 0, 255), 3)
        cv2.imshow("Frame", img)
        key = cv2.waitKey(1) & 0xFF
        if key == ord('q'):  # 'q' pauses until any key is pressed
            cv2.waitKey(0)
        if key == 27:  # Esc quits
            break
    cap.release()
    cv2.destroyAllWindows()
def main():
    file = "video/eye.mp4"
    video_ope(file)

if __name__ == "__main__":
    main()
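Besides the frame counter, a frame-rate (FPS) overlay is often useful when tuning the pipeline. A minimal standalone sketch of that overlay, assuming a webcam at index 0 (a video file path works the same way); the FaceMesh processing is left out so the timing logic stays isolated:

import time
import cv2

cap = cv2.VideoCapture(0)  # webcam; a file path such as "video/eye.mp4" also works
ptime = 0
while True:
    ret, img = cap.read()
    if not ret:
        break
    ctime = time.time()
    # elapsed time between two consecutive frames -> frames per second
    fps = 1 / (ctime - ptime) if ctime != ptime else 0
    ptime = ctime
    cv2.putText(img, "FPS: " + str(int(fps)), (10, 70), cv2.FONT_HERSHEY_PLAIN, 3, (0, 255, 0), 3)
    cv2.imshow("Frame", img)
    if (cv2.waitKey(1) & 0xFF) == 27:  # Esc quits
        break
cap.release()
cv2.destroyAllWindows()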
Modular (class-based) style
import cv2
import mediapipe as mp
class FaceMeshDetector:
    def __init__(self, staticMode=False, maxFaces=2, minDetectionCon=0.5, minTrackCon=0.5):
        self.staticMode = staticMode
        self.maxFaces = maxFaces
        self.minDetectionCon = minDetectionCon
        self.minTrackCon = minTrackCon
        self.mpDraw = mp.solutions.drawing_utils
        self.mpFaceMesh = mp.solutions.face_mesh
        # keyword arguments avoid depending on the positional order of FaceMesh's parameters
        self.faceMesh = self.mpFaceMesh.FaceMesh(static_image_mode=self.staticMode,
                                                 max_num_faces=self.maxFaces,
                                                 min_detection_confidence=self.minDetectionCon,
                                                 min_tracking_confidence=self.minTrackCon)
        self.drawSpec = self.mpDraw.DrawingSpec(thickness=1, circle_radius=1, color=(255, 255, 255))

    def findFaceMesh(self, img):
        # return the raw landmark sets of every detected face
        faceList = []
        img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        results = self.faceMesh.process(img_rgb)
        if results.multi_face_landmarks:
            for faceLms in results.multi_face_landmarks:
                faceList.append(faceLms)
        return faceList

    def drawFace(self, img, faceLms):
        # draw one face's mesh and return its landmarks as pixel coordinates
        self.mpDraw.draw_landmarks(img, faceLms, self.mpFaceMesh.FACE_CONNECTIONS,
                                   landmark_drawing_spec=self.drawSpec)
        face = []
        for id, lm in enumerate(faceLms.landmark):
            ih, iw, ic = img.shape
            x, y = int(lm.x * iw), int(lm.y * ih)
            # print(id, x, y)  # uncomment to dump every landmark's pixel position
            cv2.putText(img, str(id), (x, y), cv2.FONT_HERSHEY_PLAIN, 0.7, (0, 255, 0), 1)
            face.append([x, y])
        return face

    def FindFaceMesh(self, img, draw=True):
        # detect, optionally draw, and return pixel coordinates for every face
        faceList = []
        img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        results = self.faceMesh.process(img_rgb)
        if results.multi_face_landmarks:
            for faceLms in results.multi_face_landmarks:
                if draw:
                    self.mpDraw.draw_landmarks(img, faceLms, self.mpFaceMesh.FACE_CONNECTIONS,
                                               landmark_drawing_spec=self.drawSpec)
                face = []
                for id, lm in enumerate(faceLms.landmark):
                    ih, iw, ic = img.shape
                    x, y = int(lm.x * iw), int(lm.y * ih)
                    # print(id, x, y)  # uncomment to dump every landmark's pixel position
                    cv2.putText(img, str(id), (x, y), cv2.FONT_HERSHEY_PLAIN, 0.5, (0, 255, 0), 1)
                    face.append([x, y])
                faceList.append(face)
        return img, faceList
def video_ope(file):
    frame = 0
    cap = cv2.VideoCapture(file)
    faceObj = FaceMeshDetector()
    while True:
        ret, img = cap.read()
        if not ret:  # stop when the video ends or a frame cannot be read
            break
        img, faceList = faceObj.FindFaceMesh(img)
        frame += 1
        cv2.putText(img, str(frame), (10, 70), cv2.FONT_HERSHEY_PLAIN, 3, (255, 0, 255), 3)
        cv2.imshow("Frame", img)
        key = cv2.waitKey(1) & 0xFF
        if key == ord('q'):  # 'q' pauses until any key is pressed
            cv2.waitKey(0)
        if key == 27:  # Esc quits
            break
    cap.release()
    cv2.destroyAllWindows()
def main():
    file = "video/eye.mp4"
    video_ope(file)

if __name__ == "__main__":
    main()
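The point of the class-based version is that the detector can be reused from other scripts. A minimal sketch of that, assuming the code above is saved as FaceMeshModule.py (a hypothetical filename) and a webcam is available at index 0:

import cv2
from FaceMeshModule import FaceMeshDetector

cap = cv2.VideoCapture(0)  # webcam; a video file path also works
detector = FaceMeshDetector(maxFaces=1)
while True:
    ret, img = cap.read()
    if not ret:
        break
    img, faceList = detector.FindFaceMesh(img, draw=True)
    if faceList:
        # faceList[0] holds the [x, y] pixel coordinates of all 468 landmarks of the first face
        print("landmark 0 of face 0:", faceList[0][0])
    cv2.imshow("Frame", img)
    if (cv2.waitKey(1) & 0xFF) == 27:  # Esc quits
        break
cap.release()
cv2.destroyAllWindows()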
Reference: https://google.github.io/mediapipe/solutions/face_mesh.html