基于mediapipe进行的肢体行为标记
采用方法
主要使用 Google 的 MediaPipe 工具包中的 Pose 模块,以及 OpenCV 中的图像显示与绘制模块。
使用事项
文中的 1.flv 可以换成其他格式的视频,只要视频画面中有肢体存在即可。
代码如下
"""
基础环境 python3
安装包 pip install opencv-python mediapipe
"""
import cv2
import mediapipe as mp
import time
class poseDetector():
    """Detect and draw human pose landmarks on images via MediaPipe Pose.

    Wraps ``mp.solutions.pose.Pose`` with a two-step workflow:
    call :meth:`findPose` on a BGR frame first, then :meth:`findPosition`
    to extract pixel coordinates of the detected landmarks.
    """

    def __init__(self, mode=False, model=2, smooth=True, detection=0.5, tracking=0.5):
        """Create the underlying MediaPipe Pose estimator.

        Args:
            mode: static_image_mode — treat each frame independently.
            model: model_complexity (0, 1, or 2; higher = slower/more accurate).
            smooth: smooth_landmarks across frames (video mode only).
            detection: min_detection_confidence threshold.
            tracking: min_tracking_confidence threshold.
        """
        self.mode = mode
        self.model = model
        self.smooth = smooth
        self.detection = detection
        self.tracking = tracking
        # Set before any detection so findPosition() called out of order
        # returns an empty list instead of raising AttributeError.
        self.results = None
        self.mpPose = mp.solutions.pose
        # BUGFIX: pass keyword arguments. Current MediaPipe inserts
        # enable_segmentation/smooth_segmentation positionally before the
        # confidence thresholds, so the original positional call silently
        # bound `detection`/`tracking` to the wrong parameters.
        self.poses = self.mpPose.Pose(
            static_image_mode=self.mode,
            model_complexity=self.model,
            smooth_landmarks=self.smooth,
            min_detection_confidence=self.detection,
            min_tracking_confidence=self.tracking,
        )
        self.mpDraw = mp.solutions.drawing_utils

    def findPose(self, img, draw=True):
        """Run pose detection on a BGR image; optionally draw the skeleton.

        Args:
            img: BGR image (as returned by OpenCV).
            draw: when True, draw landmarks and connections onto ``img``.

        Returns:
            The same image object (annotated in place when draw=True).
        """
        # MediaPipe expects RGB input; OpenCV delivers BGR.
        imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        self.results = self.poses.process(imgRGB)
        if self.results.pose_landmarks and draw:
            self.mpDraw.draw_landmarks(
                img, self.results.pose_landmarks, self.mpPose.POSE_CONNECTIONS
            )
        return img

    def findPosition(self, img, draw=True):
        """Convert the last detection's landmarks to pixel coordinates.

        Args:
            img: the image the landmarks refer to (used for its dimensions).
            draw: when True, mark each landmark with a filled circle.

        Returns:
            List of (id, x, y) tuples in pixel coordinates; empty when no
            pose has been detected (or findPose was never called).
        """
        lmList = []
        if self.results and self.results.pose_landmarks:
            h, w, c = img.shape  # invariant: hoisted out of the loop
            for id, lm in enumerate(self.results.pose_landmarks.landmark):
                # Landmarks are normalized [0, 1]; scale to pixels.
                cx, cy = int(lm.x * w), int(lm.y * h)
                lmList.append((id, cx, cy))
                if draw:
                    cv2.circle(img, (cx, cy), 10, (255, 0, 255), cv2.FILLED)
        return lmList
def main():
    """Play a video, overlay detected pose landmarks and an FPS counter.

    Reads frames from ``1.flv`` until the stream ends or the user
    presses 'q'; releases the capture and closes windows on exit.
    """
    pTime = 0
    cap = cv2.VideoCapture('1.flv')
    detector = poseDetector()
    try:
        while True:
            success, img = cap.read()
            # BUGFIX: the original ignored `success`, so end-of-video
            # crashed inside cvtColor on a None frame.
            if not success:
                break
            img = detector.findPose(img)
            lmList = detector.findPosition(img)
            if len(lmList) != 0:
                pass  # hook: per-landmark processing goes here
            # FPS from the wall-clock delta between consecutive frames.
            cTime = time.time()
            fps = 1 / (cTime - pTime)
            pTime = cTime
            cv2.putText(img, f"fps:{int(fps)}", (10, 70),
                        cv2.FONT_HERSHEY_PLAIN, 3, (255, 0, 255), 3)
            cv2.imshow('Image', img)
            # Allow the user to quit with 'q' (original ignored the key).
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
    finally:
        # Always release OS resources, even on an exception mid-loop.
        cap.release()
        cv2.destroyAllWindows()


if __name__ == '__main__':
    main()