YOLOv11 Pose Estimation

Human pose estimation using the YOLOv11 yolo11n-pose.pt model.

This example covers:

1: extracting the keypoints and saving them to an Excel file;

2: drawing the skeleton;

3: normalizing the keypoint coordinates (the keypoint indexing convention used throughout is listed right after this list).
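
For reference, the 17 keypoints predicted by COCO-trained YOLO pose models follow the standard COCO ordering below. This mapping is the standard convention rather than part of the original script, but it is what the skeleton index pairs in the code rely on:

# Standard COCO 17-keypoint ordering used by YOLO pose models;
# the skeleton pairs in the script connect these indices.
COCO_KEYPOINTS = [
    "nose",            # 0
    "left_eye",        # 1
    "right_eye",       # 2
    "left_ear",        # 3
    "right_ear",       # 4
    "left_shoulder",   # 5
    "right_shoulder",  # 6
    "left_elbow",      # 7
    "right_elbow",     # 8
    "left_wrist",      # 9
    "right_wrist",     # 10
    "left_hip",        # 11
    "right_hip",       # 12
    "left_knee",       # 13
    "right_knee",      # 14
    "left_ankle",      # 15
    "right_ankle",     # 16
]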

import cv2
import os
from ultralytics import YOLO
import numpy as np
import pandas as pd

# Load the model
model_path = "weights/yolo11n-pose.pt"  # replace with your actual model path
model = YOLO(model_path)

# Video path
video_path = "video/方形步加四分之一点转-xxx..avi"  # replace with your video path
cap = cv2.VideoCapture(video_path)

# Make sure the video opened successfully
assert cap.isOpened(), "Error reading video file"

# Video dimensions
frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

# Video writer settings
fps = int(cap.get(cv2.CAP_PROP_FPS))
os.makedirs("./runs", exist_ok=True)  # cv2.VideoWriter does not create missing directories
out_path = "./runs/" + os.path.splitext(os.path.basename(video_path))[0] + ".avi"
video_writer = cv2.VideoWriter(out_path, cv2.VideoWriter_fourcc(*"XVID"), fps, (frame_width, frame_height))

# List for accumulating per-frame keypoint records
keypoints_list = []

# Skeleton connections as pairs of COCO keypoint indices (constant, so defined once)
skeleton = [[0, 1], [0, 2], [1, 3], [2, 4], [6, 5], [6, 8], [8, 10], [5, 7], [7, 9],
            [6, 12], [12, 14], [14, 16], [5, 11], [11, 13], [13, 15], [11, 12]]

# Pose estimation loop
frame_index = 0
while cap.isOpened():
    success, frame = cap.read()
    if not success:
        break

    # Run YOLO inference on the current frame
    results = model.predict(source=frame, save=False, verbose=False)

    # Draw keypoints and collect normalized coordinates
    for result in results:
        if result.keypoints is not None:  # guard against frames with no detections
            keypoints = result.keypoints.data.cpu().numpy()  # shape (num_persons, 17, 3): x, y, confidence
            for kpt in keypoints:
                frame_keypoints = []
                for idx, (x, y, conf) in enumerate(kpt):
                    if conf > 0.5:  # keep only keypoints with confidence above 0.5
                        # Normalize coordinates to [0, 1] by the frame size
                        x_norm = x / frame_width
                        y_norm = y / frame_height
                        # Record the COCO keypoint index as well, so skipped
                        # low-confidence points do not shift indices in the export
                        frame_keypoints.append((idx, x_norm, y_norm, conf))
                        cv2.circle(frame, (int(x), int(y)), 5, (0, 255, 0), -1)

                # Save this person's keypoints for the current frame
                keypoints_list.append({"frame": frame_index, "keypoints": frame_keypoints})

                # Draw the skeleton connections defined above
                for p1, p2 in skeleton:
                    x1, y1, c1 = kpt[p1]
                    x2, y2, c2 = kpt[p2]
                    if c1 > 0.5 and c2 > 0.5:  # draw only when both endpoints are confident
                        cv2.line(frame, (int(x1), int(y1)), (int(x2), int(y2)), (255, 0, 0), 2)

    # Show the annotated frame
    cv2.imshow("Pose Estimation", frame)
    video_writer.write(frame)
    frame_index += 1

    # Press 'q' to quit
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# Release resources
cap.release()
video_writer.release()
cv2.destroyAllWindows()

# Save the keypoint data to an Excel file
data = []
for item in keypoints_list:
    frame = item["frame"]
    for (idx, x_norm, y_norm, conf) in item["keypoints"]:
        data.append({"Frame": frame, "Keypoint_Index": idx, "X_Norm": x_norm, "Y_Norm": y_norm, "Confidence": conf})

# Convert to a DataFrame
df = pd.DataFrame(data)

# Save as an Excel file (pandas uses openpyxl to write .xlsx)
output_excel_path = "keypoints_output_normalized.xlsx"
df.to_excel(output_excel_path, index=False)
print(f"归一化关键点数据已保存到 {output_excel_path}")

Results: the annotated video is written to ./runs/ and the normalized keypoints to keypoints_output_normalized.xlsx.
