【openMMLab】MMPose 代码教程

系列文章目录

第一课:【OpenMMLab】OpenMMLab概述
第二课:【OpenMMLab】人体姿态估计、关键点检测与MMPose


前言

在上一次博客中,我们介绍了人体姿态估计,关键点检测任务相关的知识,本次博客将带大家进行关键点检测实战,体验关键点检测的全流程。
参考资料已在文中相应位置以链接形式给出。

实验环境

实验环境 Ubuntu 22.04,NVIDIA Driver Version 530.41.03

安装 Pytorch

  1. 创建 conda 虚拟环境。(此处省略了安装anaconda)
conda create -n env_name python=*.*  # create the virtual environment (replace *.* with a concrete Python version, e.g. 3.8)
conda activate env_name				# activate the virtual environment
  2. 安装 pytorch
    pytorch 官网选择所需的 Pytorch 版本和 CUDA 版本。
    本次安装使用 pip 安装,选择了最新的Pytorch 2.0.1 和 驱动支持的最高CUDA版本 11.8
pip3 install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu118

(原文此处为插图)
3. 简单验证Pytorch是否安装成功

python 		# 进入 python console
>>> import torch
>>> torch.__version__
'2.0.1+cu118'
>>> torch.cuda.is_available()
True
>>> exit()

安装 MMPose

用MIM安装MMCV

pip install -U openmim		# -U upgrades openmim to the latest version
mim install mmengine
mim install 'mmcv>=2.0.0'
mim install 'mmdet>=3.0.0'
# enter the python console to check that mmcv installed successfully
python 
>>> import mmcv
>>> from mmcv.ops import get_compiling_cuda_version, get_compiler_version
>>> mmcv.__version__
'2.0.0'
>>> get_compiling_cuda_version()
'11.8'
>>> get_compiler_version()
'GCC 9.3'
>>> exit()

安装其他工具包

pip install opencv-python pillow matplotlib seaborn tqdm pycocotools

下载MMPose

可参考:
廖雪峰 Git 教程
pip install . 和 pip install -e . 的区别

# enter the directory where the project will be installed
cd path_to_project
# requires git SSH to be configured; -b selects the branch used in the video
# tutorial — drop "-b tutorial2023" if you only need a plain mmpose install
git clone git@github.com:open-mmlab/mmpose.git -b tutorial2023
# alternatively clone over HTTPS — run only ONE of the two clone commands,
# the second would fail because the destination directory already exists
# git clone https://github.com/open-mmlab/mmpose.git -b tutorial2023
cd mmpose
mim install -e .		# editable install: local source changes take effect immediately
# create folders for training checkpoints, prediction outputs and test data.
# -p creates missing parents ("data" does not exist in a fresh clone) and the
# folder is "outputs" (not "output") to match --output-root in the demos below
mkdir -p checkpoint outputs data/test
# enter the python console to check that mmpose installed successfully
python
>>> import mmpose
>>> mmpose.__version__
'1.0.0'
>>> exit()

安装 MMDetection

# enter the directory where the project will be installed
cd path_to_project
# -b 3.x selects the branch used in the video tutorial; drop it otherwise
git clone git@github.com:open-mmlab/mmdetection.git -b 3.x
# alternatively clone over HTTPS — run only ONE of the two clone commands,
# the second would fail because the destination directory already exists
# git clone https://github.com/open-mmlab/mmdetection.git -b 3.x
cd mmdetection
# -p: do not fail if a directory already exists
mkdir -p checkpoint outputs data
pip install -v -e .		# -v verbose output, -e editable install
python
>>> import mmdet
>>> mmdet.__version__
'3.0.0'
>>> exit()

MMPose 预训练模型预测

准备预测素材

wget 命令详解

# enter the mmpose project directory
cd path_to_mmpose 
# multi-person image, source: https://www.pexels.com/zh-cn/photo/2168292/
wget https://zihao-openmmlab.obs.cn-east-3.myhuaweicloud.com/20220610-mmpose/images/multi-person.jpeg -O data/test/multi-person.jpeg

# single-person dance video (cxk)
# host fixed to include the OBS region (cn-east-3), matching the other URLs in this post
wget https://zihao-openmmlab.obs.cn-east-3.myhuaweicloud.com/20220610-mmpose/videos/cxk.mp4 -P data/test

# mother-and-daughter dance video, re-compressed by WeChat
wget https://zihao-openmmlab.obs.cn-east-3.myhuaweicloud.com/20220610-mmpose/videos/mother_wx.mp4 -P data/test

# two girls dancing, source: https://mixkit.co/free-stock-video/two-girls-having-fun-in-a-retro-restaurant-42298/
wget https://zihao-openmmlab.obs.cn-east-3.myhuaweicloud.com/20220610-mmpose/videos/two-girls.mp4 -O data/test/two-girls.mp4

模型库预训练模型

demo/mmdetection_cfg/faster_rcnn_r50_fpn_coco.py

https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth

  1. RTMPose-S
    projects/rtmpose/rtmpose/body_2d_keypoint/rtmpose-s_8xb256-420e_coco-256x192.py
    https://download.openmmlab.com/mmpose/v1/projects/rtmpose/rtmpose-s_simcc-aic-coco_pt-aic-coco_420e-256x192-fcb2599b_20230126.pth
  2. RTMPose-L
    projects/rtmpose/rtmpose/body_2d_keypoint/rtmpose-l_8xb256-420e_coco-384x288.py
    https://download.openmmlab.com/mmpose/v1/projects/rtmpose/rtmpose-l_simcc-aic-coco_pt-aic-coco_420e-384x288-97d6cb0f_20230228.pth

预测

  1. HRNet
# Top-down inference on a single image:
# Faster R-CNN (MMDetection) detects people, then HRNet-w32 predicts 2D keypoints.
# --bbox-thr / --kpt-thr: confidence thresholds for boxes / keypoints;
# --nms-thr: IoU threshold for box NMS; --draw-heatmap overlays predicted heatmaps.
python demo/topdown_demo_with_mmdet.py \
        demo/mmdetection_cfg/faster_rcnn_r50_fpn_coco.py \
        https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth \
        configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192.py \
        https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_coco_256x192-c78dce93_20200708.pth \
        --input data/test/multi-person.jpeg \
        --output-root outputs/B1_HRNet_1 \
        --device cuda:0 \
        --bbox-thr 0.5 \
        --kpt-thr 0.2 \
        --nms-thr 0.3 \
        --radius 8 \
        --thickness 4 \
        --draw-bbox \
        --draw-heatmap \
        --show-kpt-idx

  2. RTMPose

# Same top-down pipeline, but with RTMPose-S as the pose estimator.
# Note the higher --kpt-thr (0.5 vs 0.2 for HRNet above).
python demo/topdown_demo_with_mmdet.py \
        demo/mmdetection_cfg/faster_rcnn_r50_fpn_coco.py \
        https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth \
        projects/rtmpose/rtmpose/body_2d_keypoint/rtmpose-s_8xb256-420e_coco-256x192.py \
        https://download.openmmlab.com/mmpose/v1/projects/rtmpose/rtmpose-s_simcc-aic-coco_pt-aic-coco_420e-256x192-fcb2599b_20230126.pth \
        --input data/test/multi-person.jpeg \
        --output-root outputs/B1_RTM_1 \
        --device cuda:0 \
        --bbox-thr 0.5 \
        --kpt-thr 0.5 \
        --nms-thr 0.3 \
        --radius 8 \
        --thickness 4 \
        --draw-bbox \
        --draw-heatmap \
        --show-kpt-idx

预测结果:
(原文此处为预测结果插图)

视频预测

# Video inference: same commands as the image demos, with --input pointing at an
# .mp4 file; smaller --radius/--thickness suit the video resolution.
# HRNet
python demo/topdown_demo_with_mmdet.py \
        demo/mmdetection_cfg/faster_rcnn_r50_fpn_coco.py \
        https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth \
        configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192.py \
        https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_coco_256x192-c78dce93_20200708.pth \
        --input data/test/mother_wx.mp4 \
        --output-root outputs/B1_HRNet_2 \
        --device cuda:0 \
        --bbox-thr 0.5 \
        --kpt-thr 0.2 \
        --nms-thr 0.3 \
        --radius 5 \
        --thickness 2 \
        --draw-bbox \
        --draw-heatmap \
        --show-kpt-idx
# RTMPose
python demo/topdown_demo_with_mmdet.py \
        demo/mmdetection_cfg/faster_rcnn_r50_fpn_coco.py \
        https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth \
        projects/rtmpose/rtmpose/body_2d_keypoint/rtmpose-s_8xb256-420e_coco-256x192.py \
        https://download.openmmlab.com/mmpose/v1/projects/rtmpose/rtmpose-s_simcc-aic-coco_pt-aic-coco_420e-256x192-fcb2599b_20230126.pth \
        --input data/test/mother_wx.mp4 \
        --output-root outputs/B1_RTM_2 \
        --device cuda:0 \
        --bbox-thr 0.5 \
        --kpt-thr 0.5 \
        --nms-thr 0.3 \
        --radius 5 \
        --thickness 2 \
        --draw-bbox \
        --draw-heatmap \
        --show-kpt-idx

预测Python API

# Import the required packages
import cv2
import numpy as np
from PIL import Image

import matplotlib.pyplot as plt
# %matplotlib inline  # Jupyter-only magic; uncomment when running in a notebook
import torch
import mmcv
from mmcv import imread
import mmengine
from mmengine.registry import init_default_scope

from mmpose.apis import inference_topdown
from mmpose.apis import init_model as init_pose_estimator
from mmpose.evaluation.functional import nms  # fix: module is "functional", not "funcational"
from mmpose.registry import VISUALIZERS
from mmpose.structures import merge_data_samples

# fix: the MMDetection API is named inference_detector, not inference_detection
from mmdet.apis import inference_detector, init_detector

# Select device: first CUDA GPU when available, CPU otherwise
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print(device)

# Path of the image to run inference on
img_path = "data/test/multi-person.jpeg"

# Build the person detector (Faster R-CNN, checkpoint downloaded on first use)
detector = init_detector(
    'demo/mmdetection_cfg/faster_rcnn_r50_fpn_coco.py',
    'https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth',
    device=device
)
# Build the pose estimator (HRNet-w32); output_heatmaps=True keeps the
# predicted heatmaps in the result so they can be visualized later
pose_estimator = init_pose_estimator(
    'configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192.py',
    'https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_coco_256x192-c78dce93_20200708.pth',
    device=device,
    cfg_options={'model': {'test_cfg': {'output_heatmaps': True}}}
)
# Make registry lookups resolve in the detector's scope (mmdet)
init_default_scope(detector.cfg.get('default_scope', 'mmdet'))
# Run person detection
detect_result = inference_detector(detector, img_path)
detect_result.keys()                 # bare expressions: displayed only in a notebook
detect_result.pred_instances.labels
detect_result.pred_instances.scores

# Filter detections by confidence to obtain the final boxes
# confidence threshold
CONF_THRES = 0.5
pred_instance = detect_result.pred_instances.cpu().numpy()
bboxes = np.concatenate((pred_instance.bboxes, pred_instance.scores[:, None]), axis=1)
# keep class 0 only ("person" in COCO) above the confidence threshold
bboxes = bboxes[np.logical_and(pred_instance.labels == 0, pred_instance.scores > CONF_THRES)]
bboxes = bboxes[nms(bboxes, 0.3)][:, :4]  # NMS at IoU 0.3, then drop the score column
print(bboxes)

# Predict keypoints
# top-down: one keypoint prediction per detected bbox
pose_results = inference_topdown(pose_estimator, img_path, bboxes)
len(pose_results)
# Merge the per-bbox results into a single data sample
data_samples = merge_data_samples(pose_results)
print(data_samples.keys())
# keypoint coordinates: 17 COCO keypoints per person
print(data_samples.pred_instances.keypoints.shape)
# coordinates of every keypoint of person at index 0
print(data_samples.pred_instances.keypoints[0,:,:])

# Predicted heatmap for one keypoint class
print(data_samples.pred_fields.heatmaps.shape)
idx_point = 13
heatmap = data_samples.pred_fields.heatmaps[idx_point,:,:]
print(heatmap.shape)
plt.imshow(heatmap)
plt.show()

# Visualize with the official MMPose visualizer
# keypoint radius
pose_estimator.cfg.visualizer.radius = 10
# skeleton line width
pose_estimator.cfg.visualizer.line_width = 8
visualizer = VISUALIZERS.build(pose_estimator.cfg.visualizer)
# dataset meta-info (keypoint names, skeleton links, colors)
visualizer.set_dataset_meta(pose_estimator.dataset_meta)
# read the image and convert BGR -> RGB for display
img = mmcv.imread(img_path)
img = mmcv.imconvert(img, 'bgr', 'rgb')
img_output = visualizer.add_datasample(
            'result',
            img,
            data_sample=data_samples,
            draw_gt=False,
            draw_heatmap=True,
            draw_bbox=True,
            show_kpt_idx=True,
            show=False,
            wait_time=0,
            out_file='outputs/B2.jpg'
)
plt.figure(figsize=(10,10))
plt.imshow(img_output)
plt.show()
  • 0
    点赞
  • 5
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值