1、模型
OpenCV 的 dnn 模块提供三个预训练的 OpenPose 模型:
1、coco model
2、MPI model
3、hand pose model
2、示例代码
(1)c++
//
// this sample demonstrates the use of pretrained openpose networks with opencv's dnn module.
//
// it can be used for body pose detection, using either the COCO model(18 parts):
// http://posefs1.perception.cs.cmu.edu/OpenPose/models/pose/coco/pose_iter_440000.caffemodel
// https://raw.githubusercontent.com/opencv/opencv_extra/master/testdata/dnn/openpose_pose_coco.prototxt
//
// or the MPI model(16 parts):
// http://posefs1.perception.cs.cmu.edu/OpenPose/models/pose/mpi/pose_iter_160000.caffemodel
// https://raw.githubusercontent.com/opencv/opencv_extra/master/testdata/dnn/openpose_pose_mpi_faster_4_stages.prototxt
//
// (to simplify this sample, the body models are restricted to a single person.)
//
//
// you can also try the hand pose model:
// http://posefs1.perception.cs.cmu.edu/OpenPose/models/hand/pose_iter_102000.caffemodel
// https://raw.githubusercontent.com/CMU-Perceptual-Computing-Lab/openpose/master/models/hand/pose_deploy.prototxt
//
#include <opencv2/core.hpp>
#include <opencv2/dnn.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>
using namespace cv;
using namespace cv::dnn;
#include <iostream>
using namespace std;
// connection table, in the format [model_id][pair_id][from/to]
// please look at the nice explanation at the bottom of:
// https://github.com/CMU-Perceptual-Computing-Lab/openpose/blob/master/doc/output.md
//
// Keypoint connection table, indexed as [model_id][pair_id][from/to].
// model_id: 0 = COCO body (18 parts), 1 = MPI body (16 parts), 2 = hand (22 parts).
// Each inner pair names two keypoint indices that should be joined by a limb line.
const int POSE_PAIRS[3][20][2] = {
{ // COCO body
{ 1,2 },{ 1,5 },{ 2,3 },
{ 3,4 },{ 5,6 },{ 6,7 },
{ 1,8 },{ 8,9 },{ 9,10 },
{ 1,11 },{ 11,12 },{ 12,13 },
{ 1,0 },{ 0,14 },
{ 14,16 },{ 0,15 },{ 15,17 }
},
{ // MPI body
{ 0,1 },{ 1,2 },{ 2,3 },
{ 3,4 },{ 1,5 },{ 5,6 },
{ 6,7 },{ 1,14 },{ 14,8 },{ 8,9 },
{ 9,10 },{ 14,11 },{ 11,12 },{ 12,13 }
},
{ // hand
{ 0,1 },{ 1,2 },{ 2,3 },{ 3,4 }, // thumb
{ 0,5 },{ 5,6 },{ 6,7 },{ 7,8 }, // pinkie
{ 0,9 },{ 9,10 },{ 10,11 },{ 11,12 }, // middle
{ 0,13 },{ 13,14 },{ 14,15 },{ 15,16 }, // ring
{ 0,17 },{ 17,18 },{ 18,19 },{ 19,20 } // small
} };
// Draws the detected keypoints in `result` (the network's heatmap output)
// onto `frame`, connecting them per POSE_PAIRS.
// NOTE(review): `thresh` is declared int, but main() passes a float (0.1),
// which truncates to 0 and effectively disables the confidence threshold —
// confirm whether this parameter should be float/double.
void drawBody(cv::Mat& frame, cv::Mat& result, int thresh);
int main()
{
String modelTxt = R"(../../data/testdata/dnn/openpose_pose_mpi.prototxt)";
String modelBin = R"(../../data/testdata/dnn/openpose_pose_mpi.caffemodel)";
int W_in = 368;
int H_in = 368;
float thresh = 0.1;
int backendId = cv::dnn::DNN_BACKEND_OPENCV;
int targetId = cv::dnn::DNN_TARGET_CPU;
// read the network model
Net net = readNetFromCaffe(modelTxt, modelBin);
net.setPreferableBackend(backendId);
net.setPreferableTarget(targetId);
VideoCapture cap(0);
if (!cap.isOpened()) {
cerr << "open cam err." << endl;
return 0;
}
Mat frame = cv::imread(R"(../../data/body.jpg)");
// while (cap.read(frame)) {
if (frame.empty()) {
// break;
return 0;
}
// send it through the network
Mat inputBlob = blobFromImage(frame, 1.0 / 255, Size(W_in, H_in), Scalar(0, 0, 0), false, false);
net.setInput(inputBlob);
Mat result = net.forward();
// the result is an array of "heatmaps", the probability of a body part being in location x,y
drawBody(frame, result, thresh);
std::vector<double> layersTimes;
double freq = getTickFrequency() / 1000;
double t = net.getPerfProfile(layersTimes) / freq;
std::string label = format("Inference time: %.2f ms", t);
putText(frame, label, Point(0, 15), FONT_HERSHEY_SIMPLEX, 0.5, Scalar(0, 255, 0));
imshow("OpenPose", frame);
waitKey(0);
// waitKey(1);
// }
return 0;
}
// Renders the pose described by the network output `result` onto `frame`.
// `result` is a 1 x nparts x H x W blob of per-part heatmaps; the model type
// (COCO / MPI / hand) is deduced from the number of parts.
// NOTE(review): `thresh` arrives as int although callers pass float 0.1,
// which truncates to 0 — confirm the intended parameter type.
void drawBody(cv::Mat& frame, cv::Mat& result, int thresh)
{
    const int heatH = result.size[2];
    const int heatW = result.size[3];
    int partCount = result.size[1];

    // Identify the model from the number of output channels and pick the
    // matching row of POSE_PAIRS.
    int modelIdx = -1;
    int pairCount = 0;
    switch (partCount) {
    case 19: // COCO body: 18 real parts + a background channel we skip
        modelIdx = 0;
        pairCount = 17;
        partCount = 18;
        break;
    case 16: // MPI body
        modelIdx = 1;
        pairCount = 14;
        break;
    case 22: // hand
        modelIdx = 2;
        pairCount = 20;
        break;
    default:
        cerr << "there should be 19 parts for the COCO model, 16 for MPI, or 22 for the hand one, but this model has " << partCount << " parts." << endl;
        return;
    }

    // Locate each part at the global maximum of its heatmap. Only one maximum
    // per map is taken, so a single person/hand is supported.
    vector<Point> joints(22, Point(-1, -1));
    for (int part = 0; part < partCount; ++part) {
        // Wrap the part's H x W heatmap slice without copying.
        Mat heat(heatH, heatW, CV_32F, result.ptr(0, part));
        double best;
        Point where;
        minMaxLoc(heat, 0, &best, 0, &where);
        if (best > thresh)
            joints[part] = where;
    }

    // Scale factors from heatmap coordinates back to image coordinates.
    const float scaleX = float(frame.cols) / heatW;
    const float scaleY = float(frame.rows) / heatH;

    // Draw every connection whose two endpoints were confidently detected.
    for (int pair = 0; pair < pairCount; ++pair) {
        Point2f from = joints[POSE_PAIRS[modelIdx][pair][0]];
        Point2f to = joints[POSE_PAIRS[modelIdx][pair][1]];
        // A coordinate <= 0 marks a part that was not found with enough confidence.
        if (from.x <= 0 || from.y <= 0 || to.x <= 0 || to.y <= 0)
            continue;
        from = Point2f(from.x * scaleX, from.y * scaleY);
        to = Point2f(to.x * scaleX, to.y * scaleY);
        line(frame, from, to, Scalar(0, 200, 0), 2);
        circle(frame, from, 3, Scalar(0, 0, 200), -1);
        circle(frame, to, 3, Scalar(0, 0, 200), -1);
    }
}
(2)python
# To use Inference Engine backend, specify location of plugins:
# export LD_LIBRARY_PATH=/opt/intel/deeplearning_deploymenttoolkit/deployment_tools/external/mklml_lnx/lib:$LD_LIBRARY_PATH
import cv2 as cv
import numpy as np
import argparse
parser = argparse.ArgumentParser(
description='This script is used to demonstrate OpenPose human pose estimation network '
'from https://github.com/CMU-Perceptual-Computing-Lab/openpose project using OpenCV. '
'The sample and model are simplified and could be used for a single person on the frame.')
parser.add_argument('--input', help='Path to image or video. Skip to capture frames from camera')
parser.add_argument('--proto', help='Path to .prototxt')
parser.add_argument('--model', help='Path to .caffemodel')
parser.add_argument('--dataset', help='Specify what kind of model was trained. '
'It could be (COCO, MPI) depends on dataset.')
parser.add_argument('--thr', default=0.1, type=float, help='Threshold value for pose parts heat map')
parser.add_argument('--width', default=368, type=int, help='Resize input to specific width.')
parser.add_argument('--height', default=368, type=int, help='Resize input to specific height.')
args = parser.parse_args()
args.dataset = 'MPI'
args.proto = '../data/testdata/dnn/openpose_pose_mpi.prototxt'
args.model = '../data/testdata/dnn/openpose_pose_mpi.caffemodel'
if args.dataset == 'COCO':
BODY_PARTS = { "Nose": 0, "Neck": 1, "RShoulder": 2, "RElbow": 3, "RWrist": 4,
"LShoulder": 5, "LElbow": 6, "LWrist": 7, "RHip": 8, "RKnee": 9,
"RAnkle": 10, "LHip": 11, "LKnee": 12, "LAnkle": 13, "REye": 14,
"LEye": 15, "REar": 16, "LEar": 17, "Background": 18 }
POSE_PAIRS = [ ["Neck", "RShoulder"], ["Neck", "LShoulder"], ["RShoulder", "RElbow"],
["RElbow", "RWrist"], ["LShoulder", "LElbow"], ["LElbow", "LWrist"],
["Neck", "RHip"], ["RHip", "RKnee"], ["RKnee", "RAnkle"], ["Neck", "LHip"],
["LHip", "LKnee"], ["LKnee", "LAnkle"], ["Neck", "Nose"], ["Nose", "REye"],
["REye", "REar"], ["Nose", "LEye"], ["LEye", "LEar"] ]
else:
assert(args.dataset == 'MPI')
BODY_PARTS = { "Head": 0, "Neck": 1, "RShoulder": 2, "RElbow": 3, "RWrist": 4,
"LShoulder": 5, "LElbow": 6, "LWrist": 7, "RHip": 8, "RKnee": 9,
"RAnkle": 10, "LHip": 11, "LKnee": 12, "LAnkle": 13, "Chest": 14,
"Background": 15 }
POSE_PAIRS = [ ["Head", "Neck"], ["Neck", "RShoulder"], ["RShoulder", "RElbow"],
["RElbow", "RWrist"], ["Neck", "LShoulder"], ["LShoulder", "LElbow"],
["LElbow", "LWrist"], ["Neck", "Chest"], ["Chest", "RHip"], ["RHip", "RKnee"],
["RKnee", "RAnkle"], ["Chest", "LHip"], ["LHip", "LKnee"], ["LKnee", "LAnkle"] ]
# Network input size (368x368 by default, per the OpenPose models).
netWidth = args.width
netHeight = args.height

# Load the Caffe model and run it through OpenCV's DNN module.
net = cv.dnn.readNetFromCaffe(cv.samples.findFile(args.proto), cv.samples.findFile(args.model))
net.setPreferableBackend(cv.dnn.DNN_BACKEND_OPENCV)
net.setPreferableTarget(cv.dnn.DNN_TARGET_OPENCL)

# Read from the given file, or from the default camera when --input is absent.
cap = cv.VideoCapture(args.input if args.input else 0)

while cv.waitKey(1) < 0:
    ok, frame = cap.read()
    if not ok:
        cv.waitKey()
        break

    frameHeight, frameWidth = frame.shape[:2]

    # Scale pixels to [0,1], resize, and run a forward pass.
    blob = cv.dnn.blobFromImage(frame, 1.0 / 255, (netWidth, netHeight),
                                (0, 0, 0), swapRB=False, crop=False)
    net.setInput(blob)
    out = net.forward()
    assert(len(BODY_PARTS) == out.shape[1])

    # For each part, take the global maximum of its heatmap and keep it only
    # if confident enough. Originally all local maxima would be searched; the
    # sample is simplified to a single pose, so one global maximum suffices.
    points = []
    for partId in range(len(BODY_PARTS)):
        heatMap = out[0, partId, :, :]
        _, conf, _, loc = cv.minMaxLoc(heatMap)
        # Map heatmap coordinates back to image coordinates.
        x = (frameWidth * loc[0]) / out.shape[3]
        y = (frameHeight * loc[1]) / out.shape[2]
        points.append((int(x), int(y)) if conf > args.thr else None)

    # Draw every limb whose two endpoints were detected.
    for partFrom, partTo in POSE_PAIRS:
        assert(partFrom in BODY_PARTS)
        assert(partTo in BODY_PARTS)
        idFrom = BODY_PARTS[partFrom]
        idTo = BODY_PARTS[partTo]
        if points[idFrom] and points[idTo]:
            cv.line(frame, points[idFrom], points[idTo], (0, 255, 0), 3)
            cv.ellipse(frame, points[idFrom], (3, 3), 0, 0, 360, (0, 0, 255), cv.FILLED)
            cv.ellipse(frame, points[idTo], (3, 3), 0, 0, 360, (0, 0, 255), cv.FILLED)

    # Overlay the inference time of the last forward pass.
    t, _ = net.getPerfProfile()
    freq = cv.getTickFrequency() / 1000
    cv.putText(frame, '%.2fms' % (t / freq), (10, 20), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0))
    cv.imshow('OpenPose using OpenCV', frame)
3、示例
(1) mpi model
openpose_pose_mpi.caffemodel
openpose_pose_mpi_faster_4_stages.prototxt : cpu 945ms, opencl 1543ms
openpose_pose_mpi.prototxt : cpu 1365ms, opencl 1732ms
(2) coco model
openpose_pose_coco.caffemodel
openpose_pose_coco.prototxt : cpu 1420ms, opencl 1837ms
(3) hand model
opencl 计时显示 2ms,但实际一次 forward 约耗时 1s(OpenCV 的 getPerfProfile 对某些模型/后端的 forward 计时可能不准确?)
cpu 1061ms