mediapipe 导出dll (Exporting MediaPipe holistic tracking as a DLL)

// HolisticTrackingApi.h
//
// C-style exported API for the MediaPipe holistic-tracking DLL:
// init -> per-frame detect -> release.

#ifndef HOLISTIC_TRACKING_API_H
#define HOLISTIC_TRACKING_API_H

// Always defined in this header, so EXPORT_API below resolves to the
// dllexport / default-visibility variant.
// NOTE(review): clients including this header also get dllexport rather than
// dllimport — confirm this is intended for consumer builds.
#define EXPORT

#include <malloc.h>
#include <vector>
#include "HolisticTrackingDetect.h"  // provides Point2D

#ifdef _WIN32
#ifdef EXPORT
#define EXPORT_API __declspec(dllexport)
#else
#define EXPORT_API __declspec(dllimport)
#endif
#else
#include <stdlib.h>

#ifdef EXPORT
#define EXPORT_API __attribute__((visibility ("default")))
#else
#endif

#endif

#ifdef __cplusplus
extern "C" {
#endif 

// Fallback: expand to nothing when no platform branch above defined it.
#ifndef EXPORT_API
#define EXPORT_API
#endif

    // Initializes the holistic tracking graph from model_path.
    // Returns 1 on success, 0 on failure.
    EXPORT_API int MediapipeHolisticTrackingInit(const char* model_path);
    // Runs detection on one raw frame and appends pixel-coordinate landmarks
    // to the three output vectors. Returns 1 on success, 0 on failure.
    // NOTE(review): std::vector<> reference parameters inside extern "C" mean
    // these functions are not callable from plain C — confirm all callers are C++.
    EXPORT_API int MediapipeHolisticTrackingDetectFrameDirect(
        int image_width, int image_height,
        void* image_data,
        std::vector<Point2D>& pose_result,
        std::vector<Point2D>& left_hand_result,
        std::vector<Point2D>& right_hand_result);

    // Releases the graph and its resources. Returns 1 on success, 0 on failure.
    EXPORT_API int MediapipeHolisticTrackingRelease();

#ifdef __cplusplus
}
#endif 

#endif // HOLISTIC_TRACKING_API_H
// HolisticTrackingApi.cpp
//
// Implements the exported DLL entry points as thin wrappers around a single
// process-wide HolisticTrackingDetect instance.

#include "HolisticTrackingApi.h"
#include "HolisticTrackingDetect.h"

using namespace GoogleMediapipeDetect;

// Shared detector instance used by all exported functions.
// NOTE(review): not synchronized — concurrent calls from multiple threads
// would race on this object; confirm single-threaded use.
HolisticTrackingDetect m_HolisticTrackingDetect;

// Initializes the holistic tracking graph from the given model path.
// Returns 1 on success, 0 on failure (forwards InitModel's status code).
EXPORT_API int MediapipeHolisticTrackingInit(const char* model_path)
{
    const int init_status = m_HolisticTrackingDetect.InitModel(model_path);
    return init_status;
}

// Runs holistic detection on a single raw frame and fills the output vectors
// (pose, left hand, right hand) with landmark positions in pixel coordinates.
// Returns 1 on success, 0 on failure.
EXPORT_API int MediapipeHolisticTrackingDetectFrameDirect(
    int image_width, int image_height,
    void* image_data,
    std::vector<Point2D>& pose_result,
    std::vector<Point2D>& left_hand_result,
    std::vector<Point2D>& right_hand_result)
{
    // Forward all six arguments to the shared detector instance.
    return m_HolisticTrackingDetect.DetectImageDirect(
        image_width, image_height, image_data,
        pose_result, left_hand_result, right_hand_result);
}

// Tears down the tracking graph and releases its resources.
// Returns 1 on success, 0 on failure.
EXPORT_API int MediapipeHolisticTrackingRelease()
{
    const int release_status = m_HolisticTrackingDetect.Release();
    return release_status;
}
#ifndef HOLISTIC_TRACKING_DETECT_H
#define HOLISTIC_TRACKING_DETECT_H

#include <cstdlib>
#include "absl/flags/flag.h"
#include "absl/flags/parse.h"
#include "mediapipe/framework/calculator_framework.h"
#include "mediapipe/framework/formats/image_frame.h"
#include "mediapipe/framework/formats/image_frame_opencv.h"
#include "mediapipe/framework/port/file_helpers.h"
#include "mediapipe/framework/port/opencv_highgui_inc.h"
#include "mediapipe/framework/port/opencv_imgproc_inc.h"
#include "mediapipe/framework/port/opencv_video_inc.h"
#include "mediapipe/framework/port/parse_text_proto.h"
#include "mediapipe/framework/port/status.h"

#include "mediapipe/framework/formats/detection.pb.h"
#include "mediapipe/framework/formats/landmark.pb.h"
#include "mediapipe/framework/formats/rect.pb.h"

// A 2D point in image pixel coordinates (normalized landmarks scaled by the
// input frame's width/height).
struct Point2D {
    float x;  // horizontal pixel coordinate
    float y;  // vertical pixel coordinate
};

namespace GoogleMediapipeDetect {

    // Wraps a MediaPipe holistic-tracking CalculatorGraph: graph setup,
    // per-frame landmark detection, and teardown.
    class HolisticTrackingDetect {
    public:
        HolisticTrackingDetect();

        // Builds and starts the graph from model_path.
        // Returns 1 on success, 0 on failure.
        int InitModel(const char* model_path);
        // Runs the graph on one raw frame and appends detected landmarks
        // (pixel coordinates) to the three output vectors.
        // Returns 1 on success, 0 if uninitialized or the graph fails.
        int DetectImageDirect(int image_width, int image_height, void* image_data, std::vector<Point2D>& posePoints, std::vector<Point2D>& leftHandPoints, std::vector<Point2D>& rightHandPoints);
        // Shuts the graph down. Returns 1 on success, 0 on failure.
        int Release();

    private:
        bool m_bIsInit;     // set once InitModel succeeds
        bool m_bIsRelease;  // set once Release succeeds

        // Graph stream names, fixed in the constructor.
        std::string m_Video_InputStreamName;
        std::string m_PoseLandmarks_OutputStreamName;
        std::string m_LeftHandLandmarks_OutputStreamName;
        std::string m_RightHandLandmarks_OutputStreamName;
        std::string m_FaceLandmarks_OutputStreamName;

        // Pollers for the graph's landmark output streams.
        // NOTE(review): raw owning pointers — confirm they are released in
        // Mediapipe_ReleaseGraph (implementation is stubbed below).
        mediapipe::OutputStreamPoller* m_pPoseLandmarksPoller;
        mediapipe::OutputStreamPoller* m_pLeftHandLandmarksPoller;
        mediapipe::OutputStreamPoller* m_pRightHandLandmarksPoller;
        mediapipe::OutputStreamPoller* m_pFaceLandmarksPoller;

        absl::Status Mediapipe_InitGraph(const char* model_path);
        absl::Status Mediapipe_RunMPPGraph_Direct(int image_width, int image_height, void* image_data, std::vector<Point2D>& posePoints, std::vector<Point2D>& leftHandPoints, std::vector<Point2D>& rightHandPoints);
        absl::Status Mediapipe_ReleaseGraph();
    };

} // namespace GoogleMediapipeDetect

#endif // HOLISTIC_TRACKING_DETECT_H
#include <vector>

#include "HolisticTrackingDetect.h"

// Default-constructs the detector: no graph initialized yet, canonical
// holistic-graph stream names, and all output-stream pollers null.
GoogleMediapipeDetect::HolisticTrackingDetect::HolisticTrackingDetect()
    : m_bIsInit(false),
      m_bIsRelease(false),
      m_Video_InputStreamName("input_video"),
      m_PoseLandmarks_OutputStreamName("pose_landmarks"),
      m_LeftHandLandmarks_OutputStreamName("left_hand_landmarks"),
      m_RightHandLandmarks_OutputStreamName("right_hand_landmarks"),
      m_FaceLandmarks_OutputStreamName("face_landmarks"),
      m_pPoseLandmarksPoller(nullptr),
      m_pLeftHandLandmarksPoller(nullptr),
      m_pRightHandLandmarksPoller(nullptr),
      m_pFaceLandmarksPoller(nullptr)
{
}

int GoogleMediapipeDetect::HolisticTrackingDetect::InitModel(const char* model_path) {
    // 初始化模型
    absl::Status run_status = Mediapipe_InitGraph(model_path);
    if (!run_status.ok()) {
        return 0;
    }
    m_bIsInit = true;
    return 1;
}

int GoogleMediapipeDetect::HolisticTrackingDetect::DetectImageDirect(int image_width, int image_height, void* image_data, std::vector<Point2D>& posePoints, std::vector<Point2D>& leftHandPoints, std::vector<Point2D>& rightHandPoints) {
    // 处理图像并返回关键点位置信息
    if (!m_bIsInit) {
        return 0;
    }

    absl::Status run_status = Mediapipe_RunMPPGraph_Direct(image_width, image_height, image_data, posePoints, leftHandPoints, rightHandPoints);
    if (!run_status.ok()) {
        return 0;
    }
    return 1;
}

int GoogleMediapipeDetect::HolisticTrackingDetect::Release() {
    // 释放资源
    absl::Status run_status = Mediapipe_ReleaseGraph();
    if (!run_status.ok()) {
        return 0;
    }
    m_bIsRelease = true;
    return 1;
}

// Stub: the real graph-construction code (loading the pbtxt config, starting
// the CalculatorGraph, attaching the output-stream pollers) is omitted in
// this article's listing.
absl::Status GoogleMediapipeDetect::HolisticTrackingDetect::Mediapipe_InitGraph(const char* model_path)
{
    // Model-initialization code omitted.
    return absl::OkStatus();  // placeholder success so the stub compiles
}

// Polls the pose / left-hand / right-hand landmark output streams and appends
// each landmark, scaled to pixel coordinates, to the matching output vector.
// A stream with no queued packet simply leaves its vector unchanged.
// Returns OkStatus in all cases (as in the original listing).
//
// Improvements over the original: the three duplicated polling blocks are
// factored into one local helper, each landmark is taken by const reference
// instead of copied, and null pollers are guarded (Mediapipe_InitGraph is
// stubbed, so the pollers may never have been attached).
absl::Status GoogleMediapipeDetect::HolisticTrackingDetect::Mediapipe_RunMPPGraph_Direct(int image_width, int image_height, void* image_data, std::vector<Point2D>& posePoints, std::vector<Point2D>& leftHandPoints, std::vector<Point2D>& rightHandPoints)
{
    // Frame packaging / graph feeding code omitted (as in the original).

    // Drains one packet from `poller` (if any) and converts its normalized
    // landmarks to pixel coordinates appended to `out`.
    const auto CollectLandmarks =
        [image_width, image_height](mediapipe::OutputStreamPoller* poller,
                                    std::vector<Point2D>& out) {
        if (poller == nullptr || poller->QueueSize() == 0)
            return;
        mediapipe::Packet packet;
        if (!poller->Next(&packet))
            return;
        const auto& output_landmarks = packet.Get<mediapipe::NormalizedLandmarkList>();
        for (int i = 0; i < output_landmarks.landmark_size(); ++i) {
            // Const reference — the original copied every landmark proto.
            const mediapipe::NormalizedLandmark& landmark = output_landmarks.landmark(i);
            out.emplace_back(Point2D{ landmark.x() * image_width,
                                      landmark.y() * image_height });
        }
    };

    CollectLandmarks(m_pPoseLandmarksPoller, posePoints);       // pose_landmarks
    CollectLandmarks(m_pLeftHandLandmarksPoller, leftHandPoints);   // left_hand_landmarks
    CollectLandmarks(m_pRightHandLandmarksPoller, rightHandPoints); // right_hand_landmarks

    return absl::OkStatus();
}

// Stub: the real teardown code (closing the input stream, waiting for the
// graph to finish, deleting the pollers) is omitted in this article's listing.
absl::Status GoogleMediapipeDetect::HolisticTrackingDetect::Mediapipe_ReleaseGraph()
{
    // Resource-release code omitted.

    return absl::OkStatus();  // placeholder success so the stub compiles
}

The BUILD file:
# Builds the tracking API as a shared library (linkshared) rather than an
# executable.
# NOTE(review): the target name and srcs refer to the hand-tracking example,
# while the bazel command below builds
# holistic_tracking_dll:MediapipeHolisticTracking — confirm this snippet
# matches the holistic sources listed above.
cc_binary(
    name = "Mediapipe_Hand_Tracking",
        srcs = ["hand_tracking_api.h","hand_tracking_api.cpp","hand_tracking_detect.h","hand_tracking_detect.cpp","hand_tracking_data.h","hand_gesture_recognition.h","hand_gesture_recognition.cpp"],
    linkshared=True,  # emit a .dll/.so instead of an executable
        deps = [    
    "@com_google_absl//absl/flags:flag",
    "@com_google_absl//absl/flags:parse",
    "//mediapipe/graphs/hand_tracking:desktop_tflite_calculators",
    "//mediapipe/framework:calculator_framework",
    "//mediapipe/framework/formats:image_frame",
    "//mediapipe/framework/formats:image_frame_opencv",
    "//mediapipe/framework/port:opencv_imgproc",
    "//mediapipe/framework/port:opencv_video",
    "//mediapipe/framework/port:opencv_highgui",
    "//mediapipe/framework/port:parse_text_proto",
    "//mediapipe/framework/port:file_helpers",
    "//mediapipe/framework/port:status",
    ],
)
 

bazel build -c opt --define MEDIAPIPE_DISABLE_GPU=1  --action_env PYTHON_BIN_PATH="C:/Python39/python.exe" mediapipe/examples/desktop/holistic_tracking_dll:MediapipeHolisticTracking --verbose_failures

  • 4
    点赞
  • 10
    收藏
    觉得还不错? 一键收藏
  • 打赏
    打赏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包

打赏作者

我救我自己

你的鼓励将是我创作的最大动力

¥1 ¥2 ¥4 ¥6 ¥10 ¥20
扫码支付:¥1
获取中
扫码支付

您的余额不足,请更换扫码支付或充值

打赏作者

实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值