-performSelectorOnMainThread:withObject:waitUntilDone: VS dispatch_async(dispatch_get_main_queue(),

默认情况下,

-performSelectorOnMainThread:withObject:waitUntilDone: 

只有当主线程的run loop处于默认模式(default mode)下时,才会调度你指定的selector;当run loop处于其他模式(如tracking mode)下时,selector只有在run loop切换回默认模式后才会被执行。你可以使用
-performSelectorOnMainThread:withObject:waitUntilDone:modes:

来指定模式。

dispatch_async(dispatch_get_main_queue(), ^{ ... })

则与主线程的run loop处于什么模式下无关,它会在run loop释放控制权后立即执行你指定的block中的代码。

所以,如果你不关心在什么模式下执行你的代码,那么dispatch_async(dispatch_get_main_queue(), ^{ ... })是一个更好的选择。

  • 0
    点赞
  • 1
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
在 C++ 中使用 Holistic 模型输出右肩坐标的示例代码如下: ```c++ #include <iostream> #include <vector> #include "mediapipe/framework/formats/image_frame.h" #include "mediapipe/framework/formats/landmark.pb.h" #include "mediapipe/framework/formats/landmark_list.pb.h" #include "mediapipe/framework/port/opencv_highgui_inc.h" #include "mediapipe/framework/port/opencv_imgproc_inc.h" #include "mediapipe/framework/port/status.h" #include "mediapipe/framework/port/statusor.h" #include "mediapipe/framework/tool/options_util.h" #include "mediapipe/framework/tool/status_util.h" #include "mediapipe/framework/calculator_framework.h" #include "mediapipe/framework/calculator_graph.h" #include "mediapipe/framework/formats/matrix.h" #include "mediapipe/framework/formats/matrix_data.pb.h" #include "mediapipe/framework/formats/rect.pb.h" #include "mediapipe/framework/formats/rect.pb.h" #include "mediapipe/framework/formats/classification.pb.h" #include "mediapipe/framework/formats/detection.pb.h" #include "mediapipe/framework/formats/image_frame_opencv.h" #include "mediapipe/framework/formats/matrix.h" #include "mediapipe/framework/formats/matrix_data.pb.h" #include "mediapipe/framework/formats/matrix_opencv.h" #include "mediapipe/framework/formats/rect.pb.h" #include "mediapipe/framework/formats/rect.pb.h" #include "mediapipe/framework/formats/classification.pb.h" #include "mediapipe/framework/formats/detection.pb.h" #include "mediapipe/framework/formats/image_frame_opencv.h" #include "mediapipe/framework/formats/rect.pb.h" #include "mediapipe/framework/formats/rect.pb.h" #include "mediapipe/framework/formats/classification.pb.h" #include "mediapipe/framework/formats/detection.pb.h" #include "mediapipe/framework/formats/image_frame_opencv.h" #include "mediapipe/framework/formats/rect.pb.h" #include "mediapipe/framework/formats/rect.pb.h" #include "mediapipe/framework/formats/classification.pb.h" #include "mediapipe/framework/formats/detection.pb.h" #include 
"mediapipe/framework/formats/image_frame_opencv.h" #include "mediapipe/framework/port/opencv_highgui_inc.h" #include "mediapipe/framework/port/opencv_imgcodecs_inc.h" #include "mediapipe/framework/port/opencv_video_inc.h" #include "mediapipe/framework/port/opencv_videoio_inc.h" #include "mediapipe/util/resource_util.h" constexpr char kInputStream[] = "input_video"; constexpr char kOutputStream[] = "output_video"; constexpr char kLandmarksStream[] = "pose_landmarks"; constexpr char kWindowName[] = "MediaPipe"; using namespace mediapipe; int main() { // 初始化计算图 CalculatorGraphConfig config = mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"( input_stream: "input_video" output_stream: "output_video" node { calculator: "HolisticTrackingCalculator" input_stream: "IMAGE:input_video" output_stream: "IMAGE:output_video" output_stream: "POSE_LANDMARKS:pose_landmarks" node_options: { [type.googleapis.com/mediapipe.HolisticTrackingCalculatorOptions] { min_detection_confidence: 0.5 min_tracking_confidence: 0.5 } } } )"); CalculatorGraph graph; MP_RETURN_IF_ERROR(graph.Initialize(config)); // 打开摄像头或者视频文件 cv::VideoCapture capture; capture.open(0); cv::namedWindow(kWindowName, cv::WINDOW_NORMAL); cv::resizeWindow(kWindowName, 720, 480); // 处理帧数据 while (capture.isOpened()) { bool grabbed = capture.grab(); if (!grabbed) break; cv::Mat input_frame; capture.retrieve(input_frame, cv::CAP_PROP_CONVERT_RGB); // 将 OpenCV 的 Mat 数据转换成 MediaPipe 的 ImageFrame 数据 auto input_frame_mat = absl::make_unique<cv::Mat>(input_frame); auto input_frame_image = absl::make_unique<ImageFrame>(ImageFormat::SRGB, input_frame.cols, input_frame.rows, ImageFrame::kDefaultAlignmentBoundary); cv::Mat input_frame_mat_bgr; cv::cvtColor(input_frame, input_frame_mat_bgr, cv::COLOR_RGB2BGR); cv::Mat input_frame_mat_bgr_flipped; cv::flip(input_frame_mat_bgr, input_frame_mat_bgr_flipped, /*flipcode=*/1); cv::Mat input_frame_mat_bgr_flipped_aligned; cv::Mat temp_output_frame = 
cv::Mat::zeros(input_frame_mat_bgr_flipped.rows, input_frame_mat_bgr_flipped.cols, input_frame_mat_bgr_flipped.type()); cv::rotate(input_frame_mat_bgr_flipped, temp_output_frame, cv::ROTATE_90_COUNTERCLOCKWISE); cv::rotate(temp_output_frame, input_frame_mat_bgr_flipped_aligned, cv::ROTATE_180); cv::Mat input_frame_mat_aligned; cv::cvtColor(input_frame_mat_bgr_flipped_aligned, input_frame_mat_aligned, cv::COLOR_BGR2RGB); memcpy(input_frame_image->MutablePixelData(), input_frame_mat_aligned.data, input_frame_mat_aligned.total() * input_frame_mat_aligned.elemSize()); input_frame_image->SetColorSpace(ImageFrame::ColorSpace::SRGB); input_frame_image->set_timestamp(Timestamp(capture.get(cv::CAP_PROP_POS_MSEC) * 1000)); // 向计算图输入数据 MP_RETURN_IF_ERROR(graph.AddPacketToInputStream( kInputStream, Adopt(input_frame_image.release()).At(Timestamp(capture.get(cv::CAP_PROP_POS_MSEC) * 1000)))); // 获取输出结果 mediapipe::Packet pose_landmarks_packet; if (graph.GetOutputLandmarkList(kLandmarksStream, &pose_landmarks_packet)) { auto& pose_landmarks = pose_landmarks_packet.Get<mediapipe::NormalizedLandmarkList>(); if (pose_landmarks.landmark_size() > 0) { // 获取右肩坐标 auto right_shoulder = pose_landmarks.landmark(11); std::cout << "Right shoulder coordinate: (" << right_shoulder.x() << ", " << right_shoulder.y() << ", " << right_shoulder.z() << ")" << std::endl; } } // 获取输出图像 mediapipe::Packet output_packet; if (graph.GetOutputPacket(&output_packet, kOutputStream) && !output_packet.IsEmpty()) { auto& output_frame = output_packet.Get<mediapipe::ImageFrame>(); cv::Mat output_mat = mediapipe::formats::MatView(&output_frame); cv::Mat output_mat_bgr; cv::cvtColor(output_mat, output_mat_bgr, cv::COLOR_RGB2BGR); cv::Mat output_mat_bgr_flipped; cv::flip(output_mat_bgr, output_mat_bgr_flipped, /*flipcode=*/0); cv::imshow(kWindowName, output_mat_bgr_flipped); } if (cv::waitKey(5) == 27) break; } // 关闭计算图 MP_RETURN_IF_ERROR(graph.CloseInputStream(kInputStream)); return graph.WaitUntilDone(); } ``` 
需要注意的是,Holistic 模型输出的坐标值是归一化的坐标值,范围在[0, 1]之间。如果需要将其转换为图像坐标或者其他坐标系的坐标值,需要进行相应的转换。

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值