Azure Kinect RGB图、深度图和点云信息的获取与配准(ROS)

本文提供使用Azure Kinect API(C++)获取RGB图、深度图和点云信息的代码样例. 前提是已配置好Azure Kinect 传感器 SDK以及Azure_Kinect_ROS_Driver.

1.C++代码

该程序可获取RGB图、深度图和点云信息, 并将RGB图像与深度图像配准
完整代码如下:

// C++
#include <iostream>
#include <chrono>
#include <string>

#include <ros/ros.h>
// OpenCV
#include <opencv2/opencv.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
// Kinect DK
#include <k4a/k4a.hpp>
//#include <k4a/k4a.h>

using namespace cv;
using namespace std;

// 宏
// 方便控制是否 std::cout 信息
#define DEBUG_std_cout 0

int main(int argc, char *argv[]) {
    // This sample grabs RGB / depth / IR / point-cloud data from one Azure
    // Kinect DK, registers depth to the color camera, and previews everything
    // with OpenCV while the ROS node is alive.
    ros::init(argc, argv, "k4a_show");
    ros::NodeHandle n;

    /*
        Find and open the Azure Kinect device.
    */
    // Count connected devices.
    const uint32_t device_count = k4a::device::get_installed_count();
    if (0 == device_count) {
        std::cout << "Error: no K4A devices found. " << std::endl;
        return -1;
    }
    std::cout << "Found " << device_count << " connected devices. " << std::endl;
    if (1 != device_count) {
        // This sample only drives a single device.
        std::cout << "Error: more than one K4A devices found. " << std::endl;
        return -1;
    }
    std::cout << "Done: found 1 K4A device. " << std::endl;

    // Open the (default) device.
    k4a::device device = k4a::device::open(K4A_DEVICE_DEFAULT);
    std::cout << "Done: open device. " << std::endl;

    /*
        Configure and start the cameras.
    */
    k4a_device_configuration_t config = K4A_DEVICE_CONFIG_INIT_DISABLE_ALL;
    config.camera_fps = K4A_FRAMES_PER_SECOND_30;
    config.color_format = K4A_IMAGE_FORMAT_COLOR_BGRA32;
    config.color_resolution = K4A_COLOR_RESOLUTION_1080P;
    config.depth_mode = K4A_DEPTH_MODE_NFOV_UNBINNED;
    //config.depth_mode = K4A_DEPTH_MODE_WFOV_2X2BINNED;
    config.synchronized_images_only = true;// ensures that depth and color images are both available in the capture
    device.start_cameras(&config);
    std::cout << "Done: start camera." << std::endl;

    // FIX: the calibration and transformation depend only on the device
    // configuration, so build them ONCE here. The original code constructed a
    // new k4a::transformation on every loop iteration, which re-allocates the
    // internal lookup tables each frame and is very costly.
    k4a::calibration k4aCalibration = device.get_calibration(config.depth_mode, config.color_resolution);// Get the camera calibration for the entire K4A device, which is used for all transformation functions.
    k4a::transformation k4aTransformation = k4a::transformation(k4aCalibration);

    /*
        Stabilization: skip the first successful captures so the auto-exposure
        can settle before we start using the images.
    */
    k4a::capture capture;
    int iAuto = 0;      // number of frames skipped for auto-exposure
    int iAutoError = 0; // number of consecutive capture failures
    while (true) {
        if (device.get_capture(&capture, std::chrono::milliseconds(K4A_WAIT_INFINITE))) {
            std::cout << iAuto << ". Capture several frames to give auto-exposure" << std::endl;

            // Skip the first n successful captures to stabilize the camera.
            if (iAuto != 30) {
                iAuto++;
                continue;
            } else {
                std::cout << "Done: auto-exposure" << std::endl;
                break;// camera output has stabilized
            }
        } else {
            std::cout << iAutoError << ". K4A_WAIT_RESULT_TIMEOUT." << std::endl;
            if (iAutoError != 30) {
                iAutoError++;
                continue;
            } else {
                std::cout << "Error: failed to give auto-exposure. " << std::endl;
                return -1;
            }
        }
    }
    std::cout << "-----------------------------------" << std::endl;
    std::cout << "----- Have Started Kinect DK. -----" << std::endl;
    std::cout << "-----------------------------------" << std::endl;

    // Per-frame image handles and their OpenCV wrappers.
    k4a::image rgbImage;
    k4a::image depthImage;
    k4a::image irImage;
    k4a::image transformed_depthImage; // depth re-projected into the color camera
    k4a::image xyzImage;               // per-pixel XYZ point cloud

    cv::Mat cv_rgbImage_with_alpha;
    cv::Mat cv_rgbImage_no_alpha;
    cv::Mat cv_depth;
    cv::Mat cv_depth_8U;
    cv::Mat cv_irImage;
    cv::Mat cv_irImage_8U;
    cv::Mat cv_xyzImage;     // 16-bit signed, millimeters
    cv::Mat cv_xyzImage_32F; // 32-bit float, meters

    while (ros::ok())
    {
        if (device.get_capture(&capture, std::chrono::milliseconds(K4A_WAIT_INFINITE))) {
            // rgb
            // * Each pixel of BGRA32 data is four bytes. The first three bytes represent Blue, Green,
            // * and Red data. The fourth byte is the alpha channel and is unused in the Azure Kinect APIs.
            rgbImage = capture.get_color_image();
            #if DEBUG_std_cout == 1
            std::cout << "[rgb] " << "\n"
                << "format: " << rgbImage.get_format() << "\n"
                << "device_timestamp: " << rgbImage.get_device_timestamp().count() << "\n"
                << "system_timestamp: " << rgbImage.get_system_timestamp().count() << "\n"
                << "height*width: " << rgbImage.get_height_pixels() << ", " << rgbImage.get_width_pixels()
                << std::endl;
            #endif

            // depth
            // * Each pixel of DEPTH16 data is two bytes of little endian unsigned depth data. The unit of the data is in
            // * millimeters from the origin of the camera.
            depthImage = capture.get_depth_image();
            #if DEBUG_std_cout == 1
            std::cout << "[depth] " << "\n"
                << "format: " << depthImage.get_format() << "\n"
                << "device_timestamp: " << depthImage.get_device_timestamp().count() << "\n"
                << "system_timestamp: " << depthImage.get_system_timestamp().count() << "\n"
                << "height*width: " << depthImage.get_height_pixels() << ", " << depthImage.get_width_pixels()
                << std::endl;
            #endif

            // ir
            // * Each pixel of IR16 data is two bytes of little endian unsigned depth data. The value of the data represents
            // * brightness.
            irImage = capture.get_ir_image();
            #if DEBUG_std_cout == 1
            std::cout << "[ir] " << "\n"
                << "format: " << irImage.get_format() << "\n"
                << "device_timestamp: " << irImage.get_device_timestamp().count() << "\n"
                << "system_timestamp: " << irImage.get_system_timestamp().count() << "\n"
                << "height*width: " << irImage.get_height_pixels() << ", " << irImage.get_width_pixels()
                << std::endl;
            #endif

            // Register the depth image to the color camera (same resolution as RGB).
            transformed_depthImage = k4aTransformation.depth_image_to_color_camera(depthImage);
            cv_rgbImage_with_alpha = cv::Mat(rgbImage.get_height_pixels(), rgbImage.get_width_pixels(), CV_8UC4,
                                             (void *) rgbImage.get_buffer());
            cv::cvtColor(cv_rgbImage_with_alpha, cv_rgbImage_no_alpha, cv::COLOR_BGRA2BGR);
            cv_depth = cv::Mat(transformed_depthImage.get_height_pixels(), transformed_depthImage.get_width_pixels(), CV_16U,
                               (void *) transformed_depthImage.get_buffer(), static_cast<size_t>(transformed_depthImage.get_stride_bytes()));
            // FIX: DEPTH16 values are millimeters (0..several thousand); converting
            // with scale 1 saturates nearly every pixel to 255 and the preview shows
            // solid white. Map ~[0, 5000] mm (NFOV unbinned working range) to [0, 255].
            cv_depth.convertTo(cv_depth_8U, CV_8U, 255.0 / 5000.0);
            cv_irImage = cv::Mat(irImage.get_height_pixels(), irImage.get_width_pixels(), CV_16U,
                                 (void *) irImage.get_buffer(), static_cast<size_t>(irImage.get_stride_bytes()));
            // NOTE(review): IR16 brightness can also exceed 255, so scale 1 clips
            // bright pixels; kept as in the original — tune if the IR preview saturates.
            cv_irImage.convertTo(cv_irImage_8U, CV_8U, 1);

            // Point cloud.
            /*
            Each pixel of the xyz_image consists of three int16_t values, totaling 6 bytes — the X, Y, and Z
            coordinates of the point in millimeters, so the XYZ image stride is width * 3 * sizeof(int16_t).
            The data order is pixel-interleaved: X0, Y0, Z0, X1, Y1, Z1, ...
            Pixels that cannot be converted to 3D are assigned the value [0, 0, 0].
            */
            xyzImage = k4aTransformation.depth_image_to_point_cloud(depthImage, K4A_CALIBRATION_TYPE_DEPTH);
            cv_xyzImage = cv::Mat(xyzImage.get_height_pixels(), xyzImage.get_width_pixels(), CV_16SC3, (void *)xyzImage.get_buffer(), static_cast<size_t>(xyzImage.get_stride_bytes()));
            cv_xyzImage.convertTo(cv_xyzImage_32F, CV_32FC3, 1.0/1000, 0);// to float, converting mm to m

            // show image
            cv::imshow("color", cv_rgbImage_no_alpha);
            cv::imshow("depth", cv_depth_8U);
            cv::imshow("ir", cv_irImage_8U);
            cv::imshow("xyz", cv_xyzImage_32F);
            waitKey(30);

            // Print the XYZ coordinates of one sample pixel (row 300, col 300).
            // FIX: guard the access in case the depth image is smaller than expected
            // (e.g. a binned depth mode is selected above).
            if (cv_xyzImage_32F.rows > 300 && cv_xyzImage_32F.cols > 300) {
                Vec3f vec = cv_xyzImage_32F.at<Vec3f>(300, 300);
                cout << vec[0] << "," << vec[1] << "," << vec[2] << endl;
            }

            cv_rgbImage_with_alpha.release();
            cv_rgbImage_no_alpha.release();
            cv_depth.release();
            cv_depth_8U.release();
            cv_irImage.release();
            cv_irImage_8U.release();
            cv_xyzImage.release();

            capture.reset();
        }
        else {
            std::cout << "false: K4A_WAIT_RESULT_TIMEOUT." << std::endl;
        }
    }
    cv::destroyAllWindows();

    // Release image handles and close the device.
    rgbImage.reset();
    depthImage.reset();
    irImage.reset();
    capture.reset();
    device.close();

    return 0;
}

2.CMakeLists

# FIX: use CMake's standard C++-standard machinery instead of clobbering
# CMAKE_CXX_FLAGS. SET(CMAKE_CXX_FLAGS "-std=c++0x") discarded any flags set
# elsewhere, and "c++0x" is the pre-standard spelling of C++11; k4a.hpp and
# <chrono> require at least C++11.
set(CMAKE_CXX_STANDARD 11)
set(CMAKE_CXX_STANDARD_REQUIRED ON)

find_package(catkin REQUIRED COMPONENTS
  roscpp
  std_msgs
  sensor_msgs
  image_transport
  cv_bridge
)
find_package(k4a REQUIRED)
find_package(OpenCV REQUIRED)

catkin_package(
)

# FIX: ${OpenCV_INCLUDE_DIRS} was missing — the source includes OpenCV headers
# and links via ${OpenCV_LIBRARIES} (variables style), so the include dirs must
# be added explicitly.
include_directories(
  ${catkin_INCLUDE_DIRS}
  ${OpenCV_INCLUDE_DIRS}
)

add_executable( k4a_text src/k4a_text.cpp )
add_dependencies( k4a_text ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS} )
target_link_libraries( k4a_text
  PRIVATE k4a::k4a
  ${OpenCV_LIBRARIES}
  ${catkin_LIBRARIES}
)

重点是

find_package(k4a REQUIRED)
target_link_libraries( k4a_text
	PRIVATE k4a::k4a
	${OpenCV_LIBRARIES}
	${catkin_LIBRARIES}
)

3.RGB图像与深度图像配准

Azure Kinect获取的RGB图像分辨率有

K4A_COLOR_RESOLUTION_OFF = 0, /**< Color camera will be turned off with this setting */
K4A_COLOR_RESOLUTION_720P,    /**< 1280 * 720  16:9 */
K4A_COLOR_RESOLUTION_1080P,   /**< 1920 * 1080 16:9 */
K4A_COLOR_RESOLUTION_1440P,   /**< 2560 * 1440 16:9 */
K4A_COLOR_RESOLUTION_1536P,   /**< 2048 * 1536 4:3  */
K4A_COLOR_RESOLUTION_2160P,   /**< 3840 * 2160 16:9 */
K4A_COLOR_RESOLUTION_3072P,   /**< 4096 * 3072 4:3  */

深度图像分辨率有

K4A_DEPTH_MODE_OFF = 0,        /**< Depth sensor will be turned off with this setting. */
K4A_DEPTH_MODE_NFOV_2X2BINNED, /**< Depth captured at 320x288*/
K4A_DEPTH_MODE_NFOV_UNBINNED,  /**< Depth captured at 640x576*/
K4A_DEPTH_MODE_WFOV_2X2BINNED, /**< Depth captured at 512x512*/
K4A_DEPTH_MODE_WFOV_UNBINNED,  /**< Depth captured at 1024x1024*/
K4A_DEPTH_MODE_PASSIVE_IR,     /**< Passive IR only, captured at 1024x1024. */

配准后可获得与RGB图像分辨率相同的深度图像, 具体代码如下:

 k4a::calibration k4aCalibration = device.get_calibration(config.depth_mode, config.color_resolution);// Get the camera calibration for the entire K4A device, which is used for all transformation functions.
 k4a::transformation k4aTransformation = k4a::transformation(k4aCalibration);
 transformed_depthImage = k4aTransformation.depth_image_to_color_camera(depthImage);

4.xyz坐标的获取

将深度图像转化为点云信息

xyzImage = k4aTransformation.depth_image_to_point_cloud(depthImage, K4A_CALIBRATION_TYPE_DEPTH);
cv_xyzImage = cv::Mat(xyzImage.get_height_pixels(), xyzImage.get_width_pixels(), CV_16SC3, (void *)xyzImage.get_buffer(), static_cast<size_t>(xyzImage.get_stride_bytes()));
cv_xyzImage.convertTo(cv_xyzImage_32F, CV_32FC3, 1.0/1000, 0);// 转为float,同时将单位从 mm 转换为 m.

上述代码获取点云信息, 存储在cv_xyzImage_32F中, 使用cv::Mat::at&lt;Vec3f&gt;(int row, int col)可获取相应像素位置的xyz坐标(注意参数顺序是先行号后列号, 而不是(x, y)).

Vec3f vec = cv_xyzImage_32F.at<Vec3f>(300, 300);
cout << vec[0] << "," << vec[1] << "," << vec[2] << endl;

5.参考

RGB-D相机(Azure Kinect DK)RGB图、深度图的获取,配准与保存:https://blog.csdn.net/Zlp19970106/article/details/107120743

  • 3
    点赞
  • 54
    收藏
    觉得还不错? 一键收藏
  • 5
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论 5
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值