Importing the Kinect2, OpenCV and PCL libraries


Kinect2 driver installation:     https://blog.csdn.net/qq_15204179/article/details/107706758

 

Findfreenect2.cmake   [Kinect2]

# Find the freenect2 library
FIND_LIBRARY(freenect2_LIBRARY freenect2
        PATHS ~/freenect2/lib
        NO_DEFAULT_PATH
        )
SET(freenect2_LIBRARIES ${freenect2_LIBRARY} pthread)

# Find the include path
FIND_PATH(freenect2_INCLUDE_DIR libfreenect2/libfreenect2.hpp
        PATHS ~/freenect2/include
        NO_DEFAULT_PATH
        )
SET(freenect2_INCLUDE_DIRS ${freenect2_INCLUDE_DIR})

FindOpenCV.cmake   [OpenCV]


# once done, this will define
#OpenCV_FOUND        -  whether there is OpenCV in the prebuilt libraries
#OpenCV_INCLUDE_DIRS -  include directories for OpenCV
#OpenCV_LIBRARY_DIRS -  library directories for OpenCV
#OpenCV_LIBRARIES    -  link this to use OpenCV

set(OpenCV_DIR ~/3rdparty/OpenCV-3.4.9)

unset(OpenCV_FOUND)

MESSAGE(STATUS "OpenCV_DIR: ${OpenCV_DIR}")

set(OpenCV_INCLUDE_DIRS "${OpenCV_DIR}/include" "${OpenCV_DIR}/include/opencv")
set(OpenCV_LIB_COMPONENTS opencv_core;opencv_ml;opencv_calib3d;opencv_highgui;opencv_superres;opencv_photo;
        opencv_imgcodecs;opencv_features2d;opencv_viz;opencv_flann;opencv_shape;opencv_stitching;opencv_dnn;
        opencv_videostab;opencv_imgproc;opencv_objdetect;opencv_video;opencv_videoio;opencv_reg;opencv_aruco;
        opencv_dpm;opencv_xobjdetect;opencv_xfeatures2d;opencv_surface_matching;opencv_plot;opencv_hfs;
        opencv_tracking;opencv_rgbd;opencv_text;opencv_dnn_objdetect;opencv_face;opencv_optflow;opencv_bgsegm;
        opencv_bioinspired;opencv_ximgproc;opencv_saliency;opencv_freetype;opencv_stereo;opencv_img_hash;
        opencv_fuzzy;opencv_ccalib;opencv_line_descriptor;opencv_hdf;opencv_datasets;opencv_phase_unwrapping;
        opencv_xphoto;opencv_structured_light)

find_path(OpenCV_INCLUDE_DIRS NAMES opencv.h HINTS ${OpenCV_DIR}/include NO_SYSTEM_ENVIRONMENT_PATH)

set(OpenCV_LIBRARY_DIRS ${OpenCV_DIR}/lib)

FOREACH(cvcomponent ${OpenCV_LIB_COMPONENTS})
  find_library(lib_${cvcomponent} NAMES ${cvcomponent} HINTS ${OpenCV_DIR}/lib NO_DEFAULT_PATH)

  set(OpenCV_LIBRARIES ${OpenCV_LIBRARIES};${lib_${cvcomponent}})
ENDFOREACH()

set(OpenCV_LIBS ${OpenCV_LIBRARIES})

set(OpenCV_INCLUDE_DIRS ${OpenCV_INCLUDE_DIRS};${OpenCV_INCLUDE_DIRS}/opencv)

if (OpenCV_INCLUDE_DIRS AND OpenCV_LIBRARIES)
  set(OpenCV_FOUND TRUE)
endif (OpenCV_INCLUDE_DIRS AND OpenCV_LIBRARIES)

if (OpenCV_FOUND)
  if (NOT OpenCV_FIND_QUIETLY)
    message(STATUS "Found OpenCV: ${OpenCV_LIBRARIES}")
  endif (NOT OpenCV_FIND_QUIETLY)
else(OpenCV_FOUND)
  if (OpenCV_FIND_REQUIRED)
    message(FATAL_ERROR "Could not find the OpenCV library")
  endif ()
endif (OpenCV_FOUND)

CMakeLists.txt

cmake_minimum_required(VERSION 3.16)
project(kinect2demo)

set(CMAKE_CXX_STANDARD 14)

set(OUTPUT_DIRECTORY_ROOT ${CMAKE_CURRENT_SOURCE_DIR}/build/debug)
set(CMAKE_RUNTIME_OUTPUT_DIRECTORY "${OUTPUT_DIRECTORY_ROOT}/bin" CACHE PATH "Runtime directory" FORCE)
set(CMAKE_LIBRARY_OUTPUT_DIRECTORY "${OUTPUT_DIRECTORY_ROOT}/lib" CACHE PATH "Library directory" FORCE)
set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY "${OUTPUT_DIRECTORY_ROOT}/lib" CACHE PATH "Archive directory" FORCE)

set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} ${CMAKE_SOURCE_DIR}/cmake)
#set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} ~/3rdparty/OpenCV-3.4.9/share/OpenCV)

# -------------------------------------------------- 2
find_package(freenect2 REQUIRED)
find_package(OpenCV REQUIRED)
find_package(PCL REQUIRED)

include_directories(${freenect2_INCLUDE_DIR})
include_directories(${OpenCV_INCLUDE_DIRS})
include_directories(${PCL_INCLUDE_DIRS})

link_directories(${OpenCV_LIBRARY_DIRS})
link_directories(${PCL_LIBRARY_DIRS})

add_definitions(${PCL_DEFINITIONS})

add_subdirectory(src)
add_subdirectory(src/camera)


add_executable(kinect2demo main.cpp)
# link the libraries located above (needed if main.cpp uses freenect2 / OpenCV / PCL)
target_link_libraries(kinect2demo ${freenect2_LIBRARIES} ${OpenCV_LIBRARIES} ${PCL_LIBRARIES})

------------------------------------------------------------------------------------------------------------------------------------------------

01-Kinect2Camera.cpp  Capture and save Kinect2 color and depth images

//
// Created by ty on 20-6-3.
//


#include <iostream>
#include <libfreenect2/libfreenect2.hpp>
#include <libfreenect2/packet_pipeline.h>
#include <libfreenect2/frame_listener_impl.h>
#include <libfreenect2/registration.h>

#include <opencv2/opencv.hpp>

using namespace std;
using namespace cv;


const int ACTION_ESC = 27;
const int ACTION_SPACE = 32;


// Output directory for the saved images
const string output_dir = "../output";

/**
 * Grab frames from the Kinect2 camera.
 *
 * in:
 *      Kinect2 device
 * out:
 *      color image
 *      depth image
 *
 *  Filter the depth values (drop infinities / NaN)
 *  Flip horizontally
 *  Save the color and depth images
 *  Scale by 0.5 for display
 */
int main(int argc, char *argv[]) {

    // Create the libfreenect2 object
    libfreenect2::Freenect2 freenect2;

    if (freenect2.enumerateDevices() == 0) {
        std::cerr << "No Kinect2 device found" << std::endl;
        return -1;
    }

    const string &serial = freenect2.getDefaultDeviceSerialNumber();

    if (serial.empty()) {
        std::cerr << "Device serial number not found" << std::endl;
        return -1;
    }

    std::cout << "Device serial number: " << serial << std::endl;

    // Create the packet processing pipeline
//    libfreenect2::PacketPipeline *pipeline = new libfreenect2::CpuPacketPipeline();
    libfreenect2::PacketPipeline *pipeline = new libfreenect2::OpenGLPacketPipeline();

    // Open the device
    libfreenect2::Freenect2Device *device = freenect2.openDevice(serial, pipeline);

    // Create the frame listener
    libfreenect2::SyncMultiFrameListener listener(libfreenect2::Frame::Color | libfreenect2::Frame::Depth);

    // Attach the listener to the device
    // color stream
    device->setColorFrameListener(&listener);
    // IR stream (IR and depth come from the same sensor)
    device->setIrAndDepthFrameListener(&listener);

    // Start the device
    device->start();

    // Create the registration helper that aligns (undistorts) rgb and depth.
    // The intrinsics are read from the device; calibrating them yourself gives more accurate values.
    libfreenect2::Registration *registration = new libfreenect2::Registration(device->getIrCameraParams(),
                                                                              device->getColorCameraParams());

    std::cout << "Device firmware version: " << device->getFirmwareVersion() << std::endl;

    libfreenect2::Frame undistorted(512, 424, 4),   // receives the undistorted depth image (3rd arg: bytes per pixel, float)
            registered(512, 424, 4),    // receives the color image registered onto the depth image (may contain holes)
            bigdepth(1920, 1082, 4);    // receives the depth mapped onto the color image, 1920x1082 float;
                                        // the first and last rows are padding and must be cropped away

    // Container that receives the frames
    libfreenect2::FrameMap frames;

    int index = 0;
    while (true) {// press space to save the images, ESC to quit
        // Blocks until new frames arrive; the previous frames must have been
        // released first with listener.release(frames);
        listener.waitForNewFrame(frames);
        // color frame
        libfreenect2::Frame *rgb = frames[libfreenect2::Frame::Color];
        // depth frame
        libfreenect2::Frame *depth = frames[libfreenect2::Frame::Depth];

//        std::cout << "rgb->width: " << rgb->width << " rgb->height: " << rgb->height << std::endl;
//        std::cout << "depth->width: " << depth->width << " depth->height: " << depth->height << std::endl;
        // Register color and depth: rgb = raw color, undistorted = undistorted depth,
        // registered = color mapped onto the depth image; the bool enables filtering of
        // pixels the color camera cannot see; bigdepth receives the depth mapped onto the
        // color image (1920x1082 float, with one padding row at the top and bottom).
        registration->apply(rgb, depth, &undistorted, &registered, true, &bigdepth);

        // 512x424 color image aligned to the depth image
//        Mat registeredRgbMat = Mat(registered.height, registered.width, CV_8UC4, registered.data);
        // 512x424 undistorted depth image
//        Mat undistortedDepthMat = Mat(undistorted.height, undistorted.width, CV_32FC1, undistorted.data);
        // 512x424 raw (distorted) depth image
//        Mat smallDepthMat = Mat(depth->height, depth->width, CV_32FC1, depth->data);

        // Wrap the libfreenect2 buffers in OpenCV Mats (no copy).
        // CV_8UC4 = unsigned 8-bit, 4 channels; 1920x1080
        Mat rgbMat = Mat(rgb->height, rgb->width, CV_8UC4, rgb->data);
        // 1920x1082, CV_32FC1 = 32-bit float, single channel; depth values are in millimetres
        Mat bigDepthMat_ = Mat(bigdepth.height, bigdepth.width, CV_32FC1, bigdepth.data);


        // Crop away the padding rows: 1920x1082 -> 1920x1080
        // Rect(x, y, width, height)
        Mat bigDepthMat = bigDepthMat_(Rect(0, 1, bigDepthMat_.cols, bigDepthMat_.rows - 2));
        // Filter the depth values: drop infinities and NaN
        for (int i = 0; i < bigDepthMat.rows; ++i) {
            for (int j = 0; j < bigDepthMat.cols; ++j) {
                float &d = bigDepthMat.at<float>(i, j);
                if (fpclassify(d) == FP_INFINITE || fpclassify(d) == FP_NAN) {
                    d = 0;
                }
            }
        }

        // Flip horizontally
        flip(rgbMat, rgbMat, 1);
        flip(bigDepthMat, bigDepthMat, 1);


        int action = waitKey(30) & 0xFF;

        if (action == ACTION_ESC || action == 'q' || action == 'Q') {// press q or ESC (27) to quit
            break;
        } else if (action == ACTION_SPACE) {// press space (32) to save the images
            std::cout << "Saving the rgb and depth images" << std::endl;
//            *  save the color and depth images

            // save the color image
            stringstream rr;
            rr << output_dir << "/rgb_image_" << index << ".jpg";
            vector<int> params;
            params.push_back(CV_IMWRITE_JPEG_QUALITY);// JPEG quality flag
            params.push_back(100);// quality value
            bool rst = imwrite(rr.str(), rgbMat, params);
            if (!rst) {
                std::cerr << "Output directory does not exist, please check before saving: " << output_dir << std::endl;
                return -1;
            }
            std::cout << "Color image saved: " << rr.str() << std::endl;

            // Save the depth image. It holds raw numbers rather than a viewable picture,
            // so store it as a file: convert to single-channel 16-bit unsigned (CV_16UC1);
            // read it back with FileStorage when it is needed for computation.
            bigDepthMat.convertTo(bigDepthMat, CV_16UC1);
            stringstream dd;
            dd << output_dir << "/depth_image_" << index << ".xml";
            FileStorage fs(dd.str(), FileStorage::WRITE);
            fs << "depth" << bigDepthMat;
            fs.release();
            std::cout << "Depth values saved: " << dd.str() << std::endl;
            index++; // advance the index so the next save does not overwrite these files

        }
//       *  scale by 0.5 for display
        resize(rgbMat, rgbMat, Size(), 0.5, 0.5, INTER_LINEAR);
        resize(bigDepthMat, bigDepthMat, Size(), 0.5, 0.5, INTER_LINEAR);

        imshow("color", rgbMat);// show the color image
        imshow("depth", bigDepthMat / 4500.0f);// dividing the depth by 4500 for display is the upstream recommendation

        // The frames must be released before waitForNewFrame can deliver new data
        listener.release(frames);
    }

    device->stop();// stop the device
    device->close();// then close it

    delete registration;// free the registration helper
}

02-PointCloudMaker.cpp  Convert a Kinect2 color/depth pair into a point cloud

 

//
// Created by ty on 20-6-3.
//


#include <iostream>

#include <opencv2/opencv.hpp>

#include <pcl/point_types.h>
#include <pcl/io/pcd_io.h>
#include <pcl/io/io.h>
#include <pcl/visualization/cloud_viewer.h>

using namespace std;
using namespace cv;

typedef pcl::PointXYZRGB PointT;
typedef pcl::PointCloud<PointT> PointCloud;

/**
 * OpenCV
 * PCL
 *
 * Input:
 *      color image
 *      depth image
 *      camera intrinsics
 *
 * Output:
 *      point cloud
 *
 */
int main(int argc, char *argv[]) {

    // Read the color image and the depth image
    const Mat &rgbMat = imread("./output1/rgb_image_0.jpg", IMREAD_UNCHANGED);
    // depth image
    Mat depthMat;
    FileStorage fs("./output1/depth_image_0.xml", FileStorage::READ);
    fs["depth"] >> depthMat;
    fs.release();

    // Read the camera intrinsics
    Mat cameraMatrix = cv::Mat_<double>(3, 3);
    FileStorage camera_fs("./calib/calibration_in_params.yml", FileStorage::READ);
    camera_fs["cameraMatrix"] >> cameraMatrix;
    camera_fs.release();

    std::cout << cameraMatrix << std::endl;

    double fx = cameraMatrix.at<double>(0, 0);
    double fy = cameraMatrix.at<double>(1, 1);
    double cx = cameraMatrix.at<double>(0, 2);// principal point u
    double cy = cameraMatrix.at<double>(1, 2);// principal point v

    // Prepare a point cloud to receive the data
    PointCloud::Ptr cloud(new PointCloud);

    // Walk over the depth image and give every cloud point its position and color
    for (int v = 0; v < depthMat.rows; ++v) { // all rows
        for (int u = 0; u < depthMat.cols; ++u) { // all columns
            // Z value of this pixel
            ushort d = depthMat.at<ushort>(v, u);// in millimetres
            double depth_value = double(d) / 1000.0f;// convert to metres: computation uses metres, storage uses millimetres

            if (depth_value == 0 || isnan(depth_value) || fabs(depth_value) < 0.0001) {// skip zero, NaN and near-zero depths
                continue;
            }
            // Create a point
            PointT p;

            // Back-project with the intrinsics to get X and Y -----------------------------
            p.z = depth_value;
            p.x = (u - cx) * p.z / fx;
            p.y = (v - cy) * p.z / fy;

            // Assign its color
            Vec3b bgr = rgbMat.at<Vec3b>(v, u);
            p.b = bgr[0];
            p.g = bgr[1];
            p.r = bgr[2];

            // Append to the cloud
            cloud->points.push_back(p);
        }
    }

    // Display the cloud
    cloud->width = cloud->points.size();// an unorganized cloud's width is its point count
    cloud->height = 1;
    cloud->is_dense = false;// whether every point is finite

    pcl::visualization::CloudViewer viewer("cloud_viewer");

    viewer.showCloud(cloud);
    // Save the cloud to a PCD file
//    pcl::io::savePCDFileASCII("output/test_pcd.pcd", *cloud);
//    std::cerr << "Saved " << cloud->points.size() << " data points to test_pcd.pcd." << std::endl;
    while (!viewer.wasStopped()) {
        // wait until the viewer window is closed
    }

}

03-PhotoCapture.cpp  Capture point clouds from the Kinect2 in real time

#include <iostream>
#include <opencv2/opencv.hpp>
#include <sstream>
#include <cstdlib>

#include <pcl/io/pcd_io.h>
#include <pcl/visualization/cloud_viewer.h>
#include <KinectCamera.h>

using namespace std;
using namespace cv;

float qnan_ = std::numeric_limits<float>::quiet_NaN();
const char *cameraInCailFile = "./calib/calibration_in_params.yml";
// directory for captured scenes
//const char *output_dir = "./scene";
// directory for captured templates
const char *output_dir = "./capture_temp";

const int ACTION_ESC = 27;
const int ACTION_SPACE = 32;


Eigen::Matrix<float, 1920, 1> colmap;
Eigen::Matrix<float, 1080, 1> rowmap;
const short w = 1920, h = 1080;

// Precompute the per-column and per-row back-projection factors:
// colmap(u) = (u - cx + 0.5) / fx and rowmap(v) = (v - cy + 0.5) / fy,
// so that x = colmap(u) * z and y = rowmap(v) * z without a per-pixel division.
void prepareMake3D(const double cx, const double cy,
                   const double fx, const double fy) {
//    const int w = 512;
//    const int h = 424;
    float *pm1 = colmap.data();
    float *pm2 = rowmap.data();
    for (int i = 0; i < w; i++) {
        *pm1++ = (i - cx + 0.5) / fx;
    }
    for (int i = 0; i < h; i++) {
        *pm2++ = (i - cy + 0.5) / fy;
    }
}

/**
 * Merge the RGB image and the depth image into a point cloud using the intrinsics.
 * @param cloud    output point cloud
 * @param depthMat depth image (CV_32FC1, millimetres, aligned to the color image)
 * @param rgbMat   color image (BGRA)
 */
void getCloud(pcl::PointCloud<pcl::PointXYZRGB>::Ptr &cloud, Mat &depthMat, Mat &rgbMat) {
    const float *itD0 = (float *) depthMat.ptr();
    const char *itRGB0 = (char *) rgbMat.ptr();

    if (cloud->size() != w * h)
        cloud->resize(w * h);

    // Pointer to the cloud's point array
    pcl::PointXYZRGB *itP = &cloud->points[0];
    bool is_dense = true;

    // iterate over all rows
    for (size_t y = 0; y < h; ++y) {

        // offset: skip y rows, i.e. y * 1920 pixels
        const unsigned int offset = y * w;
        // depth pointer plus the offset
        const float *itD = itD0 + offset;
        // color pointer plus the offset (4 bytes per pixel, BGRA)
        const char *itRGB = itRGB0 + offset * 4;
        // back-projection factor for this row
        const float dy = rowmap(y);

        // iterate over all columns of row y
        for (size_t x = 0; x < w; ++x, ++itP, ++itD, itRGB += 4) {
            const float depth_value = *itD / 1000.0f;

            if (!isnan(depth_value) && fabs(depth_value) >= 0.0001) {

                const float rx = colmap(x) * depth_value;
                const float ry = dy * depth_value;
                itP->z = depth_value;
                itP->x = rx;
                itP->y = ry;

                itP->b = itRGB[0];
                itP->g = itRGB[1];
                itP->r = itRGB[2];
            } else {
                itP->z = qnan_;
                itP->x = qnan_;
                itP->y = qnan_;

                itP->b = qnan_;
                itP->g = qnan_;
                itP->r = qnan_;
                is_dense = false;
            }
        }
    }
    cloud->is_dense = is_dense;
}


/*
 * Save live images from the Kinect camera
 *  press space (or q) to save, ESC to quit
 * */
int main(int argc, char *argv[]) {

    // show the RGB image
    // show the depth image
    // show the fused point cloud

    auto *camera = new KinectCamera();
//    cv::VideoCapture capture(0);
    if (!camera->isOpened()) {
        std::cout << "Failed to open the camera!" << std::endl;
        return -1;
    }

    Mat cameraMatrix = cv::Mat_<double>(3, 3);
    FileStorage paramFs(cameraInCailFile, FileStorage::READ);
    paramFs["cameraMatrix"] >> cameraMatrix;
    // intrinsic parameters
    double fx = cameraMatrix.at<double>(0, 0);
    double fy = cameraMatrix.at<double>(1, 1);
    double cx = cameraMatrix.at<double>(0, 2);
    double cy = cameraMatrix.at<double>(1, 2);

    prepareMake3D(cx, cy, fx, fy);

    // Prepare the point cloud viewer
    pcl::visualization::PCLVisualizer::Ptr viewer(new pcl::visualization::PCLVisualizer("3D Viewer"));

    // Create the point cloud (smart pointer)
    pcl::PointCloud<pcl::PointXYZRGB>::Ptr cloud(new pcl::PointCloud<pcl::PointXYZRGB>());
    viewer->setBackgroundColor(0, 0, 0);

    pcl::visualization::PointCloudColorHandlerRGBField<pcl::PointXYZRGB> rgbHandler(cloud);
    viewer->addPointCloud<pcl::PointXYZRGB>(cloud, rgbHandler, "sample cloud");
    viewer->setPointCloudRenderingProperties(pcl::visualization::PCL_VISUALIZER_POINT_SIZE, 1, "sample cloud");

    int index = 0;
    while (true) {
        cv::Mat rgbMat, depthMat;
        camera->capture(rgbMat, depthMat);


        if (!viewer->wasStopped()) {
            // Fuse the RGB and depth data into a point cloud stored in `cloud`
            getCloud(cloud, depthMat, rgbMat);

            pcl::visualization::PointCloudColorHandlerRGBField<pcl::PointXYZRGB> rgbHandler(cloud);
            viewer->updatePointCloud<pcl::PointXYZRGB>(cloud, rgbHandler, "sample cloud");

            viewer->spinOnce();
        }

        int action = cv::waitKey(20) & 0xFF;

        if (action == ACTION_SPACE || action == 'q') {
            // save the point cloud
            std::cout << "Saving the current point cloud" << std::endl;


            stringstream rr;
            rr << output_dir << "/camera_rgb_" << index << ".jpg";
            vector<int> compression_params;
            compression_params.push_back(CV_IMWRITE_JPEG_QUALITY);  // JPEG quality flag
            compression_params.push_back(100); // desired image quality
            bool rst = cv::imwrite(rr.str(), rgbMat, compression_params);
            if (!rst) {
                std::cerr << "Saving failed, please check that the directory exists: " << output_dir << std::endl;
                return -1;
            }
            cout << rr.str() << endl;

            depthMat.convertTo(depthMat, CV_16UC1);

            // save the depth image
            stringstream ss;
            ss << output_dir << "/camera_depth_" << index << ".xml";
            FileStorage fs(ss.str(), FileStorage::WRITE);
            fs << "depth" << depthMat;
            fs.release();

            stringstream ss_pcd;
            ss_pcd << output_dir << "/table_scene_" << index << ".pcd";
            // save the template point cloud
            pcl::io::savePCDFile(ss_pcd.str(), *cloud);
            index++;
            cout << "成功保存RGB, 深度图, 点云图" << endl;

        } else if (action == ACTION_ESC) {
            // quit
            break;

        }

        cv::imshow("rgb", rgbMat);
        cv::imshow("depth", depthMat / 4500);

    }
    camera->release();


    return 0;
}

===========================================================================================

Kinect2 driver wrapped in a class (KinectCamera)

The implementation below only captures the color image; the two-argument capture overload declared in the header is not implemented here (a sketch is given after the header).

workspas/HandEyeCalibration/cmake/Findfreenect2.cmake

# Find the freenect2 dependency
# include directories: freenect2_INCLUDE_DIRS
# libraries:           freenect2_LIBRARIES

FIND_LIBRARY(freenect2_LIBRARY freenect2
        PATHS ~/freenect2/lib
        NO_DEFAULT_PATH
        )
SET(freenect2_LIBRARIES ${freenect2_LIBRARY} pthread)

# Find the include path
FIND_PATH(freenect2_INCLUDE_DIR libfreenect2/libfreenect2.hpp
        PATHS ~/freenect2/include
        NO_DEFAULT_PATH
        )
SET(freenect2_INCLUDE_DIRS ${freenect2_INCLUDE_DIR})

workspas/HandEyeCalibration/src/camera/CMakeLists.txt

add_library(kinect_camera KinectCamera.cpp KinectCamera.h)
target_link_libraries(kinect_camera ${OpenCV_LIBRARIES} ${freenect2_LIBRARIES})

workspas/HandEyeCalibration/src/camera/KinectCamera.cpp

//
// Created by ty on 20-9-26.
//

#include "KinectCamera.h"


int KinectCamera::init() {


    // Check how many devices are connected
    if (freenect2.enumerateDevices() == 0) {
        std::cerr << "No Kinect2 device found" << std::endl;
        return -1;
    }

    const string &serial = freenect2.getDefaultDeviceSerialNumber();

    if (serial.empty()) {
        std::cerr << "Device serial number not found" << std::endl;
        return -1;
    }

    std::cout << "Device serial number: " << serial << std::endl;

    // Create the pipeline that receives and processes the packets
//    auto *pipeline = new libfreenect2::CpuPacketPipeline();
    auto pipeline = new libfreenect2::OpenGLPacketPipeline();

    // Open the device by serial number and attach the processing pipeline
    device = freenect2.openDevice(serial, pipeline);

    // Create the frame listener and attach it to the device
    device->setColorFrameListener(&listener);
    device->setIrAndDepthFrameListener(&listener);

    device->start();

    return 0;
}

void KinectCamera::capture(Mat& outputMat) {

    listener.waitForNewFrame(frames);

    libfreenect2::Frame *rgb = frames[libfreenect2::Frame::Color];
    libfreenect2::Frame *depth = frames[libfreenect2::Frame::Depth];

    Mat colorMat = Mat(rgb->height, rgb->width, CV_8UC4, rgb->data);
    // mirror the image horizontally
    flip(colorMat, colorMat, 1);

    colorMat.copyTo(outputMat);

    listener.release(frames);

}


void KinectCamera::release() {
    device->stop();
    device->close();
}

KinectCamera::~KinectCamera() {

}


workspas/HandEyeCalibration/src/camera/KinectCamera.h

//
// Created by ty on 20-9-26.
//

#ifndef HANDEYECALIBRATION_KINECTCAMERA_H
#define HANDEYECALIBRATION_KINECTCAMERA_H


#include <iostream>
#include <opencv2/opencv.hpp>
#include <libfreenect2/libfreenect2.hpp>
#include <libfreenect2/packet_pipeline.h>
#include <libfreenect2/frame_listener_impl.h>

using namespace std;
using namespace cv;


class KinectCamera {

private:
    int status = -2; // -2 initial state, -1 error, 0 ready

    // 1. Initialize the Kinect2 camera object
    libfreenect2::Freenect2 freenect2;

    libfreenect2::Freenect2Device *device = nullptr;

    libfreenect2::SyncMultiFrameListener listener;

    libfreenect2::FrameMap frames;

public:
    KinectCamera(): listener(libfreenect2::Frame::Color | libfreenect2::Frame::Depth) {
        status = init();
    }

    virtual ~KinectCamera();

    // Has the device been opened successfully?
    bool isOpened() {
        return status == 0;
    }

    virtual int init();

    virtual void release();

    void capture(Mat& colorMat);

    void capture(Mat& colorMat, Mat& depthMat);
};


#endif //HANDEYECALIBRATION_KINECTCAMERA_H
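
03-PhotoCapture.cpp above calls the two-argument camera->capture(rgbMat, depthMat), but KinectCamera.cpp only shows the single-argument (color-only) version. The following is a minimal sketch, not part of the original project, of what the depth-capable overload could look like; it mirrors the registration logic of 01-Kinect2Camera.cpp and assumes the class is extended with a libfreenect2::Registration pointer plus undistorted/registered/bigdepth frame members, created in init() after device->start():

// Hypothetical members assumed to be added to KinectCamera.h and created in init():
//     libfreenect2::Registration *registration;   // new Registration(device->getIrCameraParams(), device->getColorCameraParams());
//     libfreenect2::Frame undistorted{512, 424, 4}, registered{512, 424, 4}, bigdepth{1920, 1082, 4};
void KinectCamera::capture(Mat &colorMat, Mat &depthMat) {

    listener.waitForNewFrame(frames);

    libfreenect2::Frame *rgb = frames[libfreenect2::Frame::Color];
    libfreenect2::Frame *depth = frames[libfreenect2::Frame::Depth];

    // Map the 512x424 depth frame onto the 1920x1080 color frame.
    // bigdepth is 1920x1082 float (millimetres) with one padding row at the top and bottom.
    registration->apply(rgb, depth, &undistorted, &registered, true, &bigdepth);

    Mat rgbFrame(rgb->height, rgb->width, CV_8UC4, rgb->data);
    Mat bigDepthFrame(bigdepth.height, bigdepth.width, CV_32FC1, bigdepth.data);
    // Crop 1920x1082 -> 1920x1080
    Mat croppedDepth = bigDepthFrame(Rect(0, 1, bigDepthFrame.cols, bigDepthFrame.rows - 2));

    // Mirror both images (as the single-argument capture() does) and copy them out,
    // so the caller's Mats do not point into the frame buffers released below.
    flip(rgbFrame, colorMat, 1);
    flip(croppedDepth, depthMat, 1);

    listener.release(frames);
}

Under these assumptions the caller receives a 1920x1080 CV_32FC1 depth image in millimetres aligned to the color image, which is exactly what getCloud() in 03-PhotoCapture.cpp iterates over.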

Usage:

#include <iostream>
#include <opencv2/opencv.hpp>
#include <KinectCamera.h>

using namespace std;
using namespace cv;

int main(int argc, char *argv[]) {
    KinectCamera *camera = new KinectCamera();

    if (!camera->isOpened()) {
        std::cerr << "Failed to open the camera" << std::endl;
        return -1;
    }

    Size imageSize;

    while (true) {
        // 2. Receive rgb (and depth) frames in a loop. The frames must be released
        //    before the next waitForNewFrame: listener.release(frames);
        //    (KinectCamera::capture does this internally.)

        Mat colorMat, srcMat, gray;

        camera->capture(colorMat);

        // keep a copy of the original image
        colorMat.copyTo(srcMat);

        // 3. Find the calibration corners in the color image (the frame is BGRA)
        cvtColor(colorMat, gray, COLOR_BGRA2GRAY);

        imageSize = gray.size();

        // ... (the corner detection of the hand-eye calibration project continues here,
        //      including the condition that eventually breaks out of the loop)
    }

    cv::destroyAllWindows();
    // Close the device promptly once it is no longer needed
    camera->release();

}
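
The fragment above is an excerpt from the hand-eye calibration project, so the corner detection and the loop-exit condition are omitted. For completeness, a minimal self-contained sketch that only previews the Kinect2 color stream through the KinectCamera class (the window name and the ESC/q exit keys are choices made here, not taken from the original):

#include <iostream>
#include <opencv2/opencv.hpp>
#include <KinectCamera.h>

int main() {
    KinectCamera camera;                    // the constructor opens and starts the device
    if (!camera.isOpened()) {
        std::cerr << "Failed to open the camera" << std::endl;
        return -1;
    }

    while (true) {
        cv::Mat colorMat;
        camera.capture(colorMat);           // 1920x1080 BGRA, already mirrored

        cv::imshow("kinect2 color", colorMat);
        int key = cv::waitKey(30) & 0xFF;
        if (key == 27 || key == 'q') {      // ESC or q quits
            break;
        }
    }

    cv::destroyAllWindows();
    camera.release();                       // stop and close the device
    return 0;
}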

 

------------------------------------------------------------------------------------------------------------------------------------

Starting the camera from the command line

cd ~/3rdparty/source/libfreenect2-master/build/bin
./Protonect

 

 
