“站在巨人的肩膀上”:参考针对 OpenNI 1.0 系列的读取方式,并利用 OpenNI2 自带的例程实现多通道数据的读取,得到彩色图像与深度图像,然后利用 OpenCV 的 Mat 进行显示:
#include "iostream"
#include "OpenNI.h"
#include "opencv\cv.h"
#include "opencv\highgui.h"
using namespace std;
using namespace openni;
using namespace cv;
int main(int argc, char** argv)
{
    char key = 0; // last key pressed in the display loop; 27 == ESC quits

    // 1. Initialize the OpenNI2 runtime.
    Status rc = OpenNI::initialize();
    if (rc != STATUS_OK)
    {
        cout << "Initialize failed: " << OpenNI::getExtendedError();
        cin.get();
        cin.get();
        return -1;
    }

    // 2. Open the first attached device.
    Device device;
    rc = device.open(ANY_DEVICE);
    if (rc != STATUS_OK) // BUG FIX: original never checked open()
    {
        cout << "Couldn't open device: " << OpenNI::getExtendedError();
        OpenNI::shutdown();
        cin.get();
        cin.get();
        return -1;
    }

    // 3. Create and start the depth / color streams where the sensors exist.
    VideoStream depth, image;
    if (device.getSensorInfo(SENSOR_DEPTH) != NULL)
    {
        rc = depth.create(device, SENSOR_DEPTH);
        if (rc == STATUS_OK)
            rc = depth.start();
        // BUG FIX: original ignored a create() failure and printed a
        // misleading "couldn't create" message only when start() failed.
        if (rc != STATUS_OK)
        {
            cout << "Couldn't create/start depth stream: "
                 << OpenNI::getExtendedError();
            cin.get();
            cin.get();
            return -1;
        }
    }
    if (device.getSensorInfo(SENSOR_COLOR) != NULL)
    {
        rc = image.create(device, SENSOR_COLOR);
        if (rc == STATUS_OK)
            rc = image.start();
        if (rc != STATUS_OK) // BUG FIX: report color failure instead of silence
            cout << "Couldn't create/start color stream: "
                 << OpenNI::getExtendedError();
    }

    // Streams polled together; index 0 = depth, index 1 = color.
    VideoStream* streams[] = { &depth, &image };
    VideoFrameRef frame;
    if (device.hasSensor(SENSOR_COLOR) && device.hasSensor(SENSOR_DEPTH))
        cout << "the color is OK";

    // 4. Main loop: wait for whichever stream has data, read it, display it.
    while (key != 27)
    {
        int readyStream = -1;
        // readFrame() would block forever without data; waitForAnyStream
        // tells us which stream actually has a new frame.
        rc = OpenNI::waitForAnyStream(streams, 2, &readyStream);
        if (rc != STATUS_OK) // BUG FIX: original ignored the wait result
        {
            cout << "Wait failed: " << OpenNI::getExtendedError() << endl;
            break;
        }
        switch (readyStream)
        {
        case 0:
            // depth
            depth.readFrame(&frame);
            break;
        case 1:
            // color
            image.readFrame(&frame);
            break;
        default:
            cout << "unexpected stream" << endl;
            // BUG FIX: original fell through and dereferenced a possibly
            // stale/invalid frame below.
            continue;
        }
        if (!frame.isValid())
            continue;

        switch (frame.getVideoMode().getPixelFormat())
        {
        case PIXEL_FORMAT_DEPTH_1_MM:
        case PIXEL_FORMAT_DEPTH_100_UM:
        {
            // Wrap the 16-bit depth buffer without copying, then scale it
            // down to 8 bits for display. 4096 ~= the 12-bit range the
            // sensor actually produces (values 0..4095).
            Mat imgDEP(frame.getHeight(), frame.getWidth(), CV_16UC1,
                       (void*)frame.getData());
            Mat imgShow;
            // convertTo writes to a separate Mat so the OpenNI-owned frame
            // buffer is never modified.
            imgDEP.convertTo(imgShow, CV_8U, 255.0 / 4096.0);
            namedWindow("depth", 1);
            imshow("depth", imgShow);
            break;
        }
        case PIXEL_FORMAT_RGB888:
        {
            // OpenNI delivers RGB; OpenCV's imshow expects BGR channel order.
            Mat imgRGB(frame.getHeight(), frame.getWidth(), CV_8UC3,
                       (void*)frame.getData());
            Mat imgBGR;
            // BUG FIX: the original converted in place, writing back into the
            // read-only buffer owned by the VideoFrameRef.
            cvtColor(imgRGB, imgBGR, CV_BGR2RGB);
            namedWindow("test", 1);
            imshow("test", imgBGR);
            break;
        }
        default:
            cout << "unknown format" << endl;
        }
        key = cvWaitKey(20); // pump the HighGUI event loop and poll keyboard
    }

    // 5. Tear down in reverse order of construction.
    image.stop();
    image.destroy();
    depth.stop();
    depth.destroy();
    device.close();
    OpenNI::shutdown();
    cin.get();
    cin.get();
    return 0;
}
其中要说明两点:
第一点:深度图像的像素是 16 位的,显示前需要转换为 8 位:按 255.0/4096.0 的比例缩放。除以 4096 是因为传感器实际输出的深度值有效范围约为 12 位(0~4095)。
参考:
用 OpenCV 畫出 OpenNI 2 的深度、彩色影像
http://viml.nchc.org.tw/blog/paper_info.php?CLASS_ID=1&SUB_ID=1&PAPER_ID=431
Kinect开发教程二:OpenNI读取深度图像与彩色图像并显示http://blog.csdn.net/chenxin_130/article/details/6696187
http://www.pcw8510.com/?p=4009
http://oulehui.blog.163.com/blog/static/7961469820117535714180/(深度图bit解析)
第二点:读取到的深度图像边缘总有"边缘效应"(即人体轮廓旁的阴影/黑边,是深度传感器在遮挡边界处无法测距造成的空洞),需要后续处理。