// This code was assembled from the SDK samples; it reads the depth, infrared and color streams. The overall flow is straightforward: sensor -> source -> reader -> frame -> data. For a detailed walkthrough see http://blog.csdn.net/dustpg/article/details/37987239.
//#include "config.h"
#include "stdafx.h"
#include "DepthBasics.h"
#include <opencv2/opencv.hpp>
using namespace std;
#define InfraredSourceValueMaximum static_cast<float>(USHRT_MAX)
#define InfraredOutputValueMinimum 0.01f
#define InfraredOutputValueMaximum 1.0f
#define InfraredSceneValueAverage 0.08f
#define InfraredSceneStandardDeviations 3.0f
// Convert a raw 16-bit depth buffer into a displayable 8-bit BGR cv::Mat.
// Pixels outside [nMinDepth, nMaxDepth] are rendered black; in-range pixels
// use the low byte of the depth value as a gray intensity.
cv::Mat ConvertMat(const UINT16* pBuffer, int nWidth, int nHeight, USHORT nMinDepth, USHORT nMaxDepth)
{
	cv::Mat img(nHeight, nWidth, CV_8UC3);
	uchar* pDst = img.data;
	const int nPixels = nWidth * nHeight;
	for (int i = 0; i < nPixels; ++i)
	{
		const USHORT depth = pBuffer[i];
		const bool inRange = (depth >= nMinDepth) && (depth <= nMaxDepth);
		const BYTE intensity = static_cast<BYTE>(inRange ? (depth % 256) : 0);
		// Write the same intensity to B, G and R to get a gray pixel.
		pDst[0] = intensity;
		pDst[1] = intensity;
		pDst[2] = intensity;
		pDst += 3;
	}
	return img;
}
// Convert a BGRA (RGBQUAD) color buffer into a 3-channel BGR cv::Mat,
// dropping the alpha channel.
cv::Mat ConvertMat(const RGBQUAD* pBuffer, int nWidth, int nHeight)
{
	cv::Mat img(nHeight, nWidth, CV_8UC3);
	uchar* pDst = img.data;
	const RGBQUAD* pEnd = pBuffer + (nWidth * nHeight);
	for (const RGBQUAD* pSrc = pBuffer; pSrc < pEnd; ++pSrc)
	{
		// OpenCV stores pixels in B, G, R order.
		*pDst++ = pSrc->rgbBlue;
		*pDst++ = pSrc->rgbGreen;
		*pDst++ = pSrc->rgbRed;
	}
	return img;
}
// Convert a raw 16-bit infrared buffer into a displayable 8-bit BGR cv::Mat.
// Each reading is normalized by the sensor maximum, rescaled by the expected
// scene average times the standard-deviation factor, clamped into
// [InfraredOutputValueMinimum, InfraredOutputValueMaximum], and mapped to a
// 0-255 gray value written to all three channels.
cv::Mat ConvertMat(const UINT16* pBuffer, int nWidth, int nHeight)
{
	cv::Mat img(nHeight, nWidth, CV_8UC3);
	uchar* p_mat = img.data;
	const UINT16* pBufferEnd = pBuffer + (nWidth * nHeight);
	while (pBuffer < pBufferEnd)
	{
		// Normalize the raw reading to [0, 1].
		float intensityRatio = static_cast<float>(*pBuffer) / InfraredSourceValueMaximum;
		// Rescale so a typical scene maps into a visible range.
		intensityRatio /= InfraredSceneValueAverage * InfraredSceneStandardDeviations;
		// Clamp to the configured output range.
		intensityRatio = min(InfraredOutputValueMaximum, intensityRatio);
		intensityRatio = max(InfraredOutputValueMinimum, intensityRatio);
		// BYTE (not lowercase byte) for consistency with the other overloads.
		const BYTE intensity = static_cast<BYTE>(intensityRatio * 255.0f);
		// Same value on B, G and R -> gray pixel.
		*p_mat++ = intensity;
		*p_mat++ = intensity;
		*p_mat++ = intensity;
		++pBuffer;
	}
	return img;
}
void main()
{
int depth_width = 512; //depth图像就是这么小
int depth_height = 424;
int color_widht = 1920; //color图像就是辣么大
int color_height = 1080;
cv::Mat depthImg_show = cv::Mat::zeros(depth_height, depth_width, CV_8UC3);//原始UINT16 深度图像不适合用来显示,所以需要砍成8位的就可以了,但是显示出来也不是非常好,最好能用原始16位图像颜色编码,凑合着看了
cv::Mat depthImg = cv::Mat::zeros(depth_height, depth_width, CV_16UC1);//the depth image
cv::Mat colorImg = cv::Mat::zeros(color_height, color_widht, CV_8UC3);//the color image
cv::Mat infraImg = cv::Mat::zeros(depth_height, depth_height, CV_8UC3);//the infrared image
// Current Kinect
IKinectSensor* m_pKinectSensor = NULL;
// Depth reader
IDepthFrameReader* m_pDepthFrameReader = NULL;
// Color reader
IColorFrameReader* m_pColorFrameReader = NULL;
RGBQUAD* m_pColorRGBX = new RGBQUAD[color_widht * color_height];
// Infrared reader
IInfraredFrameReader* m_pInfraredReader = NULL;
//open it!
HRESULT hr;
hr = GetDefaultKinectSensor(&m_pKinectSensor);
if (FAILED(hr))
{
cout << "FUCK! Can not find the Kinect!" << endl;
cv::waitKey(0);
exit(0);
}
if (m_pKinectSensor)
{
// Initialize the Kinect and get the depth reader
IDepthFrameSource* pDepthFrameSource = NULL;
IInfraredFrameSource* pInfraredFrameSource = nullptr;
hr = m_pKinectSensor->Open();
if (SUCCEEDED(hr))
{
hr = m_pKinectSensor->get_DepthFrameSource(&pDepthFrameSource);
hr = m_pKinectSensor->get_InfraredFrameSource(&pInfraredFrameSource);
}
if (SUCCEEDED(hr))
{
hr = pDepthFrameSource->OpenReader(&m_pDepthFrameReader);
hr = pInfraredFrameSource->OpenReader(&m_pInfraredReader);
}
SafeRelease(pDepthFrameSource);
SafeRelease(pInfraredFrameSource);
// for color
// Initialize the Kinect and get the color reader
IColorFrameSource* pColorFrameSource = NULL;
if (SUCCEEDED(hr))
{
hr = m_pKinectSensor->get_ColorFrameSource(&pColorFrameSource);
}
if (SUCCEEDED(hr))
{
hr = pColorFrameSource->OpenReader(&m_pColorFrameReader);
}
SafeRelease(pColorFrameSource);
}
//valify the depth reader
if (!m_pDepthFrameReader)
{
cout << "FUCK! Can not find the m_pDepthFrameReader!" << endl;
cv::waitKey(0);
exit(0);
}
//valify the color reader
if (!m_pColorFrameReader)
{
cout << "FUCK! Can not find the m_pColorFrameReader!" << endl;
cv::waitKey(0);
exit(0);
}
// get the data!
UINT nBufferSize_depth = 0;
UINT16 *pBuffer_depth = NULL;
UINT nBufferSize_infra = 0;
UINT16 *pBuffer_infra = NULL;
UINT nBufferSize_coloar = 0;
RGBQUAD *pBuffer_color = NULL;
char key = 0;
while (true) // 貌似要一直尝试,不一定每次都能读取到图像
{
IDepthFrame* pDepthFrame = NULL;
HRESULT hr = m_pDepthFrameReader->AcquireLatestFrame(&pDepthFrame);
if (SUCCEEDED(hr))
{
USHORT nDepthMinReliableDistance = 0;
USHORT nDepthMaxReliableDistance = 0;
if (SUCCEEDED(hr))
{
hr = pDepthFrame->get_DepthMinReliableDistance(&nDepthMinReliableDistance);
}
if (SUCCEEDED(hr))
{
hr = pDepthFrame->get_DepthMaxReliableDistance(&nDepthMaxReliableDistance);
}
if (SUCCEEDED(hr))
{
hr = pDepthFrame->AccessUnderlyingBuffer(&nBufferSize_depth, &pBuffer_depth);
depthImg_show = ConvertMat(pBuffer_depth, depth_width, depth_height, nDepthMinReliableDistance, nDepthMaxReliableDistance);
}
}
SafeRelease(pDepthFrame);
//for color
IColorFrame* pColorFrame = NULL;
hr = m_pColorFrameReader->AcquireLatestFrame(&pColorFrame);
ColorImageFormat imageFormat = ColorImageFormat_None;
if (SUCCEEDED(hr))
{
ColorImageFormat imageFormat = ColorImageFormat_None;
if (SUCCEEDED(hr))
{
hr = pColorFrame->get_RawColorImageFormat(&imageFormat);
}
if (SUCCEEDED(hr))
{
hr = pColorFrame->get_RawColorImageFormat(&imageFormat);
}
if (SUCCEEDED(hr))
{
if (imageFormat == ColorImageFormat_Bgra)//这里有两个format,不知道具体含义,大概一个预先分配内存,一个需要自己开空间吧
{
hr = pColorFrame->AccessRawUnderlyingBuffer(&nBufferSize_coloar, reinterpret_cast<BYTE**>(&pBuffer_color));
}
else if (m_pColorRGBX)
{
pBuffer_color = m_pColorRGBX;
nBufferSize_coloar = color_widht * color_height * sizeof(RGBQUAD);
hr = pColorFrame->CopyConvertedFrameDataToArray(nBufferSize_coloar, reinterpret_cast<BYTE*>(pBuffer_color), ColorImageFormat_Bgra);
}
else
{
hr = E_FAIL;
}
colorImg = ConvertMat(pBuffer_color, color_widht, color_height);
}
SafeRelease(pColorFrame);
}
IInfraredFrame* pInfraredFrame = NULL;
hr = m_pInfraredReader->AcquireLatestFrame(&pInfraredFrame);
if (SUCCEEDED(hr))
{
IFrameDescription* pFrameDescription = NULL;
INT64 nTime = 0;
int nWidth = 0;
int nHeight = 0;
UINT nBufferSize = 0;
UINT16 *pBuffer = NULL;
hr = pInfraredFrame->get_RelativeTime(&nTime);
if (SUCCEEDED(hr))
{
hr = pInfraredFrame->get_FrameDescription(&pFrameDescription);
}
if (SUCCEEDED(hr))
{
hr = pFrameDescription->get_Width(&nWidth);
}
if (SUCCEEDED(hr))
{
hr = pFrameDescription->get_Height(&nHeight);
}
if (SUCCEEDED(hr))
{
hr = pInfraredFrame->AccessUnderlyingBuffer(&nBufferSize, &pBuffer);
}
infraImg = ConvertMat(pBuffer, depth_width, depth_height);
SafeRelease(pFrameDescription);
}
SafeRelease(pInfraredFrame);
cv::imshow("depth", depthImg_show);
cv::imshow("color", colorImg);
cv::imshow("infrared",infraImg);
key = cv::waitKey(1);
if (key == 27)
{
break;
}
}
if (m_pColorRGBX)
{
delete[] m_pColorRGBX;
m_pColorRGBX = NULL;
}
// close the Kinect Sensor
if (m_pKinectSensor)
{
m_pKinectSensor->Close();
}
SafeRelease(m_pKinectSensor);
}