Kinect 2.0: Data Capture Using Kinect Studio from the Kinect SDK

I spent much of last year working with the Kinect 2.0 and found that the second-generation device differs substantially from the first; the capture code is largely different as well.

Here I cover two capture methods. The first drives the device directly through the Kinect SDK and captures depth images and color images. The second drives the Kinect indirectly through OpenNI2 and can record ONI-format video. This post covers the first method: using the Kinect SDK to capture RGB and depth information.

The C++ function below runs on a worker thread. It opens a depth reader (IDepthFrameReader) and a color reader (IColorFrameReader), then acquires and saves frames in a loop. (The IFrameDescription objects pDepthDescription and pColorDescription are only used to query each stream's width and height.)

Body-index pixels are visualized with a palette of six cv::Vec3b colors, and depth pixels are converted to 3D camera-space points through an ICoordinateMapper. Because the color format delivered by the SDK is not plain RGB (it is typically YUY2), the color frame must be converted to BGRA during the copy.

void CKinectStudio::ReceiveStudioMessage(WPARAM wParam, LPARAM lParam)
{
	p_StudioFatherHwnd = (HWND)lParam;
	BSTR b = (BSTR)wParam;
	CString videoPath(b);
	SysFreeString(b);
	
	int depth_width, depth_height, color_width, color_height;
	// Depth and color stream interfaces
	IKinectSensor* m_pKinectSensor = NULL;
	IDepthFrameReader*  m_pDepthFrameReader = NULL;
	IColorFrameReader*  m_pColorFrameReader = NULL;
	IDepthFrameSource* pDepthFrameSource = NULL;
	IColorFrameSource* pColorFrameSource = NULL;
	HRESULT hr;

	// Get the default sensor
	hr = GetDefaultKinectSensor(&m_pKinectSensor);
	// Open the Kinect
	hr = m_pKinectSensor->Open();
	// Create the coordinate mapper (depth pixel -> 3D camera space)
	ICoordinateMapper* pCoordinateMapper;
	hr = m_pKinectSensor->get_CoordinateMapper(&pCoordinateMapper);
	// Open the depth stream
	hr = m_pKinectSensor->get_DepthFrameSource(&pDepthFrameSource);
	hr = pDepthFrameSource->OpenReader(&m_pDepthFrameReader);
	IFrameDescription* pDepthDescription;
	hr = pDepthFrameSource->get_FrameDescription(&pDepthDescription);
	pDepthDescription->get_Width(&depth_width);   // 512
	pDepthDescription->get_Height(&depth_height); // 424
	SafeRelease(pDepthFrameSource);
	// Open the color stream
	hr = m_pKinectSensor->get_ColorFrameSource(&pColorFrameSource);
	hr = pColorFrameSource->OpenReader(&m_pColorFrameReader);
	IFrameDescription* pColorDescription;
	hr = pColorFrameSource->get_FrameDescription(&pColorDescription);
	pColorDescription->get_Width(&color_width);   // 1920
	pColorDescription->get_Height(&color_height); // 1080
	SafeRelease(pColorFrameSource);

	Mat depthImg_show = Mat::zeros(depth_height, depth_width, CV_8UC3); // the raw UINT16 depth image is unsuitable for direct display
	Mat depthImg = Mat::zeros(depth_height, depth_width, CV_16UC1);
	Mat colorImg = Mat::zeros(color_height, color_width, CV_8UC3);
	Mat m_BodyIndex = Mat::zeros(depth_height, depth_width, CV_8UC1);

	// Buffers for the incoming frame data
	UINT nBufferSize_depth = 0;
	UINT16 *pBuffer_depth = NULL;
	UINT nBufferSize_color = 0;
	RGBQUAD *pBuffer_color = NULL;
	RGBQUAD* m_pColorRGBX = new RGBQUAD[color_width * color_height];

	std::vector<UINT16> depthBuffer(depth_width * depth_height);
	std::vector<RGBQUAD> colorBuffer(color_width * color_height);
	// Body-index stream: each depth pixel carries a player index (0-5) or 0xff for background
	IBodyIndexFrameSource* pBodyIndexFrameSource = NULL;
	IBodyIndexFrameReader* m_pBodyIndexFrameReader = NULL;
	HRESULT hrBodyIndex;
	hrBodyIndex = m_pKinectSensor->get_BodyIndexFrameSource(&pBodyIndexFrameSource);
	hrBodyIndex = pBodyIndexFrameSource->OpenReader(&m_pBodyIndexFrameReader);
	SafeRelease(pBodyIndexFrameSource);
	cv::Mat bodyIndexMat(depth_height, depth_width, CV_8UC3);
	// Palette for up to six tracked bodies
	cv::Vec3b color[6];
	color[0] = cv::Vec3b(255, 0, 0);
	color[1] = cv::Vec3b(0, 255, 0);
	color[2] = cv::Vec3b(0, 0, 255);
	color[3] = cv::Vec3b(255, 255, 0);
	color[4] = cv::Vec3b(255, 0, 255);
	color[5] = cv::Vec3b(0, 255, 255);

	// Per-frame point-cloud buffers (memset counts bytes, hence sizeof(float))
	PointCloud* pPointCloud = new PointCloud[SumFrame];
	for (int i = 0; i < SumFrame; i++)
	{
		(pPointCloud + i)->arryX = new float[424 * 512];
		(pPointCloud + i)->arryY = new float[424 * 512];
		(pPointCloud + i)->arryZ = new float[424 * 512];
		memset((pPointCloud + i)->arryX, 0, 424 * 512 * sizeof(float));
		memset((pPointCloud + i)->arryY, 0, 424 * 512 * sizeof(float));
		memset((pPointCloud + i)->arryZ, 0, 424 * 512 * sizeof(float));
	}
	int nFrame = 0;
	while (1)
	{
		IDepthFrame* pDepthFrame = NULL;
		IColorFrame* pColorFrame = NULL;
		USHORT nDepthMinReliableDistance = 0;
		USHORT nDepthMaxReliableDistance = 0;
		// Acquire the latest depth frame
		HRESULT hrDepth = m_pDepthFrameReader->AcquireLatestFrame(&pDepthFrame);
		if (FAILED(hrDepth))
		{
			continue;
		}
		hrDepth = pDepthFrame->get_DepthMinReliableDistance(&nDepthMinReliableDistance);
		hrDepth = pDepthFrame->get_DepthMaxReliableDistance(&nDepthMaxReliableDistance);
		hrDepth = pDepthFrame->AccessUnderlyingBuffer(&nBufferSize_depth, &pBuffer_depth);
		// Keep a copy of the depth data for coordinate mapping after the frame is released
		hrDepth = pDepthFrame->CopyFrameDataToArray(static_cast<UINT>(depthBuffer.size()), &depthBuffer[0]);
		depthImg_show = ConvertMat(pBuffer_depth, depth_width, depth_height, nDepthMinReliableDistance, nDepthMaxReliableDistance);
		SafeRelease(pDepthFrame);
		// Acquire the latest color frame
		HRESULT hrColor = m_pColorFrameReader->AcquireLatestFrame(&pColorFrame);
		if (FAILED(hrColor))
		{
			continue;
		}
		ColorImageFormat imageFormat = ColorImageFormat_None;
		hrColor = pColorFrame->get_RawColorImageFormat(&imageFormat);
		pBuffer_color = m_pColorRGBX;
		// The raw color format is not plain RGB (typically YUY2), so have the SDK convert to BGRA during the copy
		hrColor = pColorFrame->CopyConvertedFrameDataToArray(color_width * color_height * sizeof(RGBQUAD),
			reinterpret_cast<BYTE*>(pBuffer_color), ColorImageFormat_Bgra);
		colorImg = ConvertMat(pBuffer_color, color_width, color_height);
		SafeRelease(pColorFrame);
		IBodyIndexFrame* pBodyIndexFrame = NULL;
		hrBodyIndex = m_pBodyIndexFrameReader->AcquireLatestFrame(&pBodyIndexFrame);
		if (FAILED(hrBodyIndex))
		{
			continue;
		}
		UINT uSize = 0;
		BYTE* pBodyIndexBuffer = NULL;
		pBodyIndexFrame->AccessUnderlyingBuffer(&uSize, &pBodyIndexBuffer);
		for (int y = 0; y < depth_height; ++y)
		{
			for (int x = 0; x < depth_width; ++x)
			{
				DepthSpacePoint depthSpacePoint = { static_cast<float>(x), static_cast<float>(y) };
				UINT16 depth = depthBuffer[y * depth_width + x];
				CameraSpacePoint cameraSpacePoint = { 0.0f, 0.0f, 0.0f };
				pCoordinateMapper->MapDepthPointToCameraSpace(depthSpacePoint, depth, &cameraSpacePoint);
				int uBodyIdx = y * depth_width + x;
				if (pBodyIndexBuffer[uBodyIdx] != 0xff) {
					bodyIndexMat.at<cv::Vec3b>(y, x) = color[pBodyIndexBuffer[uBodyIdx]];
				}
				else {
					bodyIndexMat.at<cv::Vec3b>(y, x) = cv::Vec3b(0, 0, 0);
				}
				// Keep only pixels that belong to a tracked body and mapped to valid
				// coordinates (NAGATIVE is a project-wide sentinel constant)
				if (pBodyIndexBuffer[uBodyIdx] < 6 && (cameraSpacePoint.X > NAGATIVE && cameraSpacePoint.Y > NAGATIVE && cameraSpacePoint.Z > NAGATIVE))
				{
					// Index with the row stride depth_width, not depth_height
					(pPointCloud + nFrame)->arryX[uBodyIdx] = cameraSpacePoint.X;
					(pPointCloud + nFrame)->arryY[uBodyIdx] = cameraSpacePoint.Y;
					(pPointCloud + nFrame)->arryZ[uBodyIdx] = cameraSpacePoint.Z;
				}
				else
				{
					(pPointCloud + nFrame)->arryX[uBodyIdx] = 0;
					(pPointCloud + nFrame)->arryY[uBodyIdx] = 0;
					(pPointCloud + nFrame)->arryZ[uBodyIdx] = 0;
				}
			}
		}
		// After the last buffered frame (nFrame == SumFrame - 1; SumFrame is 150 in
		// this project), dump each frame's point cloud to a .txt file
		if (g_SaveKinectStudio == TRUE)
		{
			::PostMessage(p_StudioFatherHwnd, WM_UPDATE_PROGRESSBAR, (WPARAM)nFrame, (LPARAM)0);
			if (nFrame == SumFrame - 1)
			{
				nFrame = 0;
				g_SaveKinectStudio = FALSE;
				for (int i = 0; i < SumFrame; i++)
				{
					::PostMessage(p_StudioFatherHwnd, WM_UPDATE_PROGRESSBAR, (WPARAM)i, (LPARAM)0);
					CString btemp;
					btemp.Format(_T("%d"), i);
					CString txtPath = videoPath + btemp + _T(".txt");
					CStringA nameA(txtPath);
					std::ofstream out(nameA.GetString());
					if (out.is_open())
					{
						for (int y = 0; y < depth_height; ++y)
						{
							for (int x = 0; x < depth_width; ++x)
							{
								int idx = y * depth_width + x;
								// Skip points that were zeroed out as invalid
								if ((pPointCloud + i)->arryX[idx] == 0 && (pPointCloud + i)->arryY[idx] == 0 && (pPointCloud + i)->arryZ[idx] == 0)
								{
									continue;
								}
								out << (pPointCloud + i)->arryX[idx] << " " << (pPointCloud + i)->arryY[idx] << " " << (pPointCloud + i)->arryZ[idx] << endl;
							}
						}
						out.close();
					}
				}
				::PostMessage(p_StudioFatherHwnd, WM_UPDATE_PROGRESSBAR, (WPARAM)0, (LPARAM)0);
			}
		}
		SafeRelease(pBodyIndexFrame);
		resize(colorImg, colorImg, depthImg_show.size());
		// Wrap the Mats in IplImage headers for the legacy display path; the headers
		// live until the end of this loop iteration, so the receiving window should
		// copy the pixels promptly
		IplImage bgrHeader = IplImage(colorImg);
		IplImage bodyHeader = IplImage(bodyIndexMat);
		BGRImage = &bgrHeader;
		BodyImage = &bodyHeader;
		if (g_SaveKinectStudio == TRUE) 
		{
			/*char namefile[50];
			CTime tm = CTime::GetCurrentTime();
			CString strBKNum;
			strBKNum.Format(_T("%u"), tm.GetTime());
			strBKNum = _T("F:\\数据11\\") + strBKNum + _T(".jpg");
			gr_save_image(strBKNum, BodyImage);*/
			nFrame++;
		}
		::PostMessage(p_StudioFatherHwnd, WM_SHOW_RGBIMG, (WPARAM)BGRImage, (LPARAM)0);
		::PostMessage(p_StudioFatherHwnd, WM_SHOW_DEPTHIMG, (WPARAM)BodyImage, (LPARAM)1);
		Sleep(20);
	}
	SafeRelease(m_pBodyIndexFrameReader);
	SafeRelease(m_pDepthFrameReader);
	SafeRelease(m_pColorFrameReader);
	if (m_pKinectSensor)
	{
		m_pKinectSensor->Close();
	}
	SafeRelease(m_pKinectSensor);
	//gr_init_studio(videoPath);
}
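One performance note on the per-pixel mapping loop above: the SDK also exposes ICoordinateMapper::MapDepthFrameToCameraSpace, which converts an entire depth frame in one call. A minimal sketch, assuming the same pCoordinateMapper and depthBuffer set up in the function above:

std::vector<CameraSpacePoint> cameraPoints(depth_width * depth_height);
// One call converts the whole frame; invalid pixels come back with -infinity coordinates
HRESULT hrMap = pCoordinateMapper->MapDepthFrameToCameraSpace(
	static_cast<UINT>(depthBuffer.size()), depthBuffer.data(),
	static_cast<UINT>(cameraPoints.size()), cameraPoints.data());
if (SUCCEEDED(hrMap))
{
	// cameraPoints[y * depth_width + x] is the 3D point for depth pixel (x, y)
}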
The following snippet saves an image to disk:
void CKinectStudio::gr_save_image(CString &strImageName, IplImage* img)
{
	// cvSaveImage (legacy OpenCV C API) needs a narrow char* path
	CStringA name(strImageName);
	cvSaveImage(name.GetString(), img);
}
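On OpenCV 2.x or newer, the deprecated C API can be avoided entirely. A sketch of an equivalent helper (SaveImageModern is a hypothetical name; cv::cvarrToMat wraps the IplImage header without copying pixels):

void SaveImageModern(const CString& strImageName, IplImage* img)
{
	CStringA name(strImageName);
	// imwrite picks the encoder from the file extension (.jpg, .png, ...)
	cv::imwrite(std::string(name.GetString()), cv::cvarrToMat(img));
}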
The following template releases a COM interface (the Kinect sensor, the depth reader, the color reader, and so on) and nulls the pointer so it cannot be released twice.
template<class Interface>
inline void CKinectStudio::SafeRelease(Interface *& pInterfaceToRelease)
{
	if (pInterfaceToRelease != NULL) {
		pInterfaceToRelease->Release();
		pInterfaceToRelease = NULL;
	}
}
The following function converts the raw color buffer (an array of RGBQUAD in BGRA order) into a 3-channel cv::Mat, dropping the alpha channel.
// Convert the color buffer to cv::Mat
Mat CKinectStudio::ConvertMat(const RGBQUAD* pBuffer, int nWidth, int nHeight)
{
	Mat img(nHeight, nWidth, CV_8UC3);
	uchar* p_mat = img.data;
	const RGBQUAD* pBufferEnd = pBuffer + (nWidth * nHeight);
	while (pBuffer < pBufferEnd)
	{
		*p_mat = pBuffer->rgbBlue;
		p_mat++;
		*p_mat = pBuffer->rgbGreen;
		p_mat++;
		*p_mat = pBuffer->rgbRed;
		p_mat++;
		++pBuffer;
	}
	return img;
}
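Since RGBQUAD is laid out blue, green, red, reserved, which matches OpenCV's BGRA channel order, the same conversion can be written with a zero-copy Mat header and cvtColor. A sketch using the same pBuffer, nWidth, and nHeight parameters as ConvertMat above, assuming the buffer stays alive while bgra is in use:

cv::Mat bgra(nHeight, nWidth, CV_8UC4, const_cast<RGBQUAD*>(pBuffer));
cv::Mat bgr;
cv::cvtColor(bgra, bgr, cv::COLOR_BGRA2BGR); // drops the unused alpha channel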
The following function converts the raw UINT16 depth buffer into a displayable 8-bit grayscale cv::Mat (stored as 3 channels).
// Convert the depth buffer to cv::Mat
Mat CKinectStudio::ConvertMat(const UINT16* pBuffer, int nWidth, int nHeight, USHORT nMinDepth, USHORT nMaxDepth)
{
	Mat img(nHeight, nWidth, CV_8UC3);
	uchar* p_mat = img.data;
	const UINT16* pBufferEnd = pBuffer + (nWidth * nHeight);
	while (pBuffer < pBufferEnd)
	{
		USHORT depth = *pBuffer;
		// Map valid depths to 0-255; depth % 256 is cheap but wraps every 256 mm, producing banding
		BYTE intensity = static_cast<BYTE>((depth >= nMinDepth) && (depth <= nMaxDepth) ? (depth % 256) : 0);
		*p_mat = intensity;
		p_mat++;
		*p_mat = intensity;
		p_mat++;
		*p_mat = intensity;
		p_mat++;
		++pBuffer;
	}
	return img;
}
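A common alternative to the depth % 256 mapping is to scale the valid range linearly onto 0-255. A sketch of just the intensity computation, using the same depth, nMinDepth, and nMaxDepth as above:

BYTE intensity = 0;
if (depth >= nMinDepth && depth <= nMaxDepth && nMaxDepth > nMinDepth)
{
	// Integer math is fine here: depth promotes to int, and the result is <= 255
	intensity = static_cast<BYTE>(255 * (depth - nMinDepth) / (nMaxDepth - nMinDepth));
}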


Everything above uses the Kinect 2.0 SDK driver to capture depth and RGB data; it cannot save an ONI-format video stream. To record ONI video, you have to drive the Kinect 2.0 through OpenNI2 yourself.

The next post will cover that method.
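As a quick preview, OpenNI2 recording centers on openni::Recorder. A minimal sketch, assuming OpenNI2 is installed together with a Kinect 2.0 driver bridge (the RecordOni name and the five-second duration are illustrative):

#include <OpenNI.h>
#include <windows.h>

int RecordOni(const char* path)
{
	// Initialize OpenNI and open the first available device
	if (openni::OpenNI::initialize() != openni::STATUS_OK) return -1;
	openni::Device device;
	if (device.open(openni::ANY_DEVICE) != openni::STATUS_OK) return -1;

	// Create and start a depth stream
	openni::VideoStream depth;
	depth.create(device, openni::SENSOR_DEPTH);
	depth.start();

	// Attach the stream to a recorder; frames go to the .oni file while it runs
	openni::Recorder recorder;
	recorder.create(path);
	recorder.attach(depth);
	recorder.start();

	Sleep(5000); // record for about five seconds

	recorder.stop();
	recorder.destroy();
	depth.stop();
	depth.destroy();
	device.close();
	openni::OpenNI::shutdown();
	return 0;
}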

Below are sample captures from the Kinect. [images omitted]


