Kinect Depth Image Acquisition and Frame-Differencing Motion Detection with OpenCV

Environment setup:

1. Add the existing property sheet (opencv300.props)

2.1. VC++ Directories: add C:\Program Files\Microsoft SDKs\Kinect\v1.8\inc to Include Directories; add C:\Program Files\Microsoft SDKs\Kinect\v1.8\lib\x86 to Library Directories

2.2. Linker > Input > Additional Dependencies: add Kinect10.lib (a quick sanity check for this setup is sketched right after this list)
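
As a quick sanity check for the setup above, a minimal console program can simply count the attached sensors. This is only a sketch, not part of the original post; it uses NuiGetSensorCount from NuiApi.h, and the #pragma comment line is an alternative to the Additional Dependencies entry in step 2.2.

#include <windows.h>
#include <iostream>
#include <NuiApi.h>                        // from C:\Program Files\Microsoft SDKs\Kinect\v1.8\inc

#pragma comment(lib, "Kinect10.lib")       // same effect as the Additional Dependencies entry

int main()
{
	int sensorCount = 0;
	// Fails if the Kinect runtime/SDK is not installed or the paths are wrong
	if (FAILED(NuiGetSensorCount(&sensorCount)))
	{
		std::cout << "Kinect SDK not available" << std::endl;
		return -1;
	}
	std::cout << "Connected Kinect sensors: " << sensorCount << std::endl;
	return 0;
}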


Kinect color and depth image data are delivered frame by frame as data streams. There are two ways to get frames from a stream:

1. Polling: keep asking the camera for data in a while loop and process each frame as soon as it arrives.

2. Event-driven: the thread sleeps until new data is ready, then wakes up and takes the data. Waiting for new data is treated as an event, which the system identifies by an event handle (a polling sketch follows the reference link below).

Ref : http://blog.csdn.net/zouxy09/article/details/8146719
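
The full program below uses the event approach (an event handle passed to NuiImageStreamOpen plus WaitForSingleObject). For comparison, a polling loop would look roughly like the following sketch; it assumes the depth stream has already been opened and depthStreamHandle holds its handle, and it leaves out the frame-to-Mat conversion shown later.

// Polling sketch: request the next frame with a short timeout and simply try again
// if nothing is ready yet (error handling trimmed for brevity)
while (true)
{
	const NUI_IMAGE_FRAME *pImageFrame = NULL;
	HRESULT hr = NuiImageStreamGetNextFrame(depthStreamHandle, 30, &pImageFrame);   // wait at most 30 ms
	if (FAILED(hr))
		continue;                        // no new frame yet, keep polling

	// ... convert pImageFrame to a cv::Mat here, exactly as in the event version ...

	NuiImageStreamReleaseFrame(depthStreamHandle, pImageFrame);
}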

#include <windows.h>  
#include <iostream>   
#include <NuiApi.h>  
#include <opencv2/opencv.hpp>

using namespace std;
using namespace cv;

class FrameSubtractionKinect
{
public:
	void ImageOperation(Mat& depthimage, Mat& frame_0, int& num, int& steadyCount);
};

int main(int argc, char* argv[])   // argc: number of command-line arguments passed to the program
                                   // argv: array of C-string pointers, one element per argument
{
	Mat image, frame_0;
	int num = 1;
	int steadyCount = 0;
	FrameSubtractionKinect detect;

	image.create(240, 320, CV_8UC1);   // 8-bit grayscale image used to display the depth map

	//1. Initialize NUI, requesting the depth stream
	HRESULT hr = NuiInitialize(NUI_INITIALIZE_FLAG_USES_DEPTH);
	if (FAILED(hr))
	{
		cout << "NuiInitialize failed" << endl;
		return hr;
	}

	//2. Create the frame-ready event handle and the stream handle
	HANDLE nextColorFrameEvent = CreateEvent(NULL,TRUE,FALSE,NULL);
	HANDLE depthStreamHandle = NULL;

	//3. Open the Kinect depth image stream and keep its handle in depthStreamHandle
	hr = NuiImageStreamOpen(NUI_IMAGE_TYPE_DEPTH, NUI_IMAGE_RESOLUTION_320x240, 0, 2, nextColorFrameEvent, &depthStreamHandle);
	if (FAILED(hr))
	{
		cout << "Could not open color image stream video" << endl;
		NuiShutdown();
		return hr;
	}
	namedWindow("depthImage", WINDOW_AUTOSIZE);

	//4. Start reading depth data
	while (1)
	{
		const NUI_IMAGE_FRAME *pImageFrame = NULL;

		//4.1. Wait indefinitely for new data; WaitForSingleObject returns once a frame is ready
		if (WaitForSingleObject(nextColorFrameEvent, INFINITE) == 0)
		{
			//4.2. Get the frame from the stream handle opened above; the frame data address is stored in pImageFrame
			hr = NuiImageStreamGetNextFrame(depthStreamHandle, 0, &pImageFrame);
			if (FAILED(hr))
			{
				cout << "Could not get depth image" << endl;
				NuiShutdown();
				return -1;
			}

			INuiFrameTexture *pTexture = pImageFrame->pFrameTexture;
			NUI_LOCKED_RECT LockedRect;

			//4.3. Extract the frame into LockedRect, which has two fields (Pitch: bytes per row, pBits: address of the first byte)
			pTexture->LockRect(0, &LockedRect, NULL, 0);  // Lock the data so Kinect will not modify it while we read
			
			//4.4. Check whether the received data is valid
			if (LockedRect.Pitch != 0)
			{
				//4.5. Convert the data into an OpenCV Mat
				//Each depth value is 2 bytes; Pitch is measured in bytes, so row offsets are computed on the byte pointer LockedRect.pBits
				
				for (int i = 0; i < image.rows; i++)
				{
					uchar *ptr = image.ptr<uchar>(i);     // pointer to row i

					uchar *pBufferRun = (uchar*)(LockedRect.pBits) + i*LockedRect.Pitch;
					USHORT *pBuffer = (USHORT*)pBufferRun;

					for (int j = 0; j < image.cols; j++)
					{
						ptr[j] = 255 - (uchar)(256 * pBuffer[j] / 0x0fff);   // scale to 8 bits and invert so nearer pixels are brighter
					}
				}
				imshow("depthImage", image);

				detect.ImageOperation(image, frame_0, num, steadyCount);
			}
			else
			{
				cout << "Buffer length of received texture is bogus" << endl;
			}

			//5. Unlock after processing
			pTexture->UnlockRect(0);
			//6. Release this frame
			NuiImageStreamReleaseFrame(depthStreamHandle,pImageFrame);
		}
		if (waitKey(20) == 27)
			break;
	}
	// Shut down NUI
	NuiShutdown();
	return 0;
}

// Two-frame differencing to detect moving objects
void FrameSubtractionKinect::ImageOperation(Mat& depthimage, Mat& frame_0, int& num, int& steadyCount)
{
	Mat erodeimage;
	Mat background, foreground, foreground_BW;

	if (num == 1)
	{
		background = depthimage.clone();
		frame_0 = background;
	}
	else
	{
		background = frame_0;
		frame_0 = depthimage.clone();
	}

	absdiff(depthimage, background, foreground);
	erode(foreground, erodeimage, getStructuringElement(MORPH_RECT, Size(7, 7)));
	threshold(erodeimage, foreground_BW, 15, 255, THRESH_BINARY);

	//imshow("foreground", foreground);
	//imshow("ErodeImage", erodeimage);
	imshow("foreground_BW", foreground_BW);

	num++;
	if (num > 2000000000) num = 10;
}
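
The differencing stage itself does not depend on the Kinect, so it can be tried out with an ordinary webcam first. The sketch below is not part of the original program; it feeds the same absdiff / erode / threshold pipeline (same 7x7 kernel and threshold of 15) with grayscale frames from cv::VideoCapture.

#include <opencv2/opencv.hpp>
using namespace cv;

int main()
{
	VideoCapture cap(0);                        // default camera
	if (!cap.isOpened()) return -1;

	Mat frame, gray, prev, diff, eroded, mask;
	while (true)
	{
		cap >> frame;
		if (frame.empty()) break;
		cvtColor(frame, gray, COLOR_BGR2GRAY);  // work in grayscale, like the depth image

		if (prev.empty()) prev = gray.clone();  // first frame: difference with itself is zero

		absdiff(gray, prev, diff);              // difference against the previous frame
		erode(diff, eroded, getStructuringElement(MORPH_RECT, Size(7, 7)));
		threshold(eroded, mask, 15, 255, THRESH_BINARY);

		imshow("foreground_BW", mask);
		prev = gray.clone();                    // current frame becomes the new reference

		if (waitKey(20) == 27) break;           // Esc to quit
	}
	return 0;
}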
