我的环境的KinectSDK2.0+EmguCV3.0.0
依旧还是WinForm和ImageBox
因为需要用到BodyIndex的数据,但BodyIndex的分辨率和RGB图像的分辨率不同,所以需要用到CoordinateMapper类中的坐标转换函数。
然后直接对colorimage的像素点进行操作。
同样,因为需要用到指针,要把项目调整为允许不安全的代码。
代码和注释如下:
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Linq;
using System.Text;
using System.Windows.Forms;
using Microsoft.Kinect;
using Emgu.CV;
using Emgu.CV.Structure;
using Emgu.Util;
namespace Kinect_koutu_2
{
public partial class Form1 : Form
{
    KinectSensor kinect = null;
    MultiSourceFrameReader framereader = null;
    FrameDescription fd = null;            // body-index (depth-resolution) frame description
    FrameDescription cfd = null;           // color frame description
    CoordinateMapper coordinate = null;
    Image<Bgra, byte> colorimg = null;     // BGRA image pushed to the ImageBox each frame
    DepthSpacePoint[] colorMappedToDepthPoints = null; // per-color-pixel coordinate in depth space
    byte[] colordata = null;               // raw BGRA bytes of the current color frame

    /// <summary>
    /// Opens the default Kinect sensor and wires up a multi-source reader
    /// for the depth, color and body-index streams. Buffers are sized from
    /// the color frame description (one DepthSpacePoint and 4 BGRA bytes per
    /// color pixel).
    /// </summary>
    public Form1()
    {
        InitializeComponent();
        CvInvoke.UseOpenCL = true;
        kinect = KinectSensor.GetDefault();
        coordinate = kinect.CoordinateMapper;
        framereader = kinect.OpenMultiSourceFrameReader(FrameSourceTypes.Depth | FrameSourceTypes.Color | FrameSourceTypes.BodyIndex);
        framereader.MultiSourceFrameArrived += Framereader_MultiSourceFrameArrived;
        fd = kinect.BodyIndexFrameSource.FrameDescription;
        cfd = kinect.ColorFrameSource.FrameDescription;
        colorMappedToDepthPoints = new DepthSpacePoint[cfd.Width * cfd.Height];
        colorimg = new Image<Bgra, byte>(cfd.Width, cfd.Height);
        colordata = new byte[colorimg.Bytes.Length]; // BGRA: 4 bytes per color pixel
        kinect.Open();
    }

    /// <summary>
    /// Per-frame handler: copies the color frame, maps every color pixel into
    /// depth space, masks out non-body pixels via the body-index frame, and
    /// displays the result.
    /// </summary>
    private void Framereader_MultiSourceFrameArrived(object sender, MultiSourceFrameArrivedEventArgs e)
    {
        MultiSourceFrame multiSourceFrame = e.FrameReference.AcquireFrame();
        if (multiSourceFrame == null)
            return;

        ColorFrame cFrame = null;
        BodyIndexFrame bframe = null;
        DepthFrame dframe = null;
        try
        {
            cFrame = multiSourceFrame.ColorFrameReference.AcquireFrame();
            bframe = multiSourceFrame.BodyIndexFrameReference.AcquireFrame();
            dframe = multiSourceFrame.DepthFrameReference.AcquireFrame();
            if (dframe == null || bframe == null || cFrame == null)
            {
                Console.WriteLine("null");
                return; // finally still disposes whichever frames were acquired
            }
            cFrame.CopyConvertedFrameDataToArray(colordata, ColorImageFormat.Bgra);
            using (KinectBuffer dB = dframe.LockImageBuffer())
            {
                // Compute, for every color pixel, its coordinate in depth space.
                coordinate.MapColorFrameToDepthSpaceUsingIntPtr(dB.UnderlyingBuffer, dB.Size, colorMappedToDepthPoints);
            }
            using (KinectBuffer kB = bframe.LockImageBuffer())
            {
                ProcessBodyIndexFrameData(kB.UnderlyingBuffer);
                colorimg.Bytes = colordata;
                imageBox1.Image = colorimg;
            }
        }
        finally
        {
            // Frames must always be disposed, even on the early-return / exception
            // paths, or the sensor stops delivering new frames.
            if (dframe != null) dframe.Dispose();
            if (cFrame != null) cFrame.Dispose();
            if (bframe != null) bframe.Dispose();
        }
    }

    /// <summary>
    /// Blacks out every pixel of <c>colordata</c> whose mapped depth-space
    /// position is out of range or whose body-index value is 255 (255 means
    /// "no tracked body" in the body-index stream), leaving only body pixels.
    /// </summary>
    /// <param name="bodyIndexFrameData">Pointer to the locked body-index frame buffer.</param>
    private unsafe void ProcessBodyIndexFrameData(IntPtr bodyIndexFrameData)
    {
        byte* frameData = (byte*)bodyIndexFrameData;
        int colorMappedToDepthPointCount = this.colorMappedToDepthPoints.Length;
        // Use the SDK-reported depth resolution instead of hard-coded 512x424.
        int depthWidth = this.fd.Width;
        int depthHeight = this.fd.Height;
        fixed (DepthSpacePoint* colorMappedToDepthPointsPointer = this.colorMappedToDepthPoints)
        {
            for (int i = 0; i < colorMappedToDepthPointCount; ++i)
            {
                float colorMappedToDepthX = colorMappedToDepthPointsPointer[i].X;
                float colorMappedToDepthY = colorMappedToDepthPointsPointer[i].Y;
                // Round to the nearest depth pixel. Unmappable color pixels come
                // back as -infinity and fail the range check below.
                int depthX = (int)(colorMappedToDepthX + 0.5f);
                int depthY = (int)(colorMappedToDepthY + 0.5f);
                bool isBodyPixel = false;
                if ((depthX >= 0) && (depthX < depthWidth) && (depthY >= 0) && (depthY < depthHeight))
                {
                    int depthIndex = (depthY * depthWidth) + depthX;
                    // Body-index value 255 = background; anything else is a tracked body.
                    isBodyPixel = frameData[depthIndex] != 255;
                }
                if (!isBodyPixel)
                {
                    // Opaque black for background / unmappable pixels (BGRA order).
                    colordata[i * 4] = 0;
                    colordata[i * 4 + 1] = 0;
                    colordata[i * 4 + 2] = 0;
                    colordata[i * 4 + 3] = 255;
                }
            }
        }
    }

    /// <summary>
    /// Releases the frame reader and closes the sensor when the form closes.
    /// </summary>
    private void Form1_FormClosing(object sender, FormClosingEventArgs e)
    {
        if (this.framereader != null)
        {
            // Unsubscribe and dispose the reader before closing the sensor.
            this.framereader.MultiSourceFrameArrived -= Framereader_MultiSourceFrameArrived;
            this.framereader.Dispose();
            this.framereader = null;
        }
        if (this.kinect != null)
        {
            this.kinect.Close();
            this.kinect = null;
        }
    }
}
}
运行图: