A First Taste of Kinect (3): The Polling Model

  In "A First Taste of Kinect (1)" I introduced the two programming models for Kinect applications: the event model and the polling model. The event model uses C# events and delegates: once Kinect has captured a frame of data, an event fires and the delegate method attached to it processes the data. The polling model instead hands control back to the application, which actively asks Kinect for data. The event model is easier to develop with but also more restrictive, while the polling model is more efficient and better suited to multithreaded applications.

The Event Model

private void StartKinect()
{
    if (KinectSensor.KinectSensors.Count <= 0)
    {
        MessageBox.Show("No Kinect device found!");
        return;
    }
    _kinect = KinectSensor.KinectSensors[0];

    _kinect.ColorStream.Enable(ColorImageFormat.RgbResolution640x480Fps30);
    _kinect.DepthStream.Enable(DepthImageFormat.Resolution640x480Fps30);
    _kinect.SkeletonStream.Enable();

    _kinect.ColorFrameReady += new EventHandler<ColorImageFrameReadyEventArgs>(KinectColorFrameReady);
    _kinect.DepthFrameReady += new EventHandler<DepthImageFrameReadyEventArgs>(KinectDepthFrameReady);
    _kinect.SkeletonFrameReady += new EventHandler<SkeletonFrameReadyEventArgs>(KinectSkeletonFrameReady);
    _kinect.Start();
}

  The code above is a typical example of the event model. ColorFrameReady, DepthFrameReady and SkeletonFrameReady are the three events exposed by the Kinect SDK; when they fire, they invoke the delegate methods KinectColorFrameReady, KinectDepthFrameReady and KinectSkeletonFrameReady, all three of which are user-defined. Taking KinectColorFrameReady as an example:

private void KinectColorFrameReady(object sender, ColorImageFrameReadyEventArgs e)
{
    using (ColorImageFrame colorImageFrame = e.OpenColorImageFrame())
    {
        if (colorImageFrame == null)
            return;
        byte[] pixels = new byte[colorImageFrame.PixelDataLength];
        colorImageFrame.CopyPixelDataTo(pixels);
        int stride = colorImageFrame.Width * 4;
        colorImage.Source = BitmapSource.Create(colorImageFrame.Width, colorImageFrame.Height, 96, 96, PixelFormats.Bgr32, null, pixels, stride);
    }
}

  Each time Kinect finishes capturing a frame of color image data, the ColorFrameReady event fires and its delegate executes KinectColorFrameReady, which renders the color video stream onto the colorImage control.
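
  The depth and skeleton handlers follow the same pattern. As a rough sketch (assuming a depthImage control in the XAML and reusing the ConvertDepthFrameToColorFrame helper from the KinectX class shown later), a depth handler might look like this:

private void KinectDepthFrameReady(object sender, DepthImageFrameReadyEventArgs e)
{
    using (DepthImageFrame depthImageFrame = e.OpenDepthImageFrame())
    {
        if (depthImageFrame == null)
            return;
        short[] rawDepth = new short[depthImageFrame.PixelDataLength];
        depthImageFrame.CopyPixelDataTo(rawDepth);
        // Convert the 16-bit depth data to a BGR32 byte array before displaying it.
        byte[] pixels = ConvertDepthFrameToColorFrame(rawDepth, _kinect.DepthStream);
        int stride = depthImageFrame.Width * 4;
        depthImage.Source = BitmapSource.Create(depthImageFrame.Width, depthImageFrame.Height, 96, 96, PixelFormats.Bgr32, null, pixels, stride);
    }
}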

The Polling Model

  In the polling model, since control is handed back to the application, I wanted to keep Kinect data acquisition and processing separate from the application logic, so the Kinect-related methods are wrapped in a KinectX class. CustomKinectException is a custom exception type.

public KinectX()
{
    if (KinectSensor.KinectSensors.Count <= 0)
    {
        throw new CustomKinectException("No Kinect Found");
    }
    _kinect = KinectSensor.KinectSensors[0];
    _kinect.ColorStream.Enable(ColorImageFormat.RgbResolution640x480Fps30);
    _kinect.DepthStream.Enable(DepthImageFormat.Resolution640x480Fps30);
    _kinect.SkeletonStream.Enable();
}

public void Start(/*args*/)
{
    /*......*/
    _kinect.Start();
}

  After the application starts Kinect, the Kinect-related work can be moved onto a separate thread.

private void Window_Loaded(object sender, RoutedEventArgs e)
{
    try
    {
        // The KinectX constructor throws CustomKinectException when no sensor is found,
        // so it must sit inside the try block for the catch to take effect.
        kinectX = new KinectX();
        kinectX.Start(KinectX.StartModel.StreamAll);
    }
    catch (CustomKinectException exc)
    {
        textBlock.Text = exc.ToString();
        return;
    }
    renderThread = new Thread(new ThreadStart(RenderImage));
    renderThread.Start();
}

private void RenderImage()
{
    while (isWindowsClosing == false) 
    {
        kinectX.GetColorStream();
        kinectX.GetDepthStream();
        if (kinectX.ColorImageAvailable == false)
            continue;
        if (kinectX.DepthImageAvailable == false)
            continue;
        colorImage.Dispatcher.Invoke(
            delegate
            {
                colorImage.Source = BitmapSource.Create(kinectX.colorImageFrameWidth, kinectX.colorImageFrameHeight, 96, 96, PixelFormats.Bgr32, null, kinectX.GetColorPixelsData, kinectX.colorStride);
            });
        depthImage.Dispatcher.Invoke(
            delegate
            {
                depthImage.Source = BitmapSource.Create(kinectX.depthImageFrameWidth, kinectX.depthImageFrameHeight, 96, 96, PixelFormats.Bgr32, null, kinectX.GetDepthColorBytePixelData, kinectX.depthStride);
            });
    }
}
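
  The RenderImage loop runs until isWindowsClosing becomes true. A minimal sketch of the matching Window_Closing handler, assuming the field names used above, could be:

private void Window_Closing(object sender, System.ComponentModel.CancelEventArgs e)
{
    // Signal the render loop to exit, wait for the current iteration to finish, then stop the sensor.
    isWindowsClosing = true;
    if (renderThread != null && renderThread.IsAlive)
        renderThread.Join();
    kinectX.Release();
}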

  In RenderImage above, kinectX.GetColorStream() and kinectX.GetDepthStream() use the polling model to ask Kinect for data. They are implemented as follows:

public void GetColorStream()
{
    using (ColorImageFrame colorImageFrame = _kinect.ColorStream.OpenNextFrame(30))
    {
        if (colorImageFrame == null)
        {
            ColorImageAvailable = false;
            return;
        }
        byte[] pixels = new byte[colorImageFrame.PixelDataLength];
        colorImageFrame.CopyPixelDataTo(pixels);
        colorImageFrameWidth = colorImageFrame.Width;
        colorImageFrameHeight = colorImageFrame.Height;
        colorStride = colorImageFrame.Width * 4;
        colorPixelsData = pixels;
        ColorImageAvailable = true;
    }
}

public void GetDepthStream()
{
    using (DepthImageFrame depthImageFrame = _kinect.DepthStream.OpenNextFrame(30))
    {
        if (depthImageFrame == null)
        {
            depthImageAvailable = false;
            return;
        }
        short[] depthPixelData = new short[depthImageFrame.PixelDataLength];
        depthImageFrame.CopyPixelDataTo(depthPixelData);
        depthImageFrameWidth = depthImageFrame.Width;
        depthImageFrameHeight = depthImageFrame.Height;
        byte[] pixels = ConvertDepthFrameToColorFrame(depthPixelData, _kinect.DepthStream);
        depthBytePixelsData = pixels;
        depthStride = depthImageFrame.Width * 4;
        depthImageAvailable = true;
    }
}

  However, when the main thread interacts with the XAML controls, a delegate is again used to pull data out of the kinectX object, which is not very efficient. I have not found a good solution yet and will update this once I do.
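
  One direction worth exploring, sketched below under the assumption that the frame size never changes at runtime, is to create a WriteableBitmap once on the UI thread and only marshal the WritePixels call across threads, instead of building a new BitmapSource for every frame:

// Hypothetical sketch: reuse one WriteableBitmap instead of allocating a new BitmapSource per frame.
private WriteableBitmap colorBitmap;

private void InitColorBitmap()
{
    // Created once on the UI thread and assigned to the control.
    colorBitmap = new WriteableBitmap(640, 480, 96, 96, PixelFormats.Bgr32, null);
    colorImage.Source = colorBitmap;
}

private void UpdateColorBitmap(byte[] pixels, int width, int height, int stride)
{
    // Only the pixel copy is marshalled to the UI thread.
    colorImage.Dispatcher.Invoke(
        delegate
        {
            colorBitmap.WritePixels(new Int32Rect(0, 0, width, height), pixels, stride, 0);
        });
}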

  Take the depth data as an example. _kinect.DepthStream.OpenNextFrame(30) asks Kinect for the next depth frame with a 30 ms timeout. Since the stream was enabled at 30 FPS, polling every 30 ms is a good fit. Setting the parameter to 0 does not mean a zero interval: the call itself still takes a small amount of time, and within such a short window Kinect cannot finish capturing a frame, so the returned depthImageFrame is null. Many calls then yield only one valid frame, which wastes resources unnecessarily.
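
  To see the effect of the timeout, a small hypothetical helper (not part of KinectX) could count how many calls actually return a frame for a given timeout value; with a timeout of 0 the hit rate should drop sharply, matching the waste described above.

private static void MeasurePollingYield(KinectSensor kinect, int timeoutMs, int attempts)
{
    int hits = 0;
    var watch = System.Diagnostics.Stopwatch.StartNew();
    for (int i = 0; i < attempts; i++)
    {
        // OpenNextFrame returns null if no new frame arrives within the timeout.
        using (DepthImageFrame frame = kinect.DepthStream.OpenNextFrame(timeoutMs))
        {
            if (frame != null)
                hits++;
        }
    }
    watch.Stop();
    Console.WriteLine("timeout={0}ms: {1}/{2} frames in {3}ms", timeoutMs, hits, attempts, watch.ElapsedMilliseconds);
}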

The Complete KinectX Code

class KinectX
{
    private KinectSensor _kinect;
    private DepthImageStream depthImageStream;
    private ColorImageStream colorImageStream;
    private SkeletonStream skeletonStream;
    private SkeletonFrame skeletonFrame;
    private Skeleton[] skeletons;
    private WriteableBitmap manBitmap;
    private Int32Rect manImageRect;
    private Joint[] joints;
    const float maxDepthDistance = 4095;
    const float minDepthDistance = 850;
    const float maxDepthDistanceOffset = maxDepthDistance - minDepthDistance;
    public int manBitmapStride;
    public int colorStride;
    public int depthStride;
    public int colorImageFrameWidth;
    public int colorImageFrameHeight;
    public int depthImageFrameWidth;
    public int depthImageFrameHeight;
    private const int redIndex = 2;
    private const int greenIndex = 1;
    private const int blueIndex = 0;
    private static readonly int bgr32BytesPerPixel = (PixelFormats.Bgr32.BitsPerPixel + 7) / 8;
    private static readonly int[] intensityShiftByPlayerR = { 1, 2, 0, 2, 0, 0, 2, 0 };
    private static readonly int[] intensityShiftByPlayerG = { 1, 2, 2, 0, 2, 0, 0, 1 };
    private static readonly int[] intensityShiftByPlayerB = { 1, 0, 2, 2, 0, 2, 0, 2 };
    private short[] depthShortPixelsData;
    private byte[] colorPixelsData;
    private byte[] depthBytePixelsData;
    private bool colorImageAvailable;
    private bool depthImageAvailable;
    public enum StartModel
    {
        EventAllFrame,
        EventApartFrame,
        EventColorFrame,
        EventDepthFrame,
        EventSkeletonFrame,
        StreamAll,
        StreamColor,
        StreamSkeleton,
        StreamDepth
    };

    public KinectX()
    {
        if (KinectSensor.KinectSensors.Count <= 0)
        {
            throw new CustomKinectException("No Kinect Found");
        }
        _kinect = KinectSensor.KinectSensors[0];
        _kinect.ColorStream.Enable(ColorImageFormat.RgbResolution640x480Fps30);
        _kinect.DepthStream.Enable(DepthImageFormat.Resolution640x480Fps30);
        _kinect.SkeletonStream.Enable();
    }

    public void Start(StartModel startModel)
    {
        switch (startModel)
        {
            case (StartModel.EventAllFrame):
                {
                    _kinect.AllFramesReady += KinectAllFramesReady;
                    break;
                }
            case (StartModel.EventApartFrame):
                {
                    _kinect.ColorFrameReady += KinectColorFrameReady;
                    _kinect.DepthFrameReady += KinectDepthFrameReady;
                    _kinect.SkeletonFrameReady += KinectSkeletonFrameReady;
                    break;
                }
            default:
                break;
        }
        _kinect.Start();
    }

    private void KinectAllFramesReady(object sender, AllFramesReadyEventArgs e)
    {
        using (ColorImageFrame colorFrame = e.OpenColorImageFrame())
        {
            using (DepthImageFrame depthFrame = e.OpenDepthImageFrame())
            {
            }
        }
    }

    public void Release()
    {
        if (_kinect != null)
        {
            if (_kinect.Status == KinectStatus.Connected)
            {
                _kinect.Stop();
            }
        }
    }

    public void ViewUp()
    {
        if (_kinect == null)
            return;
        if (!_kinect.IsRunning)
            return;
        if (_kinect.ElevationAngle <= _kinect.MaxElevationAngle - 5)
        {
            _kinect.ElevationAngle += 5;
        }
    }

    public void ViewDown()
    {
        if (_kinect == null)
            return;
        if (!_kinect.IsRunning)
            return;
        if (_kinect.ElevationAngle >= _kinect.MinElevationAngle + 5)
        {
            _kinect.ElevationAngle -= 5;
        }
    }

    public void GetColorStream()
    {
        using (ColorImageFrame colorImageFrame = _kinect.ColorStream.OpenNextFrame(30))
        {
            if (colorImageFrame == null)
            {
                ColorImageAvailable = false;
                return;
            }
            byte[] pixels = new byte[colorImageFrame.PixelDataLength];
            colorImageFrame.CopyPixelDataTo(pixels);
            colorImageFrameWidth = colorImageFrame.Width;
            colorImageFrameHeight = colorImageFrame.Height;
            colorStride = colorImageFrame.Width * 4;
            colorPixelsData = pixels;
            ColorImageAvailable = true;
        }
    }

    public void GetDepthStream()
    {
        using (DepthImageFrame depthImageFrame = _kinect.DepthStream.OpenNextFrame(30))
        {
            if (depthImageFrame == null)
            {
                depthImageAvailable = false;
                return;
            }
            short[] depthPixelData = new short[depthImageFrame.PixelDataLength];
            depthImageFrame.CopyPixelDataTo(depthPixelData);
            depthImageFrameWidth = depthImageFrame.Width;
            depthImageFrameHeight = depthImageFrame.Height;
            byte[] pixels = ConvertDepthFrameToColorFrame(depthPixelData, _kinect.DepthStream);
            depthBytePixelsData = pixels;
            depthStride = depthImageFrame.Width * 4;
            depthImageAvailable = true;
        }
    }

    public void GetSkeletonStream()
    {
       
    }

    public void GetSkeletons()
    {
        skeletons = new Skeleton[skeletonFrame.SkeletonArrayLength];
        skeletonFrame.CopySkeletonDataTo(skeletons);
    }

    public byte[] GetDepthColorBytePixelData
    {
        get
        {
            return depthBytePixelsData;
        }
    }
    public byte[] GetColorPixelsData
    {
        get
        {
            return colorPixelsData;
        }
    }
    public short[] GetDepthShortPixelData
    {
        get
        {
            return depthShortPixelsData;
        }
    }
    public bool ColorImageAvailable
    {
        get
        {
            return colorImageAvailable;
        }
        set
        {
            colorImageAvailable = value;
        }
    }

    public bool DepthImageAvailable
    {
        get
        {
            return depthImageAvailable;
        }
        set
        {
            depthImageAvailable = value;
        }
    }

    private void GetSkeletonStreamAsync()
    {
        skeletonFrame = skeletonStream.OpenNextFrame(34);
    }

    private void KinectSkeletonFrameReady(object sender, SkeletonFrameReadyEventArgs e)
    {
        bool isSkeletonDataReady = false;
        using (SkeletonFrame skeletonFrame = e.OpenSkeletonFrame())
        {
            if (skeletonFrame != null)
            {
                skeletons = new Skeleton[skeletonFrame.SkeletonArrayLength];
                skeletonFrame.CopySkeletonDataTo(skeletons);
                isSkeletonDataReady = true;
            }
        }
    }

    private void KinectColorFrameReady(object sender, ColorImageFrameReadyEventArgs e)
    {
        using (ColorImageFrame colorImageFrame = e.OpenColorImageFrame())
        {
            if (colorImageFrame == null)
                return;
            byte[] pixels = new byte[colorImageFrame.PixelDataLength];
            colorImageFrame.CopyPixelDataTo(pixels);
            colorStride = colorImageFrame.Width * 4;
            colorPixelsData = pixels;
            ColorImageAvailable = true;
        }
    }

    private void KinectDepthFrameReady(object sender, DepthImageFrameReadyEventArgs e)
    {
        
    }

    private void RenderMan(ColorImageFrame colorFrame, DepthImageFrame depthFrame)
    {
        if (depthFrame == null || colorFrame == null)
            return;
        int depthPixelIndex;
        int playerIndex;
        int colorPixelIndex;
        ColorImagePoint colorPoint;
        int colorStride = colorFrame.BytesPerPixel * colorFrame.Width;
        int bytePerPixelOfBgrImage = 4;
        int playerImageIndex = 0;
        depthFrame.CopyPixelDataTo(depthShortPixelsData);
        colorFrame.CopyPixelDataTo(colorPixelsData);
        byte[] manImage = new byte[depthFrame.Height * manBitmapStride];
        for (int j = 0; j < depthFrame.Height; j++)
        {
            for (int i = 0; i < depthFrame.Width; i++, playerImageIndex += bytePerPixelOfBgrImage)
            {
                depthPixelIndex = i + (j * depthFrame.Width);
                playerIndex = depthShortPixelsData[depthPixelIndex] & DepthImageFrame.PlayerIndexBitmask;
                //If the player index is non-zero, this pixel belongs to a person.
                if (playerIndex != 0)
                {
                    //Map this point in the depth image to the corresponding point in the color image.
                    colorPoint = _kinect.MapDepthToColorImagePoint(depthFrame.Format, i, j, depthShortPixelsData[depthPixelIndex], colorFrame.Format);
                    colorPixelIndex = (colorPoint.X * colorFrame.BytesPerPixel) + (colorPoint.Y * colorStride);
                    manImage[playerImageIndex] = colorPixelsData[colorPixelIndex];//Blue
                    manImage[playerImageIndex + 1] = colorPixelsData[colorPixelIndex + 1];//Green
                    manImage[playerImageIndex + 2] = colorPixelsData[colorPixelIndex + 2];//Red
                    manImage[playerImageIndex + 3] = 0xFF;//Alpha
                }
            }
        }
        //Write the composed player image to the bitmap once per frame rather than once per pixel.
        manBitmap.WritePixels(manImageRect, manImage, manBitmapStride, 0);
    }

    /// <summary>
    /// Monochrome histogram formula; returns a 256-level gray value, where darker means farther away.
    /// </summary>
    /// <param name="dis">Depth value; the valid range is ......</param>
    /// <returns></returns>
    private static byte CalculateIntensityFromDepth(int dis)
    {
        return (byte)(255 - (255 * Math.Max(dis - minDepthDistance, 0) / maxDepthDistanceOffset));
    }

    /// <summary>
    /// Generates a BGR32-format image byte array.
    /// </summary>
    /// <param name="depthImageFrame"></param>
    /// <returns></returns>
    private byte[] ConvertDepthFrameToGrayFrame(DepthImageFrame depthImageFrame)
    {
        short[] rawDepthData = new short[depthImageFrame.PixelDataLength];
        depthImageFrame.CopyPixelDataTo(rawDepthData);
        byte[] pixels = new byte[depthImageFrame.Height * depthImageFrame.Width * 4];
        for (int depthIndex = 0, colorIndex = 0; depthIndex < rawDepthData.Length && colorIndex < pixels.Length; depthIndex++, colorIndex += 4)
        {
            int player = rawDepthData[depthIndex] & DepthImageFrame.PlayerIndexBitmask;
            int depth = rawDepthData[depthIndex] >> DepthImageFrame.PlayerIndexBitmaskWidth;
            if (depth <= 900)
            {
                //Very close to the Kinect
                pixels[colorIndex + blueIndex] = 255;
                pixels[colorIndex + greenIndex] = 0;
                pixels[colorIndex + redIndex] = 0;
            }
            else if (depth > 900 && depth < 2000)
            {
                pixels[colorIndex + blueIndex] = 0;
                pixels[colorIndex + greenIndex] = 255;
                pixels[colorIndex + redIndex] = 0;
            }
            else if (depth >= 2000)
            {
                //More than two meters from the Kinect
                pixels[colorIndex + blueIndex] = 0;
                pixels[colorIndex + greenIndex] = 0;
                pixels[colorIndex + redIndex] = 255;
            }
            //Monochrome histogram shading (note: this overwrites the distance-band colors assigned above)
            byte intensity = CalculateIntensityFromDepth(depth);
            pixels[colorIndex + blueIndex] = intensity;
            pixels[colorIndex + greenIndex] = intensity;
            pixels[colorIndex + redIndex] = intensity;
            //If this pixel belongs to a person, mark it light green
            if (player > 0)
            {
                pixels[colorIndex + blueIndex] = Colors.LightGreen.B;
                pixels[colorIndex + greenIndex] = Colors.LightGreen.G;
                pixels[colorIndex + redIndex] = Colors.LightGreen.R;
            }
        }
        return pixels;
    }

    /// <summary>
    /// Converts the 16-bit grayscale depth frame to a 32-bit color depth frame.
    /// </summary>
    /// <param name="depthImageFrame">16-bit grayscale depth data</param>
    /// <param name="depthImageStream">Used to read properties of the depth stream</param>
    /// <returns></returns>
    private byte[] ConvertDepthFrameToColorFrame(short[] depthImageFrame, DepthImageStream depthImageStream)
    {
        byte[] depthFrame32 = new byte[depthImageStream.FrameWidth * depthImageStream.FrameHeight * bgr32BytesPerPixel];
        //Use the stream's constants for the valid range instead of hard-coding the values
        int tooNearDepth = depthImageStream.TooNearDepth;
        int tooFarDepth = depthImageStream.TooFarDepth;
        int unknownDepth = depthImageStream.UnknownDepth;
        for (int i16 = 0, i32 = 0; i16 < depthImageFrame.Length && i32 < depthFrame32.Length; i16++, i32 += 4)
        {
            int player = depthImageFrame[i16] & DepthImageFrame.PlayerIndexBitmask;
            int realDepth = depthImageFrame[i16] >> DepthImageFrame.PlayerIndexBitmaskWidth;
            //Clip the 13-bit depth value down to 8 bits with a bit shift
            byte intensity = (byte)(~(realDepth >> 4));
            if (player == 0 && realDepth == 0)
            {
                depthFrame32[i32 + redIndex] = 255;
                depthFrame32[i32 + greenIndex] = 255;
                depthFrame32[i32 + blueIndex] = 255;
            }
            else if (player == 0 && realDepth == tooFarDepth)
            {
                //Dark purple
                depthFrame32[i32 + redIndex] = 66;
                depthFrame32[i32 + greenIndex] = 0;
                depthFrame32[i32 + blueIndex] = 66;
            }
            else if (player == 0 && realDepth == unknownDepth)
            {
                //Dark brown
                depthFrame32[i32 + redIndex] = 66;
                depthFrame32[i32 + greenIndex] = 66;
                depthFrame32[i32 + blueIndex] = 33;
            }
            else
            {
                depthFrame32[i32 + redIndex] = (byte)(intensity >> intensityShiftByPlayerR[player]);
                depthFrame32[i32 + greenIndex] = (byte)(intensity >> intensityShiftByPlayerG[player]);
                depthFrame32[i32 + blueIndex] = (byte)(intensity >> intensityShiftByPlayerB[player]);
            }
        }
        return depthFrame32;
    }
}

class CustomKinectException : ApplicationException
{
    public CustomKinectException()
    {
    }
    public CustomKinectException(string message) : base(message)
    {
    }
    public CustomKinectException(string message, Exception inner) : base(message, inner)
    {
    }
}