Today I tried creating a console application in VS2010 with both Kinect and OpenCV configured, and spent an afternoon getting an example from the web to run.
I installed OpenCV 2.4.3 and configured it myself. For Kinect I downloaded KinectSDK-v1.6-Setup and KinectDeveloperToolkit-v1.6.0-Setup; both the SDK and the Toolkit must be installed. Once the Kinect sensor is plugged in, the driver finishes installing automatically.
1. Create a new console application.
2. Open the project properties and go to Configuration Properties -> VC++ Directories -> Include Directories.
My Kinect installation paths are:
C:\Program Files\Microsoft SDKs\Kinect\Developer Toolkit v1.6.0 (contains the inc, Lib, Redist, Samples, and Tools folders)
C:\Program Files\Microsoft SDKs\Kinect\v1.6 (contains the Assemblies, inc, lib, and Redist folders)
Add these two entries to Include Directories:
C:\Program Files\Microsoft SDKs\Kinect\Developer Toolkit v1.6.0\inc (contains FaceTrackLib.h, the face-tracking header)
C:\Program Files\Microsoft SDKs\Kinect\v1.6\inc (contains the natural user interface headers NuiApi.h, NuiImageCamera.h, NuiSensor.h, NuiSkeleton.h, etc.)
3. Open the project properties and go to Configuration Properties -> VC++ Directories -> Library Directories, and add these two entries:
C:\Program Files\Microsoft SDKs\Kinect\Developer Toolkit v1.6.0\Lib\x86 (contains FaceTrackLib.lib, needed for face tracking)
C:\Program Files\Microsoft SDKs\Kinect\v1.6\lib\x86 (contains Kinect10.lib)
Pick the x86 or amd64 subfolder to match the platform you are building for.
4. Open the project properties and go to Configuration Properties -> Linker -> Input -> Additional Dependencies, and add these two entries:
Kinect10.lib
FaceTrackLib.lib
5. Open the project properties and go to Configuration Properties -> C/C++ -> Preprocessor -> Preprocessor Definitions, and change _CONSOLE to _WINDOWS (FaceTrackLib.h apparently expects _WINDOWS; the code below also defines it explicitly right before the include).
6. Finally, copy FaceTrackData.dll and FaceTrackLib.dll into the project directory. Both DLLs are found in C:\Program Files\Microsoft SDKs\Kinect\Developer Toolkit v1.6.0\Redist\x86. (A Post-Build Event with two xcopy /y commands pointing at that Redist folder saves re-copying them after every clean build.)
For the Kinect and OpenCV configuration, see: http://blog.csdn.net/songyimin1208/article/details/50470202
This post follows the code given at http://blog.csdn.net/guoming0000/article/details/7607473 for my first Kinect exercise; a detailed walkthrough of that code is at http://blog.csdn.net/guoming0000/article/details/8658580
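Before the full example, here is a minimal smoke test (my own sketch, not from the original post) to confirm that the include directories, Kinect10.lib, and the driver are all wired up correctly:

// smoke_test.cpp : builds and runs only if the Kinect SDK setup above is correct.
#include <windows.h>
#include <iostream>
#include "NuiApi.h"
int main()
{
    // Request only the color stream; this fails if the driver or SDK is missing.
    HRESULT hr = NuiInitialize(NUI_INITIALIZE_FLAG_USES_COLOR);
    if (FAILED(hr))
    {
        std::cout << "NuiInitialize failed, hr = 0x" << std::hex << hr << std::endl;
        return 1;
    }
    std::cout << "Kinect sensor initialized successfully" << std::endl;
    NuiShutdown();
    return 0;
}

If this prints the success message, the full face-tracking example below should build and find the sensor.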
// win32_KinectFaceTracking.cpp : Defines the entry point for the console application.
//
#include "stdafx.h"
//----------------------------------------------------
#define _WINDOWS
#include <FaceTrackLib.h>
// Forward declaration of the visualization helper defined at the end of this file.
HRESULT VisualizeFaceModel(IFTImage* pColorImg, IFTModel* pModel, FT_CAMERA_CONFIG const* pCameraConfig, FLOAT const* pSUCoef,
                           FLOAT zoomFactor, POINT viewOffset, IFTResult* pAAMRlt, UINT32 color);
//----------------------------------------------------
#include <vector>
#include <deque>
#include <iomanip>
#include <stdexcept>
#include <string>
#include <iostream>
#include "opencv2\opencv.hpp"
using namespace std;
using namespace cv;
#include <windows.h>
#include <mmsystem.h>
#include <assert.h>
#include <strsafe.h>
#include "NuiApi.h"
#define COLOR_WIDTH     640   // color stream resolution
#define COLOR_HIGHT     480
#define DEPTH_WIDTH     320   // depth stream resolution
#define DEPTH_HIGHT     240
#define SKELETON_WIDTH  640   // canvas size for the skeleton window
#define SKELETON_HIGHT  480
#define CHANNEL         3     // BGR channels of the colorized depth image
BYTE buf[DEPTH_WIDTH * DEPTH_HIGHT * CHANNEL];   // backing store for the colorized depth map
int drawColor(HANDLE h);
int drawDepth(HANDLE h);
int drawSkeleton();
//---face tracking------------------------------------------
BYTE *colorBuffer, *depthBuffer;  // raw stream pointers (only needed if IFTImage::Attach were used)
IFTImage* pColorFrame;            // color frame handed to the face tracker
IFTImage* pDepthFrame;            // depth frame handed to the face tracker
FT_VECTOR3D m_hint3D[2];          // tracking hints: [0] = shoulder center, [1] = head
//-----------------------------------------------------------------------------------
HANDLE h1;                        // color frame-ready event
HANDLE h3;                        // depth frame-ready event
HANDLE h5;                        // skeleton frame-ready event
HANDLE h2;                        // color stream handle
HANDLE h4;                        // depth stream handle
DWORD WINAPI VideoFunc(LPVOID pParam)
{
    while (TRUE)
    {
        // Block until the runtime signals a new color frame, then render it.
        if (WaitForSingleObject(h1, INFINITE) == WAIT_OBJECT_0)
        {
            drawColor(h2);
        }
    }
}
DWORD WINAPI DepthFunc(LPVOID pParam)
{
    while (TRUE)
    {
        // Block until the runtime signals a new depth frame, then render it.
        if (WaitForSingleObject(h3, INFINITE) == WAIT_OBJECT_0)
        {
            drawDepth(h4);
        }
    }
}
DWORD WINAPI SkeletonFunc(LPVOID pParam)
{
    while (TRUE)
    {
        // Block until the runtime signals a new skeleton frame (h5 is the event
        // registered with NuiSkeletonTrackingEnable), then render it.
        if (WaitForSingleObject(h5, INFINITE) == WAIT_OBJECT_0)
            drawSkeleton();
    }
}
DWORD WINAPI TrackFace(LPVOID pParam)
{
    // Unused stub: in this version, face tracking runs on the main thread below.
    cout << "track face start !" << endl;
    while (TRUE)
    {
        // do something
        Sleep(16);
        cout << "track face" << endl;
    }
}
//-----------------------------------------------------------------------------------
int drawColor(HANDLE h)
{
    const NUI_IMAGE_FRAME* pImageFrame = NULL;
    HRESULT hr = NuiImageStreamGetNextFrame(h, 0, &pImageFrame);
    if (FAILED(hr))
    {
        cout << "Get Color Image Frame Failed" << endl;
        return -1;
    }
    INuiFrameTexture* pTexture = pImageFrame->pFrameTexture;
    NUI_LOCKED_RECT LockedRect;
    pTexture->LockRect(0, &LockedRect, NULL, 0);
    if (LockedRect.Pitch != 0)
    {
        BYTE* pBuffer = (BYTE*)LockedRect.pBits;
        colorBuffer = pBuffer;
        // Copy the frame into the face tracker's color image as well.
        memcpy(pColorFrame->GetBuffer(), PBYTE(LockedRect.pBits), min(pColorFrame->GetBufferSize(), UINT(pTexture->BufferLen())));
        // The color stream is X8R8G8B8, so wrap it as a 4-channel Mat.
        Mat temp(COLOR_HIGHT, COLOR_WIDTH, CV_8UC4, pBuffer);
        imshow("b", temp);
        waitKey(1);
    }
    NuiImageStreamReleaseFrame(h, pImageFrame);
    return 0;
}
int drawDepth(HANDLE h)
{
    const NUI_IMAGE_FRAME* pImageFrame = NULL;
    HRESULT hr = NuiImageStreamGetNextFrame(h, 0, &pImageFrame);
    if (FAILED(hr))
    {
        cout << "Get Depth Image Frame Failed" << endl;
        return -1;
    }
    INuiFrameTexture* pTexture = pImageFrame->pFrameTexture;
    NUI_LOCKED_RECT LockedRect;
    pTexture->LockRect(0, &LockedRect, NULL, 0);
    if (LockedRect.Pitch != 0)
    {
        USHORT* pBuff = (USHORT*)LockedRect.pBits;
        // depthBuffer = pBuff;
        // Copy the frame into the face tracker's depth image as well.
        memcpy(pDepthFrame->GetBuffer(), PBYTE(LockedRect.pBits), min(pDepthFrame->GetBufferSize(), UINT(pTexture->BufferLen())));
        for (int i = 0; i < DEPTH_WIDTH * DEPTH_HIGHT; i++)
        {
            // With NUI_IMAGE_TYPE_DEPTH_AND_PLAYER_INDEX, the low 3 bits of each
            // pixel hold the player index (0 = no player) and the high 13 bits
            // hold the depth in millimeters.
            BYTE index = pBuff[i] & 0x07;
            USHORT realDepth = (pBuff[i] & 0xFFF8) >> 3;
            // Map depth to brightness so that nearer pixels appear brighter.
            BYTE scale = 255 - (BYTE)(256 * realDepth / 0x0fff);
            buf[CHANNEL * i] = buf[CHANNEL * i + 1] = buf[CHANNEL * i + 2] = 0;
            // Tint each pixel by player index so individual players stand out.
            switch (index)
            {
            case 0:
                buf[CHANNEL * i]     = scale / 2;
                buf[CHANNEL * i + 1] = scale / 2;
                buf[CHANNEL * i + 2] = scale / 2;
                break;
            case 1:
                buf[CHANNEL * i] = scale;
                break;
            case 2:
                buf[CHANNEL * i + 1] = scale;
                break;
            case 3:
                buf[CHANNEL * i + 2] = scale;
                break;
            case 4:
                buf[CHANNEL * i]     = scale;
                buf[CHANNEL * i + 1] = scale;
                break;
            case 5:
                buf[CHANNEL * i]     = scale;
                buf[CHANNEL * i + 2] = scale;
                break;
            case 6:
                buf[CHANNEL * i + 1] = scale;
                buf[CHANNEL * i + 2] = scale;
                break;
            case 7:
                buf[CHANNEL * i]     = 255 - scale / 2;
                buf[CHANNEL * i + 1] = 255 - scale / 2;
                buf[CHANNEL * i + 2] = 255 - scale / 2;
                break;
            }
        }
        Mat b(DEPTH_HIGHT, DEPTH_WIDTH, CV_8UC3, buf);
        imshow("depth", b);
        waitKey(1);
    }
    NuiImageStreamReleaseFrame(h, pImageFrame);
    return 0;
}
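// Worked example of the unpacking above: a raw depth pixel of 0x4E21 carries
// player index (0x4E21 & 0x07) = 1 and depth ((0x4E21 & 0xFFF8) >> 3) = 0x09C4
// = 2500, i.e. a point on player 1 about 2.5 m from the sensor.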
int drawSkeleton()
{
    NUI_SKELETON_FRAME SkeletonFrame;
    cv::Point pt[20];
    Mat skeletonMat = Mat(SKELETON_HIGHT, SKELETON_WIDTH, CV_8UC3, Scalar(0, 0, 0));
    HRESULT hr = NuiSkeletonGetNextFrame(0, &SkeletonFrame);
    if (FAILED(hr))
    {
        cout << "Get Skeleton Image Frame Failed" << endl;
        return -1;
    }
    bool bFoundSkeleton = false;
    for (int i = 0; i < NUI_SKELETON_COUNT; i++)
    {
        if (SkeletonFrame.SkeletonData[i].eTrackingState == NUI_SKELETON_TRACKED)
        {
            bFoundSkeleton = true;
        }
    }
    // Has skeletons!
    if (bFoundSkeleton)
    {
        // Smooth out jitter before drawing.
        NuiTransformSmooth(&SkeletonFrame, NULL);
        for (int i = 0; i < NUI_SKELETON_COUNT; i++)
        {
            if (SkeletonFrame.SkeletonData[i].eTrackingState == NUI_SKELETON_TRACKED)
            {
                for (int j = 0; j < NUI_SKELETON_POSITION_COUNT; j++)
                {
                    float fx, fy;
                    // NuiTransformSkeletonToDepthImage yields 320x240 depth-image
                    // coordinates; scale them up to the 640x480 drawing canvas.
                    NuiTransformSkeletonToDepthImage(SkeletonFrame.SkeletonData[i].SkeletonPositions[j], &fx, &fy);
                    pt[j].x = (int)(fx * SKELETON_WIDTH) / 320;
                    pt[j].y = (int)(fy * SKELETON_HIGHT) / 240;
                    circle(skeletonMat, pt[j], 5, CV_RGB(255, 0, 0));
                }
                // Connect the joints into a stick figure.
                cv::line(skeletonMat, pt[NUI_SKELETON_POSITION_HEAD], pt[NUI_SKELETON_POSITION_SHOULDER_CENTER], CV_RGB(0, 255, 0));
                cv::line(skeletonMat, pt[NUI_SKELETON_POSITION_SHOULDER_CENTER], pt[NUI_SKELETON_POSITION_SPINE], CV_RGB(0, 255, 0));
                cv::line(skeletonMat, pt[NUI_SKELETON_POSITION_SPINE], pt[NUI_SKELETON_POSITION_HIP_CENTER], CV_RGB(0, 255, 0));
                cv::line(skeletonMat, pt[NUI_SKELETON_POSITION_HAND_RIGHT], pt[NUI_SKELETON_POSITION_WRIST_RIGHT], CV_RGB(0, 255, 0));
                cv::line(skeletonMat, pt[NUI_SKELETON_POSITION_WRIST_RIGHT], pt[NUI_SKELETON_POSITION_ELBOW_RIGHT], CV_RGB(0, 255, 0));
                cv::line(skeletonMat, pt[NUI_SKELETON_POSITION_ELBOW_RIGHT], pt[NUI_SKELETON_POSITION_SHOULDER_RIGHT], CV_RGB(0, 255, 0));
                cv::line(skeletonMat, pt[NUI_SKELETON_POSITION_SHOULDER_RIGHT], pt[NUI_SKELETON_POSITION_SHOULDER_CENTER], CV_RGB(0, 255, 0));
                cv::line(skeletonMat, pt[NUI_SKELETON_POSITION_SHOULDER_CENTER], pt[NUI_SKELETON_POSITION_SHOULDER_LEFT], CV_RGB(0, 255, 0));
                cv::line(skeletonMat, pt[NUI_SKELETON_POSITION_SHOULDER_LEFT], pt[NUI_SKELETON_POSITION_ELBOW_LEFT], CV_RGB(0, 255, 0));
                cv::line(skeletonMat, pt[NUI_SKELETON_POSITION_ELBOW_LEFT], pt[NUI_SKELETON_POSITION_WRIST_LEFT], CV_RGB(0, 255, 0));
                cv::line(skeletonMat, pt[NUI_SKELETON_POSITION_WRIST_LEFT], pt[NUI_SKELETON_POSITION_HAND_LEFT], CV_RGB(0, 255, 0));
                cv::line(skeletonMat, pt[NUI_SKELETON_POSITION_HIP_CENTER], pt[NUI_SKELETON_POSITION_HIP_RIGHT], CV_RGB(0, 255, 0));
                cv::line(skeletonMat, pt[NUI_SKELETON_POSITION_HIP_RIGHT], pt[NUI_SKELETON_POSITION_KNEE_RIGHT], CV_RGB(0, 255, 0));
                cv::line(skeletonMat, pt[NUI_SKELETON_POSITION_KNEE_RIGHT], pt[NUI_SKELETON_POSITION_ANKLE_RIGHT], CV_RGB(0, 255, 0));
                cv::line(skeletonMat, pt[NUI_SKELETON_POSITION_ANKLE_RIGHT], pt[NUI_SKELETON_POSITION_FOOT_RIGHT], CV_RGB(0, 255, 0));
                cv::line(skeletonMat, pt[NUI_SKELETON_POSITION_HIP_CENTER], pt[NUI_SKELETON_POSITION_HIP_LEFT], CV_RGB(0, 255, 0));
                cv::line(skeletonMat, pt[NUI_SKELETON_POSITION_HIP_LEFT], pt[NUI_SKELETON_POSITION_KNEE_LEFT], CV_RGB(0, 255, 0));
                cv::line(skeletonMat, pt[NUI_SKELETON_POSITION_KNEE_LEFT], pt[NUI_SKELETON_POSITION_ANKLE_LEFT], CV_RGB(0, 255, 0));
                cv::line(skeletonMat, pt[NUI_SKELETON_POSITION_ANKLE_LEFT], pt[NUI_SKELETON_POSITION_FOOT_LEFT], CV_RGB(0, 255, 0));
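                // Save the shoulder-center and head positions; they are fed to the
                // face tracker as the m_hint3D hints in StartTracking/ContinueTracking.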
                m_hint3D[0].x = SkeletonFrame.SkeletonData[i].SkeletonPositions[NUI_SKELETON_POSITION_SHOULDER_CENTER].x;
                m_hint3D[0].y = SkeletonFrame.SkeletonData[i].SkeletonPositions[NUI_SKELETON_POSITION_SHOULDER_CENTER].y;
                m_hint3D[0].z = SkeletonFrame.SkeletonData[i].SkeletonPositions[NUI_SKELETON_POSITION_SHOULDER_CENTER].z;
                m_hint3D[1].x = SkeletonFrame.SkeletonData[i].SkeletonPositions[NUI_SKELETON_POSITION_HEAD].x;
                m_hint3D[1].y = SkeletonFrame.SkeletonData[i].SkeletonPositions[NUI_SKELETON_POSITION_HEAD].y;
                m_hint3D[1].z = SkeletonFrame.SkeletonData[i].SkeletonPositions[NUI_SKELETON_POSITION_HEAD].z;
            }
        }
    }
    imshow("skeleton", skeletonMat);
    waitKey(1);
    return 0;
}
int main(int argc, char* argv[])
{
    // Initialize the NUI runtime with color, depth+player index, and skeleton support.
    HRESULT hr = NuiInitialize(NUI_INITIALIZE_FLAG_USES_DEPTH_AND_PLAYER_INDEX | NUI_INITIALIZE_FLAG_USES_COLOR | NUI_INITIALIZE_FLAG_USES_SKELETON);
    if (hr != S_OK)
    {
        cout << "NuiInitialize failed" << endl;
        return hr;
    }
    // Open the Kinect sensor's color stream; h1 is signaled whenever a frame is ready.
    h1 = CreateEvent(NULL, TRUE, FALSE, NULL);
    h2 = NULL;
    hr = NuiImageStreamOpen(NUI_IMAGE_TYPE_COLOR, NUI_IMAGE_RESOLUTION_640x480, 0, 2, h1, &h2);
    if (FAILED(hr))
    {
        cout << "Could not open image stream video" << endl;
        return hr;
    }
    // Open the depth stream (with player index); h3 is its frame-ready event.
    h3 = CreateEvent(NULL, TRUE, FALSE, NULL);
    h4 = NULL;
    hr = NuiImageStreamOpen(NUI_IMAGE_TYPE_DEPTH_AND_PLAYER_INDEX, NUI_IMAGE_RESOLUTION_320x240, 0, 2, h3, &h4);
    if (FAILED(hr))
    {
        cout << "Could not open depth stream video" << endl;
        return hr;
    }
    // Enable skeleton tracking; h5 is signaled when a skeleton frame is ready.
    h5 = CreateEvent(NULL, TRUE, FALSE, NULL);
    hr = NuiSkeletonTrackingEnable(h5, 0);
    if (FAILED(hr))
    {
        cout << "Could not open skeleton stream video" << endl;
        return hr;
    }
    // Worker threads drive the three preview windows; face tracking runs below on the main thread.
    HANDLE hThread1, hThread2, hThread3;
    hThread1 = CreateThread(NULL, 0, VideoFunc, h2, 0, NULL);
    hThread2 = CreateThread(NULL, 0, DepthFunc, h4, 0, NULL);
    hThread3 = CreateThread(NULL, 0, SkeletonFunc, NULL, 0, NULL);
    // The hints start at the origin until drawSkeleton fills in real positions.
    m_hint3D[0] = FT_VECTOR3D(0, 0, 0);
    m_hint3D[1] = FT_VECTOR3D(0, 0, 0);
    pColorFrame = FTCreateImage();
    pDepthFrame = FTCreateImage();
    IFTFaceTracker* pFT = FTCreateFaceTracker();
    if (!pFT)
    {
        return -1; // Handle errors
    }
    // Color camera: 640x480; depth camera: 320x240. The depth config apparently
    // must be filled in, and filled in correctly, or face tracking fails!
    FT_CAMERA_CONFIG myCameraConfig = {640, 480, NUI_CAMERA_COLOR_NOMINAL_FOCAL_LENGTH_IN_PIXELS}; // width, height, focal length
    FT_CAMERA_CONFIG depthConfig;
    depthConfig.FocalLength = NUI_CAMERA_DEPTH_NOMINAL_FOCAL_LENGTH_IN_PIXELS;
    depthConfig.Width = 320;
    depthConfig.Height = 240;
    hr = pFT->Initialize(&myCameraConfig, &depthConfig, NULL, NULL);
    if (FAILED(hr))
    {
        return -2; // Handle errors
    }
    // Create an IFTResult to hold the face tracking results.
    IFTResult* pFTResult = NULL;
    hr = pFT->CreateFTResult(&pFTResult);
    if (FAILED(hr))
    {
        return -11;
    }
    // Prepare Image and SensorData for 640x480 RGB images.
    if (!pColorFrame)
    {
        return -12; // Handle errors
    }
    // Attach assumes that the camera code provided by the application is filling
    // the buffer cameraFrameBuffer; here we Allocate our own buffers instead and
    // memcpy into them in drawColor/drawDepth:
    // pColorFrame->Attach(640, 480, colorBuffer, FTIMAGEFORMAT_UINT8_B8G8R8X8, 640*3);
    hr = pColorFrame->Allocate(640, 480, FTIMAGEFORMAT_UINT8_B8G8R8X8);
    if (FAILED(hr))
    {
        return hr;
    }
    hr = pDepthFrame->Allocate(320, 240, FTIMAGEFORMAT_UINT16_D13P3);
    if (FAILED(hr))
    {
        return hr;
    }
    FT_SENSOR_DATA sensorData;
    sensorData.pVideoFrame = pColorFrame;
    sensorData.pDepthFrame = pDepthFrame;
    sensorData.ZoomFactor = 1.0f;
    POINT point; point.x = 0; point.y = 0;
    sensorData.ViewOffset = point;
    bool isTracked = false;
    int iFaceTrackTimeCount = 0;
    // Track a face
    while (true)
    {
        // Call your camera method to process IO and fill the camera buffer
        // cameraObj.ProcessIO(cameraFrameBuffer); // replace with your method
        // (here the worker threads already memcpy each frame into pColorFrame/pDepthFrame)
        if (!isTracked)
        {
            // Cold start: detect a face from scratch, seeded by the skeleton hints.
            hr = pFT->StartTracking(&sensorData, NULL, m_hint3D, pFTResult);
            if (SUCCEEDED(hr) && SUCCEEDED(pFTResult->GetStatus()))
            {
                isTracked = true;
            }
            else
            {
                // Handle errors
                isTracked = false;
            }
        }
        else
        {
            // Continue tracking. It uses a previously known face position,
            // so it is an inexpensive call.
            hr = pFT->ContinueTracking(&sensorData, m_hint3D, pFTResult);
            if (FAILED(hr) || FAILED(pFTResult->GetStatus()))
            {
                // Handle errors
                isTracked = false;
            }
        }
        if (isTracked)
        {
            printf("Face is being tracked!\n");
            IFTModel* ftModel;
            HRESULT hr = pFT->GetFaceModel(&ftModel);
            FLOAT* pSU = NULL;
            UINT numSU;
            BOOL suConverged;
            pFT->GetShapeUnits(NULL, &pSU, &numSU, &suConverged);
            POINT viewOffset = {0, 0};
            hr = VisualizeFaceModel(pColorFrame, ftModel, &myCameraConfig, pSU, 1.0, viewOffset, pFTResult, 0x00FFFF00);
            if (FAILED(hr))
                printf("Visualization failed!\n");
            ftModel->Release(); // GetFaceModel hands out a new reference each pass
            // Show the color frame with the face mesh drawn on top.
            Mat tempMat(COLOR_HIGHT, COLOR_WIDTH, CV_8UC4, pColorFrame->GetBuffer());
            imshow("faceTracking", tempMat);
            waitKey(1);
        }
        //printf("%d\n", pFTResult->GetStatus());
        // Do something with pFTResult.
        Sleep(16);
        iFaceTrackTimeCount++;
        if (iFaceTrackTimeCount > 16 * 1000)
            break; // stop after ~16,000 passes (a bit over four minutes at 16 ms each)
        // Terminate on some criteria.
    }
    // Clean up.
    pFTResult->Release();
    pColorFrame->Release();
    pDepthFrame->Release();
    pFT->Release();
    CloseHandle(hThread1);
    CloseHandle(hThread2);
    CloseHandle(hThread3);
    Sleep(60000); // keep the process (and the preview threads) alive for another minute
    NuiShutdown();
    return 0;
}
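// Draws the projected 3D face mesh and the face rectangle into pColorImg.
// This helper follows the visualization code shipped with the Face Tracking SDK samples.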
HRESULT VisualizeFaceModel(IFTImage* pColorImg, IFTModel* pModel, FT_CAMERA_CONFIG const* pCameraConfig, FLOAT const* pSUCoef,
                           FLOAT zoomFactor, POINT viewOffset, IFTResult* pAAMRlt, UINT32 color)
{
    if (!pColorImg || !pModel || !pCameraConfig || !pSUCoef || !pAAMRlt)
    {
        return E_POINTER;
    }
    HRESULT hr = S_OK;
    UINT vertexCount = pModel->GetVertexCount();
    FT_VECTOR2D* pPts2D = reinterpret_cast<FT_VECTOR2D*>(_malloca(sizeof(FT_VECTOR2D) * vertexCount));
    if (pPts2D)
    {
        FLOAT* pAUs;
        UINT auCount;
        hr = pAAMRlt->GetAUCoefficients(&pAUs, &auCount);
        if (SUCCEEDED(hr))
        {
            FLOAT scale, rotationXYZ[3], translationXYZ[3];
            hr = pAAMRlt->Get3DPose(&scale, rotationXYZ, translationXYZ);
            if (SUCCEEDED(hr))
            {
                // Project the deformed 3D model into 2D image coordinates.
                hr = pModel->GetProjectedShape(pCameraConfig, zoomFactor, viewOffset, pSUCoef, pModel->GetSUCount(), pAUs, auCount,
                                               scale, rotationXYZ, translationXYZ, pPts2D, vertexCount);
                if (SUCCEEDED(hr))
                {
                    POINT* p3DMdl = reinterpret_cast<POINT*>(_malloca(sizeof(POINT) * vertexCount));
                    if (p3DMdl)
                    {
                        for (UINT i = 0; i < vertexCount; ++i)
                        {
                            p3DMdl[i].x = LONG(pPts2D[i].x + 0.5f);
                            p3DMdl[i].y = LONG(pPts2D[i].y + 0.5f);
                        }
                        FT_TRIANGLE* pTriangles;
                        UINT triangleCount;
                        hr = pModel->GetTriangles(&pTriangles, &triangleCount);
                        if (SUCCEEDED(hr))
                        {
                            // Deduplicate shared triangle edges with a small open-addressing
                            // hash set so each edge of the mesh is drawn only once.
                            struct EdgeHashTable
                            {
                                UINT32* pEdges;
                                UINT edgesAlloc;
                                void Insert(int a, int b)
                                {
                                    UINT32 v = (min(a, b) << 16) | max(a, b);
                                    UINT32 index = (v + (v << 8)) * 49157, i;
                                    for (i = 0; i < edgesAlloc - 1 && pEdges[(index + i) & (edgesAlloc - 1)] && v != pEdges[(index + i) & (edgesAlloc - 1)]; ++i)
                                    {
                                    }
                                    pEdges[(index + i) & (edgesAlloc - 1)] = v;
                                }
                            } eht;
                            eht.edgesAlloc = 1 << UINT(log(2.f * (1 + vertexCount + triangleCount)) / log(2.f));
                            eht.pEdges = reinterpret_cast<UINT32*>(_malloca(sizeof(UINT32) * eht.edgesAlloc));
                            if (eht.pEdges)
                            {
                                ZeroMemory(eht.pEdges, sizeof(UINT32) * eht.edgesAlloc);
                                for (UINT i = 0; i < triangleCount; ++i)
                                {
                                    eht.Insert(pTriangles[i].i, pTriangles[i].j);
                                    eht.Insert(pTriangles[i].j, pTriangles[i].k);
                                    eht.Insert(pTriangles[i].k, pTriangles[i].i);
                                }
                                for (UINT i = 0; i < eht.edgesAlloc; ++i)
                                {
                                    if (eht.pEdges[i] != 0)
                                    {
                                        pColorImg->DrawLine(p3DMdl[eht.pEdges[i] >> 16], p3DMdl[eht.pEdges[i] & 0xFFFF], color, 1);
                                    }
                                }
                                _freea(eht.pEdges);
                            }
                            // Render the face rect in magenta
                            RECT rectFace;
                            hr = pAAMRlt->GetFaceRect(&rectFace);
                            if (SUCCEEDED(hr))
                            {
                                POINT leftTop = {rectFace.left, rectFace.top};
                                POINT rightTop = {rectFace.right - 1, rectFace.top};
                                POINT leftBottom = {rectFace.left, rectFace.bottom - 1};
                                POINT rightBottom = {rectFace.right - 1, rectFace.bottom - 1};
                                UINT32 nColor = 0xff00ff;
                                SUCCEEDED(hr = pColorImg->DrawLine(leftTop, rightTop, nColor, 1)) &&
                                    SUCCEEDED(hr = pColorImg->DrawLine(rightTop, rightBottom, nColor, 1)) &&
                                    SUCCEEDED(hr = pColorImg->DrawLine(rightBottom, leftBottom, nColor, 1)) &&
                                    SUCCEEDED(hr = pColorImg->DrawLine(leftBottom, leftTop, nColor, 1));
                            }
                        }
                        _freea(p3DMdl);
                    }
                    else
                    {
                        hr = E_OUTOFMEMORY;
                    }
                }
            }
        }
        _freea(pPts2D);
    }
    else
    {
        hr = E_OUTOFMEMORY;
    }
    return hr;
}
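When everything works you should see four windows: "b" (the raw color stream), "depth" (the colorized depth map with per-player tinting), "skeleton" (the stick figure), and "faceTracking" (the color frame with the yellow face mesh overlaid once tracking locks on).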
Related reading:
http://blog.csdn.net/yangtrees/article/details/8702778#
https://www.codeproject.com/articles/394975/how-to-use-kinect-face-tracking-sdk