2D-based face tracking display

 
// FaceTrack.cpp : Defines the entry point for the console application.
//

#include "stdafx.h"
#include "opencv2/opencv.hpp"
#include "Kinect.h"
#include "kinect.face.h"
using namespace cv;
using namespace std;
//#pragma comment ( lib, "Kinect20.face.lib" ) 
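// Build-setup note (assumption about the project configuration): besides
// Kinect20.lib, the face API needs Kinect20.Face.lib on the linker input list
// (or the #pragma above uncommented), and the NuiDatabase folder from the
// Kinect SDK's Redist\Face directory copied next to the executable; without
// it, face tracking produces no results at run time.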

//Safely release a COM interface pointer
template<class Interface>
inline void SafeRelease(Interface *& pInterfaceToRelease)
{
	if (pInterfaceToRelease != NULL)
	{
		pInterfaceToRelease->Release();
		pInterfaceToRelease = NULL;
	}
}

int _tmain(int argc, _TCHAR* argv[])
{
	IKinectSensor *kinectSensor = nullptr;
	HRESULT hr = GetDefaultKinectSensor(&kinectSensor);
	if (FAILED(hr) || kinectSensor == nullptr)
		return -1;                                   // no Kinect sensor available
	kinectSensor->Open();
	
//Set up depth data acquisition
	IDepthFrameSource* depths = nullptr;
	kinectSensor->get_DepthFrameSource(&depths);     // get the depth frame source
	int height, width;                               // depth image width and height
	IFrameDescription   * myDescription = nullptr;   // frame description holding the depth resolution
	depths->get_FrameDescription(&myDescription);
	myDescription->get_Height(&height);
	myDescription->get_Width(&width);
	myDescription->Release();

	IDepthFrameReader* depthRead = nullptr;
	depths->OpenReader(&depthRead); // open the depth frame reader

	cout << "width " << width << "  height " << height << endl;
	Mat img16u(height,width,CV_16UC1);
	Mat img8u(height,width,CV_8UC1);

//Set up color data acquisition
	IColorFrameSource* colorFramSouce = nullptr;
	kinectSensor->get_ColorFrameSource(&colorFramSouce); // get the color frame source
	IColorFrameReader* colorFramRead = nullptr;
	colorFramSouce->OpenReader(&colorFramRead); // open the color frame reader

	IFrameDescription* frameDs = nullptr;
	colorFramSouce->get_FrameDescription(&frameDs); // get the color frame description
	int heightColor, widthColor;
	frameDs->get_Height(&heightColor);
	frameDs->get_Width(&widthColor);
	frameDs->Release();
	cout << "heightColor " << heightColor << "  widthColor " << widthColor << endl;
	Mat imgColor(heightColor, widthColor, CV_8UC4);

//Set up body (skeleton) data acquisition
	IBodyFrameSource    * myBodySource = nullptr;       // body frame source
	kinectSensor->get_BodyFrameSource(&myBodySource);

	IBodyFrameReader    * myBodyReader = nullptr;       // body frame reader
	myBodySource->OpenReader(&myBodyReader);
	int myBodyCount = 0;
	myBodySource->get_BodyCount(&myBodyCount);           // at most BODY_COUNT (6) bodies

	ICoordinateMapper   * myMapper = nullptr;            // coordinate mapper (not used below)
	kinectSensor->get_CoordinateMapper(&myMapper);

//Face tracking setup
	IFaceFrameSource * myFaceSource[BODY_COUNT];
	DWORD features = FaceFrameFeatures::FaceFrameFeatures_BoundingBoxInColorSpace
		| FaceFrameFeatures::FaceFrameFeatures_PointsInColorSpace
		| FaceFrameFeatures::FaceFrameFeatures_RotationOrientation;
		// Additional features that could be OR'ed into the mask:
		//| FaceFrameFeatures::FaceFrameFeatures_Happy
		//| FaceFrameFeatures::FaceFrameFeatures_RightEyeClosed
		//| FaceFrameFeatures::FaceFrameFeatures_LeftEyeClosed
		//| FaceFrameFeatures::FaceFrameFeatures_MouthOpen
		//| FaceFrameFeatures::FaceFrameFeatures_MouthMoved
		//| FaceFrameFeatures::FaceFrameFeatures_LookingAway
		//| FaceFrameFeatures::FaceFrameFeatures_Glasses
		//| FaceFrameFeatures::FaceFrameFeatures_FaceEngagement;
	IFaceFrameReader* faceReader[BODY_COUNT];

	// Create one face frame source and reader per possible body slot
	for (int i = 0; i < BODY_COUNT; i++)
	{
		if (CreateFaceFrameSource(kinectSensor, 0, features, &myFaceSource[i]) != S_OK)
		{
			std::cerr << "Error : CreateFaceFrameSource" << std::endl;
			return -1;
		}
		myFaceSource[i]->OpenReader(&faceReader[i]);
	}

	// Main acquisition loop
	while (true)
	{
		// Acquire the latest depth frame
		IDepthFrame* depthFram = nullptr;
		if (depthRead->AcquireLatestFrame(&depthFram) == S_OK) // succeeds only when a new depth frame is available
		{
			depthFram->CopyFrameDataToArray(height * width, (UINT16 *)img16u.data); // copy raw 16-bit depth values (in mm)
			img16u.convertTo(img8u, CV_8UC1, 255.0 / 4500);   // scale 16-bit depth to 8-bit grayscale (4500 mm -> 255)
			imshow("TEST", img8u);
			depthFram->Release();
		}
		// Acquire the latest color frame
		IColorFrame* colorFram = nullptr;
		if (colorFramRead->AcquireLatestFrame(&colorFram) == S_OK) // succeeds only when a new color frame is available
		{
			colorFram->CopyConvertedFrameDataToArray(heightColor*widthColor * 4, (BYTE*)imgColor.data, ColorImageFormat_Bgra); // convert to BGRA for OpenCV
			colorFram->Release();
		}
		// Acquire the latest body (skeleton) frame
		IBodyFrame *myBodyFrame = nullptr;
		if (myBodyReader->AcquireLatestFrame(&myBodyFrame) == S_OK)
		{
			myBodyCount = 0;
			IBody   ** bodyArr = nullptr;
			myBodySource->get_BodyCount(&myBodyCount);
			bodyArr = new IBody *[myBodyCount];
			for (int i = 0; i < myBodyCount; i++)   // initialize bodyArr
				bodyArr[i] = nullptr;

			if (myBodyFrame->GetAndRefreshBodyData(myBodyCount, bodyArr) == S_OK)
			{
				for (int i = 0; i < myBodyCount; i++)   // iterate over the up-to-6 body slots (not all may be used)
				{
					BOOLEAN     result = false;
					if (bodyArr[i]->get_IsTracked(&result) == S_OK && result)   // is this body currently tracked?
					{
						cout << "Body " << i << " tracked!" << endl;
						UINT64 trackingId = _UI64_MAX;
						if (bodyArr[i]->get_TrackingId(&trackingId) == S_OK)
						{
							myFaceSource[i]->put_TrackingId(trackingId);   // hand the body's tracking id to its face source
						}
					}
				}
			}

			// For each body slot, try to read the corresponding face frame
			for (int i = 0; i < myBodyCount; i++)
			{
				IFaceFrame* faceFrame = nullptr;
				if (faceReader[i]->AcquireLatestFrame(&faceFrame) == S_OK && faceFrame != nullptr)
				{
					BOOLEAN tracked = false;
					if (SUCCEEDED(faceFrame->get_IsTrackingIdValid(&tracked)) && tracked)
					{
						IFaceFrameResult *faceResult = nullptr;
						if (SUCCEEDED(faceFrame->get_FaceFrameResult(&faceResult)))
						{
							PointF facePoint[FacePointType_Count];  // FacePointType_Count = 5: eyes, nose, mouth corners
							if (SUCCEEDED(faceResult->GetFacePointsInColorSpace(FacePointType_Count, facePoint)))
							{
								for (int j = 0; j < FacePointType_Count; j++)
								{
									circle(imgColor, Point(facePoint[j].X, facePoint[j].Y), 5, Scalar(0, 0, 255), 5);
								}
							}
							RectI box;
							if (SUCCEEDED(faceResult->get_FaceBoundingBoxInColorSpace(&box)))
							{
								rectangle(imgColor, cv::Rect(box.Left, box.Top, box.Right - box.Left, box.Bottom - box.Top), Scalar(0, 0, 255, 255),5);
							}
							faceResult->Release();

						}
					}
				}
				SafeRelease(faceFrame);                  // release the face frame (avoid leaking it every iteration)
			}

			for (int i = 0; i < myBodyCount; i++)        // release the body interfaces
				SafeRelease(bodyArr[i]);
			delete[] bodyArr;                            // free the array allocated for this frame

			imshow("imgColor", imgColor);                // show the color image with the face overlays

			myBodyFrame->Release();
		}

		if (waitKey(1) == 27)        // press Esc to leave the loop
			break;
	}

	// Release the interfaces that are no longer needed and close the sensor
	SafeRelease(depthRead);
	SafeRelease(depths);
	SafeRelease(colorFramRead);
	SafeRelease(colorFramSouce);
	SafeRelease(myBodyReader);
	SafeRelease(myBodySource);
	SafeRelease(myMapper);
	for (int i = 0; i < BODY_COUNT; i++)
	{
		SafeRelease(faceReader[i]);
		SafeRelease(myFaceSource[i]);
	}
	kinectSensor->Close();
	return 0;
}
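The feature mask above also requests FaceFrameFeatures_RotationOrientation, but the loop only draws the five points and the bounding box. A minimal sketch of how the head pose could be read as well (the helper name FaceRotationToEuler is mine; the quaternion-to-Euler conversion follows the FaceBasics sample that ships with the Kinect SDK):

#define _USE_MATH_DEFINES
#include <cmath>
#include "Kinect.h"
#include "Kinect.Face.h"

// Convert the face rotation quaternion into pitch/yaw/roll in degrees.
static void FaceRotationToEuler(const Vector4& q, double& pitch, double& yaw, double& roll)
{
	double x = q.x, y = q.y, z = q.z, w = q.w;
	pitch = atan2(2 * (y * z + w * x), w * w - x * x - y * y + z * z) / M_PI * 180.0;
	yaw   = asin(2 * (w * y - x * z)) / M_PI * 180.0;
	roll  = atan2(2 * (x * y + w * z), w * w + x * x - y * y - z * z) / M_PI * 180.0;
}

// Sketch of the call site, right after the bounding box is drawn:
//   Vector4 rotation;
//   if (SUCCEEDED(faceResult->get_FaceRotationQuaternion(&rotation)))
//   {
//       double pitch, yaw, roll;
//       FaceRotationToEuler(rotation, pitch, yaw, roll);
//       cout << "pitch " << pitch << "  yaw " << yaw << "  roll " << roll << endl;
//   }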

Only the five main facial feature points are displayed.
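These five points come back from GetFacePointsInColorSpace in the order defined by the FacePointType enumeration in Kinect.Face.h: left eye, right eye, nose, left mouth corner, right mouth corner. A small sketch (the helper name drawLabeledFacePoints is mine) that could replace the plain circle loop above and print each landmark's name next to it:

#include "opencv2/opencv.hpp"
#include "Kinect.h"
#include "Kinect.Face.h"

// Names in FacePointType order (EyeLeft=0, EyeRight=1, Nose=2,
// MouthCornerLeft=3, MouthCornerRight=4, Count=5).
static const char* kFacePointNames[FacePointType_Count] = {
	"EyeLeft", "EyeRight", "Nose", "MouthCornerLeft", "MouthCornerRight"
};

// Draw each of the five face points with its name next to it.
static void drawLabeledFacePoints(cv::Mat& img, const PointF (&pts)[FacePointType_Count])
{
	for (int j = 0; j < FacePointType_Count; j++)
	{
		cv::Point p(cvRound(pts[j].X), cvRound(pts[j].Y));
		cv::circle(img, p, 5, cv::Scalar(0, 0, 255), 5);
		cv::putText(img, kFacePointNames[j], p + cv::Point(8, -8),
			cv::FONT_HERSHEY_SIMPLEX, 0.6, cv::Scalar(0, 255, 0), 2);
	}
}

Calling drawLabeledFacePoints(imgColor, facePoint) in place of the circle loop makes it easy to check which landmark is which.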
