OpenNI: Acquiring the Human Skeleton Without Calibration

// MySkeletonNew.cpp : Defines the entry point for the console application.
//

#include "stdafx.h"
#include <stdlib.h>
#include <iostream>
#include <vector>
#include <XnCppWrapper.h>
#include "cv.h"
#include "highgui.h"
#include <math.h>

using namespace std;
// callback function of user generator: new user
void XN_CALLBACK_TYPE NewUser(xn::UserGenerator& generator,
							  XnUserID user,
							  void* pCookie )
{
	cout << "New user identified: " << user << endl;
	generator.GetSkeletonCap().RequestCalibration( user, true );
}

// callback function of skeleton: calibration end 
void XN_CALLBACK_TYPE CalibrationEnd(xn::SkeletonCapability& skeleton,
									 XnUserID user,
									 XnCalibrationStatus eStatus,
									 void* pCookie )
{
	cout << "Calibration complete for user " <<  user << ", ";
	if( eStatus == XN_CALIBRATION_STATUS_OK )
	{
		cout << "Success" << endl;
		skeleton.StartTracking( user );
	}
	else
	{
		cout << "Failure" << endl;
		skeleton.RequestCalibration( user, true );
	}
}


int main( int argc, char** argv )
{
	// 1. initialize context
	char key=0;
	xn::Context context;
	context.Init();
	xn::DepthGenerator depthGenerator;
	xn::ImageGenerator imageGenerator;

	// 14 limb segments, each a pair of 1-based XnSkeletonJoint indices
	// (1 = XN_SKEL_HEAD, 2 = XN_SKEL_NECK, 3 = XN_SKEL_TORSO, ..., 24 = XN_SKEL_RIGHT_FOOT)
	int startSkelPoints[14]={1,2,6,6,12,17,6,7,12,13,17,18,21,22};
	int endSkelPoints[14]={2,3,12,21,17,21,7,9,13,15,18,20,22,24};
	xn::ImageMetaData imageMD;

	IplImage* cameraImg=cvCreateImage(cvSize(640,480),IPL_DEPTH_8U,3);
	cvNamedWindow("Camera",1);

	// map output mode
	XnMapOutputMode mapMode;
	mapMode.nXRes = 640;
	mapMode.nYRes = 480;
	mapMode.nFPS = 30;

	// create generators
	depthGenerator.Create( context );           // node that collects depth data
	depthGenerator.SetMapOutputMode( mapMode ); // set this node's video mode
	imageGenerator.Create( context );
	imageGenerator.SetMapOutputMode( mapMode ); // keep RGB at 640x480 to match the frame copy below
	depthGenerator.GetAlternativeViewPointCap().SetViewPoint( imageGenerator ); // align the depth map with the RGB view
	// 2. create user generator
	xn::UserGenerator mUserGenerator;
	mUserGenerator.Create( context );
	// 3. Register callback functions of user generator
	XnCallbackHandle hUserCB;
	mUserGenerator.RegisterUserCallbacks( NewUser, NULL, NULL, hUserCB );

	// 4. Register callback functions of skeleton capability
	xn::SkeletonCapability mSC = mUserGenerator.GetSkeletonCap();
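	// request the full joint profile so all 24 joints are tracked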
	mSC.SetSkeletonProfile( XN_SKEL_PROFILE_ALL );
	XnCallbackHandle hCalibCB;
	mSC.RegisterToCalibrationComplete( CalibrationEnd, &mUserGenerator, hCalibCB );

	

	// 5. start generate data
	context.StartGeneratingAll();
	while( key!=27 )
	{
		// 6. Update data
		context.WaitAndUpdateAll();
		imageGenerator.GetMetaData(imageMD);
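		// copy the 640x480 RGB888 frame; OpenNI delivers RGB, hence the RGB->BGR conversion below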
		memcpy(cameraImg->imageData,imageMD.Data(),640*480*3);
		cvCvtColor(cameraImg,cameraImg,CV_RGB2BGR);
		// 7. get user information
		XnUInt16 nUsers = mUserGenerator.GetNumberOfUsers();
		if( nUsers > 0 )
		{
			// 8. get users
			XnUserID* UserID = new XnUserID[nUsers];
			mUserGenerator.GetUsers( UserID, nUsers );

			// 9. check each user
			for( int i = 0; i < nUsers; ++i )
			{
				// 10. if is tracking skeleton
				if( mSC.IsTracking( UserID[i] ) )
				{
					XnPoint3D skelPointsIn[24],skelPointsOut[24];
					XnSkeletonJointTransformation mJointTran;
					for(int iter=0;iter<24;iter++)
					{
						//XnSkeletonJoint from 1 to 24			
						mSC.GetSkeletonJoint( UserID[i],XnSkeletonJoint(iter+1), mJointTran );
						skelPointsIn[iter]=mJointTran.position.position;
					}
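					// project real-world joint positions (in mm) to 640x480 pixel coordinates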
					depthGenerator.ConvertRealWorldToProjective(24,skelPointsIn,skelPointsOut);
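					// draw the 14 limb segments and their joints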
					for(int d=0;d<14;d++)
					{
						CvPoint startpoint = cvPoint(skelPointsOut[startSkelPoints[d]-1].X,skelPointsOut[startSkelPoints[d]-1].Y);
						CvPoint endpoint = cvPoint(skelPointsOut[endSkelPoints[d]-1].X,skelPointsOut[endSkelPoints[d]-1].Y);
						if (d==0)
						{
							// d == 0 is the head-neck segment: frame the head with a red
							// box whose width is derived from the head-to-neck distance
							int size = (int)sqrt((double)((endpoint.x - startpoint.x)*(endpoint.x - startpoint.x)+(endpoint.y - startpoint.y)*(endpoint.y - startpoint.y)));
							CvPoint p1,p2;
							p1.x = startpoint.x - size/2;
							p1.y = startpoint.y;
							p2.x = endpoint.x + size/2;
							p2.y = endpoint.y;
							cvRectangle(cameraImg,p1,p2,CV_RGB(255,0,0));
						}
						cvCircle(cameraImg,startpoint,3,CV_RGB(0,0,255),12);
						cvCircle(cameraImg,endpoint,3,CV_RGB(0,0,255),12);
						cvLine(cameraImg,startpoint,endpoint,CV_RGB(0,0,255),4);
					}


				}
			}
			delete [] UserID;
		}
		cvShowImage("Camera",cameraImg);
		key=cvWaitKey(20);
	}
	// 11. stop and shut down
	cvDestroyWindow("Camera");
	cvReleaseImage(&cameraImg);
	context.StopGeneratingAll();
	//context.Shutdown();
	context.Release();
	return 0;
}





I've only recently started working with the Kinect. My environment is VS2008 + OpenCV 2.0 + OpenNI 1.4.0.2 + NITE 1.5.0.2, and the language is C++.

Starting with NITE 1.5.x, skeleton tracking no longer requires calibration. What a relief: no more holding the Psi pose in front of the Kinect. You can see this in the NewUser callback above, which calls RequestCalibration directly instead of starting pose detection first.
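For contrast, here is a minimal sketch of what the older, pose-based flow looked like. This is illustrative only, not taken from this project; NewUserOld and PoseDetected are hypothetical names, and the registration line is an assumption about how you would wire it up:

// Pre-NITE-1.5 style: the user had to strike the Psi pose before calibration.
void XN_CALLBACK_TYPE NewUserOld(xn::UserGenerator& generator, XnUserID user, void* pCookie)
{
	// ask NITE to watch this user for the calibration pose
	generator.GetPoseDetectionCap().StartPoseDetection("Psi", user);
}

void XN_CALLBACK_TYPE PoseDetected(xn::PoseDetectionCapability& pose, const XnChar* strPose, XnUserID user, void* pCookie)
{
	// once the pose is seen, stop watching and request calibration
	xn::UserGenerator* pUserGen = (xn::UserGenerator*)pCookie;
	pUserGen->GetPoseDetectionCap().StopPoseDetection(user);
	pUserGen->GetSkeletonCap().RequestCalibration(user, TRUE);
}

// wired up with something like (hypothetical handle name hPoseCB):
// mUserGenerator.GetPoseDetectionCap().RegisterToPoseDetected(PoseDetected, &mUserGenerator, hPoseCB);

You can also query mSC.NeedPoseForCalibration() at runtime to check whether the installed NITE version still requires a pose.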

Without further ado, the complete program is listed above.

This article draws on the work of two predecessors, combined with some ideas of my own, to implement calibration-free skeleton tracking. Their blog posts are linked below; I recommend reading them before working through the code above:

http://topic.csdn.net/u/20120416/22/c3323ce1-6940-4de0-97b7-31378e39bdcf.html

http://blog.csdn.net/chenxin_130/article/details/6950480

The red rectangle at the end is drawn from the joint points: it is added for the first segment (head to neck), sized by the head-to-neck distance, so it roughly frames the user's head.
