// comment out the following #define, if you want to use the depth sensor and the KinectManager on per-scene basis
#define USE_SINGLE_KM_IN_MULTIPLE_SCENES
// NOTE: if the Unity editor shows encoding errors for this file, switch the script editor to Mono,
// re-save the file as UTF-16, and re-paste any code that was lost during the conversion.
using UnityEngine;
using UnityEngine.Networking;
using System;
using System.Collections;
using System.Collections.Generic;
//using System.Linq;
/// <summary>
/// KinectManager is the main and the most basic Kinect-related component. It is used to control the sensor and poll the data streams.
/// </summary>
public class KinectManager : MonoBehaviour
{
[Tooltip("How high above the ground is the sensor, in meters.传感器离地面有多高,单位是米")]
public float sensorHeight = 1.0f;
[Tooltip("Kinect elevation angle (in degrees). May be positive or negative.Kinect仰角(角度)")]
public float sensorAngle = 0f;
public enum AutoHeightAngle : int { DontUse, ShowInfoOnly, AutoUpdate, AutoUpdateAndShowInfo }
[Tooltip("Whether to automatically set the sensor height and angle or not. The user must stay in front of the sensor, in order the automatic detection to work." +
"是否自动设置传感器的高度和角度。用户必须呆在传感器前,以便自动检测工作。")]
public AutoHeightAngle autoHeightAngle = AutoHeightAngle.DontUse;
[Tooltip("Whether to flip left and right, relative to the sensor." +
"是否相对于传感器左右翻转")]
private bool flipLeftRight = false;
public enum UserMapType : int { None, RawUserDepth, BodyTexture, UserTexture, CutOutTexture }
[Tooltip("Whether and how to utilize the user and depth images." +
"是否以及如何利用用户和深度图像")]
public UserMapType computeUserMap = UserMapType.RawUserDepth;
[Tooltip("Whether to utilize the color camera image." +
"是否利用彩色相机图像")]
public bool computeColorMap = false;
[Tooltip("Whether to utilize the IR camera image." +
"是否使用IR相机图像。")]
public bool computeInfraredMap = false;
[Tooltip("Whether to display the user map on the screen." +
"是否在屏幕上显示用户地图。")]
public bool displayUserMap = false;
[Tooltip("Whether to display the color camera image on the screen." +
"是否在屏幕上显示彩色相机图像。")]
public bool displayColorMap = false;
[Tooltip("Whether to display skeleton lines over the the user map." +
"是否在用户地图上显示骨架线")]
public bool displaySkeletonLines = false;
// if percent is zero, it is calculated internally to match the selected width and height of the depth image
[Tooltip("Depth and color image width on the screen, as percent of the screen width. The image height is calculated depending on the width." +
"在屏幕上的深度和彩色图像宽度,如屏幕宽度的百分比。图像高度是根据宽度来计算的")]
public float DisplayMapsWidthPercent = 20f;
[Tooltip("Whether to use the multi-source reader, if one is available (K2-only feature)." +
"如果一个阅读器是可用的,是否使用多源阅读器(仅支持k2功能)。")]
public bool useMultiSourceReader = false;
// Public Bool to determine whether to use sensor's audio source, if available
//public bool useAudioSource = false;
[Tooltip("Minimum distance to a user, in order to be considered for skeleton data processing." +
"与用户的最小距离,以便用于骨架数据处理。")]
public float minUserDistance = 0.5f;
[Tooltip("Maximum distance to a user, in order to be considered for skeleton data processing. Value of 0 means no maximum distance limitation." +
"对用户的最大距离,以便考虑进行骨架数据处理。值0表示没有最大距离限制。")]
public float maxUserDistance = 0f;
[Tooltip("Maximum left or right distance to a user, in order to be considered for skeleton data processing. Value of 0 means no left/right distance limitation." +
"用户的最大左或右距离,以便用于骨架数据处理。值0表示没有左/右距离限制。")]
public float maxLeftRightDistance = 0f;
[Tooltip("Maximum number of users, which may be tracked simultaneously." +
"最大用户数,可同时跟踪。")]
public int maxTrackedUsers = 6;
[Tooltip("Whether to display the tracked users within the allowed distance only, or all users (higher fps)." +
"是在允许的距离内显示跟踪用户,还是显示所有用户(更高的fps)。")]
public bool showTrackedUsersOnly = true;
public enum UserDetectionOrder : int { Appearance = 0, Distance = 1, LeftToRight = 2 }
[Tooltip("How to assign users to player indices - by order of appearance, distance or left-to-right." +
"如何分配用户的球员指标-按外观,距离或从左到右。")]
public UserDetectionOrder userDetectionOrder = UserDetectionOrder.Appearance;
[Tooltip("Whether to utilize only the really tracked joints (and ignore the inferred ones) or not." +
"是否只使用真正跟踪的关节(忽略推断的关节)")]
public bool ignoreInferredJoints = false;
[Tooltip("Whether to ignore the Z-coordinates of the joints (i.e. to use them in 2D-scenes) or not." +
"是否忽略关节的z坐标(即在2d场景中使用它们)。")]
public bool ignoreZCoordinates = false;
[Tooltip("Whether to update the AvatarControllers in LateUpdate(), instead of in Update(). Needed for Mecanim animation blending." +
"是否在LateUpdate()中更新avatarcontroller,而不是在update()中更新。需要Mecanim动画混合")]
public bool lateUpdateAvatars = false;
[Tooltip("Whether to skip the remote avatar controllers in multiplayer games." +
"是否在多人游戏中跳过远程化身控制器。")]
public bool skipRemoteAvatars = false;
// [Tooltip("Uses own thread for processing kinect data.")]
// public bool useOwnThread = false;
public enum Smoothing : int { None, Default, Light, Medium, Aggressive }
[Tooltip("Set of joint smoothing parameters." +
"一组关节平滑参数。")]
public Smoothing smoothing = Smoothing.Default;
[Tooltip("Whether to apply the bone orientation constraints." +
"是否应用骨取向约束。")]
public bool useBoneOrientationConstraints = false;
//public bool useBoneOrientationsFilter = false;
[Tooltip("Whether to estimate the body joints velocities." +
"是否要估计人体关节的速度。")]
public bool estimateJointVelocities = false;
[Tooltip("Set of joint velocities smoothing parameters." +
"一组关节速度平滑参数。")]
public Smoothing velocitySmoothing = Smoothing.Light;
[Tooltip("Whether to allow detection of body turn arounds or not." +
"是否允许检测身体转动。")]
public bool allowTurnArounds = false;
public enum AllowedRotations : int { None = 0, Default = 1, All = 2 }
[Tooltip("Allowed wrist and hand rotations: None - no hand rotations are allowed, Default - hand rotations are allowed except for the twists, All - all rotations are allowed." +
"允许手腕和手部旋转:不允许手部旋转,默认情况下——除了扭转外,允许手部旋转——所有旋转都允许")]
public AllowedRotations allowedHandRotations = AllowedRotations.Default;
[Tooltip("Wait time in seconds, before a lost user gets removed. This is to prevent sporadical user switches." +
"在删除丢失的用户之前,以秒为单位等待时间。这是为了防止零星的用户交换机。")]
public float waitTimeBeforeRemove = 1f;
[Tooltip("List of the avatar controllers in the scene. If the list is empty, the available avatar controllers are detected at the scene start up." +
"场景中的化身控制器列表”)。如果列表为空,可用的avatar控制器将在场景启动时被检测出来。")]
public List<AvatarController> avatarControllers = new List<AvatarController>();
[Tooltip("Calibration pose required, to turn on the tracking of respective player." +
"需要的校正姿势,打开各自播放器的追踪")]
public KinectGestures.Gestures playerCalibrationPose;
[Tooltip("List of common gestures, to be detected for each player." +
"常见的手势列表,供每个玩家检测。")]
public List<KinectGestures.Gestures> playerCommonGestures = new List<KinectGestures.Gestures>();
[Tooltip("Minimum time between gesture detections (in seconds)." +
"手势检测之间的最小时间(以秒为单位)")]
public float minTimeBetweenGestures = 0.7f;
[Tooltip("Gesture manager, used to detect programmatic Kinect gestures." +
"手势管理器,用于检测程序化的Kinect手势。")]
public KinectGestures gestureManager;
[Tooltip("List of the gesture listeners in the scene. If the list is empty, the available gesture listeners will be detected at the scene start up." +
"场景中手势监听器的列表”)。如果列表为空,则在场景启动时将检测到可用的手势监听器。")]
public List<MonoBehaviour> gestureListeners = new List<MonoBehaviour>();
[Tooltip("GUI-Text to display user detection messages." +
"显示用户检测消息的GUI-Text。")]
public GUIText calibrationText;
[Tooltip("GUI-Text to display debug messages for the currently tracked gestures." +
"显示当前跟踪的手势的调试消息的GUI-Text。")]
public GUIText gesturesDebugText;
// Bool to keep track of whether Kinect has been initialized
protected bool kinectInitialized = false;
// The singleton instance of KinectManager
protected static KinectManager instance = null;
// available sensor interfaces
protected List<DepthSensorInterface> sensorInterfaces = null;
// primary SensorData structure
protected KinectInterop.SensorData sensorData = null;
// Depth and user maps
//protected KinectInterop.DepthBuffer depthImage;
//protected KinectInterop.BodyIndexBuffer bodyIndexImage;
//protected KinectInterop.UserHistogramBuffer userHistogramImage;
protected Color32[] usersHistogramImage;
protected ushort[] usersPrevState;
protected float[] usersHistogramMap;
protected Texture2D usersLblTex;
protected Rect usersMapRect;
protected int usersMapSize;
//protected int minDepth;
//protected int maxDepth;
// Color map
//protected KinectInterop.ColorBuffer colorImage;
//protected Texture2D usersClrTex;
protected Rect usersClrRect;
protected int usersClrSize;
// Kinect body frame data
protected KinectInterop.BodyFrameData bodyFrame;
//private Int64 lastBodyFrameTime = 0;
// List of all users
protected List<Int64> alUserIds = new List<Int64>();
protected Dictionary<Int64, int> dictUserIdToIndex = new Dictionary<Int64, int>();
protected Int64[] aUserIndexIds = new Int64[KinectInterop.Constants.MaxBodyCount];
protected Dictionary<Int64, float> dictUserIdToTime = new Dictionary<Int64, float>();
// Whether the users are limited by number or distance
protected bool bLimitedUsers = false;
// Primary (first or closest) user ID
protected Int64 liPrimaryUserId = 0;
// Kinect to world matrix
protected Matrix4x4 kinectToWorld = Matrix4x4.zero;
//private Matrix4x4 mOrient = Matrix4x4.zero;
// Calibration gesture data for each player
protected Dictionary<Int64, KinectGestures.GestureData> playerCalibrationData = new Dictionary<Int64, KinectGestures.GestureData>();
// gestures data and parameters
protected Dictionary<Int64, List<KinectGestures.GestureData>> playerGesturesData = new Dictionary<Int64, List<KinectGestures.GestureData>>();
protected Dictionary<Int64, float> gesturesTrackingAtTime = new Dictionary<Int64, float>();
// List of Gesture Listeners. They must implement KinectGestures.GestureListenerInterface
//public List<KinectGestures.GestureListenerInterface> gestureListenerInts;
// Body filter instances
protected JointPositionsFilter jointPositionFilter = null;
protected BoneOrientationsConstraint boneConstraintsFilter = null;
//protected BoneOrientationsFilter boneOrientationFilter = null;
protected JointVelocitiesFilter jointVelocityFilter = null;
// background kinect thread
//protected System.Threading.Thread kinectReaderThread = null;
protected bool kinectReaderRunning = false;
// if the background removal was used before pause
protected bool backgroundRemovalInited = false;
protected bool backgroundRemovalHiRes = false;
/// <summary>
/// Gets the single KinectManager instance.
/// </summary>
/// <value>The KinectManager instance.</value>
public static KinectManager Instance
{
get
{
return instance;
}
}
/// <summary>
/// Determines if the sensor and KinectManager-component are initialized and ready to use.
/// </summary>
/// <returns><c>true</c> if Kinect is initialized; otherwise, <c>false</c>.</returns>
public static bool IsKinectInitialized()
{
return instance != null ? instance.kinectInitialized : false;
}
/// <summary>
/// Determines if the sensor and KinectManager-component are initialized and ready to use.
/// </summary>
/// <returns><c>true</c> if Kinect is initialized; otherwise, <c>false</c>.</returns>
public bool IsInitialized()
{
return kinectInitialized;
}
/// <summary>
/// Gets the sensor data structure (this structure should not be modified and must be used only internally).
/// </summary>
/// <returns>The sensor data.</returns>
internal KinectInterop.SensorData GetSensorData()
{
return sensorData;
}
/// <summary>
/// Gets the selected depth-sensor platform.
/// </summary>
/// <returns>The selected depth-sensor platform.</returns>
public KinectInterop.DepthSensorPlatform GetSensorPlatform()
{
if(sensorData != null && sensorData.sensorInterface != null)
{
return sensorData.sensorInterface.GetSensorPlatform();
}
return KinectInterop.DepthSensorPlatform.None;
}
/// <summary>
/// Gets the number of bodies, tracked by the sensor.
/// </summary>
/// <returns>The body count.</returns>
public int GetBodyCount()
{
return sensorData != null ? sensorData.bodyCount : 0;
}
/// <summary>
/// Gets the the number of body joints, tracked by the sensor.
/// </summary>
/// <returns>The count of joints.</returns>
public int GetJointCount()
{
return sensorData != null ? sensorData.jointCount : 0;
}
/// <summary>
/// Gets the index of the joint in the joint's array
/// </summary>
/// <returns>The joint's index in the array.</returns>
/// <param name="joint">Joint.</param>
public int GetJointIndex(KinectInterop.JointType joint)
{
if(sensorData != null && sensorData.sensorInterface != null)
{
return sensorData.sensorInterface.GetJointIndex(joint);
}
// fallback - index matches the joint
return (int)joint;
}
// // returns the joint at given index
// public KinectInterop.JointType GetJointAtIndex(int index)
// {
// if(sensorData != null && sensorData.sensorInterface != null)
// {
// return sensorData.sensorInterface.GetJointAtIndex(index);
// }
//
// // fallback - index matches the joint
// return (KinectInterop.JointType)index;
// }
/// <summary>
/// Gets the parent joint of the given joint.
/// </summary>
/// <returns>The parent joint.</returns>
/// <param name="joint">Joint.</param>
public KinectInterop.JointType GetParentJoint(KinectInterop.JointType joint)
{
if(sensorData != null && sensorData.sensorInterface != null)
{
return sensorData.sensorInterface.GetParentJoint(joint);
}
// fall back - return the same joint (i.e. end-joint)
return joint;
}
/// <summary>
/// Gets the next joint of the given joint.
/// </summary>
/// <returns>The next joint.</returns>
/// <param name="joint">Joint.</param>
public KinectInterop.JointType GetNextJoint(KinectInterop.JointType joint)
{
if(sensorData != null && sensorData.sensorInterface != null)
{
return sensorData.sensorInterface.GetNextJoint(joint);
}
// fall back - return the same joint (i.e. end-joint)
return joint;
}
/// <summary>
/// Gets the width of the color image, returned by the sensor.
/// </summary>
/// <returns>The color image width.</returns>
public int GetColorImageWidth()
{
return sensorData != null ? sensorData.colorImageWidth : 0;
}
/// <summary>
/// Gets the height of the color image, returned by the sensor.
/// </summary>
/// <returns>The color image height.</returns>
public int GetColorImageHeight()
{
return sensorData != null ? sensorData.colorImageHeight : 0;
}
/// <summary>
/// Gets the width of the depth image, returned by the sensor.
/// </summary>
/// <returns>The depth image width.</returns>
public int GetDepthImageWidth()
{
return sensorData != null ? sensorData.depthImageWidth : 0;
}
/// <summary>
/// Gets the height of the depth image, returned by the sensor.
/// </summary>
/// <returns>The depth image height.</returns>
public int GetDepthImageHeight()
{
return sensorData != null ? sensorData.depthImageHeight : 0;
}
/// <summary>
/// Gets the raw body index data, if ComputeUserMap is true.
/// </summary>
/// <returns>The raw body index data.</returns>
public byte[] GetRawBodyIndexMap()
{
return sensorData != null ? sensorData.bodyIndexImage : null;
}
/// <summary>
/// Gets the raw depth data, if ComputeUserMap is true.
/// </summary>
/// <returns>The raw depth map.</returns>
public ushort[] GetRawDepthMap()
{
return sensorData != null ? sensorData.depthImage : null;
}
/// <summary>
/// Gets the raw infrared data, if ComputeInfraredMap is true.
/// </summary>
/// <returns>The raw infrared map.</returns>
public ushort[] GetRawInfraredMap()
{
return sensorData != null ? sensorData.infraredImage : null;
}
/// <summary>
/// Gets the users' histogram texture, if ComputeUserMap is true
/// </summary>
/// <returns>The users histogram texture.</returns>
public Texture2D GetUsersLblTex()
{
return usersLblTex;
}
/// <summary>
/// Gets the color image texture,if ComputeColorMap is true
/// </summary>
/// <returns>The color image texture.</returns>
public Texture2D GetUsersClrTex()
{
//return usersClrTex;
return sensorData != null ? sensorData.colorImageTexture : null;
}
/// <summary>
/// Determines whether at least one user is currently detected by the sensor
/// </summary>
/// <returns><c>true</c> if at least one user is detected; otherwise, <c>false</c>.</returns>
public bool IsUserDetected()
{
return kinectInitialized && (alUserIds.Count > 0);
}
/// <summary>
/// Determines whether the user with the specified index is currently detected by the sensor
/// </summary>
/// <returns><c>true</c> if the user is detected; otherwise, <c>false</c>.</returns>
/// <param name="i">The user index.</param>
public bool IsUserDetected(int i)
{
if(i >= 0 && i < KinectInterop.Constants.MaxBodyCount)
{
return (aUserIndexIds[i] != 0);
}
return false;
}
/// <summary>
/// Determines whether the user with the specified userId is in the list of tracked users or not.
/// </summary>
/// <returns><c>true</c> if the user with the specified userId is tracked; otherwise, <c>false</c>.</returns>
/// <param name="userId">User identifier.</param>
public bool IsUserTracked(Int64 userId)
{
return dictUserIdToIndex.ContainsKey(userId);
}
/// <summary>
/// Gets the number of currently detected users.
/// </summary>
/// <returns>The users count.</returns>
public int GetUsersCount()
{
return alUserIds.Count;
}
/// <summary>
/// Gets IDs of all currently tracked users.
/// </summary>
/// <returns>The list of all currently tracked users.</returns>
public List<long> GetAllUserIds()
{
return new List<long>(alUserIds);
}
/// <summary>
/// Gets the user ID by the specified user index.
/// </summary>
/// <returns>The user ID by index.</returns>
/// <param name="i">The user index.</param>
public Int64 GetUserIdByIndex(int i)
{
// if(i >= 0 && i < alUserIds.Count)
// {
// return alUserIds[i];
// }
if(i >= 0 && i < KinectInterop.Constants.MaxBodyCount)
{
return aUserIndexIds[i];
}
return 0;
}
/// <summary>
/// Gets the user index by the specified user ID.
/// </summary>
/// <returns>The user index by user ID.</returns>
/// <param name="userId">User ID</param>
public int GetUserIndexById(Int64 userId)
{
// for(int i = 0; i < alUserIds.Count; i++)
// {
// if(alUserIds[i] == userId)
// {
// return i;
// }
// }
if(userId == 0)
return -1;
for(int i = 0; i < aUserIndexIds.Length; i++)
{
if(aUserIndexIds[i] == userId)
{
return i;
}
}
return -1;
}
/// <summary>
/// Gets the body index by the specified user ID, or -1 if the user ID does not exist.
/// </summary>
/// <returns>The body index by user ID.</returns>
/// <param name="userId">User ID</param>
public int GetBodyIndexByUserId(Int64 userId)
{
if(dictUserIdToIndex.ContainsKey(userId))
{
int index = dictUserIdToIndex[userId];
return index;
}
return -1;
}
/// <summary>
/// Gets the list of tracked body indices.
/// </summary>
/// <returns>The list of body indices.</returns>
public List<int> GetTrackedBodyIndices()
{
List<int> alBodyIndices = new List<int>(dictUserIdToIndex.Values);
return alBodyIndices;
}
/// <summary>
/// Determines whether the tracked users are limited by their number or distance or not.
/// </summary>
/// <returns><c>true</c> if the users are limited by number or distance; otherwise, <c>false</c>.</returns>
public bool IsTrackedUsersLimited()
{
return bLimitedUsers;
}
/// <summary>
/// Gets the UserID of the primary user (the first or the closest one), or 0 if no user is detected.
/// </summary>
/// <returns>The primary user ID.</returns>
public Int64 GetPrimaryUserID()
{
return liPrimaryUserId;
}
/// <summary>
/// Sets the primary user ID, in order to change the active user.
/// </summary>
/// <returns><c>true</c>, if primary user ID was set, <c>false</c> otherwise.</returns>
/// <param name="userId">User ID</param>
public bool SetPrimaryUserID(Int64 userId)
{
bool bResult = false;
if(alUserIds.Contains(userId) || (userId == 0))
{
liPrimaryUserId = userId;
bResult = true;
}
return bResult;
}
/// <summary>
/// Gets the body index [0-5], if there is single body selected to be displayed on the user map, or -1 if all bodies are displayed.
/// </summary>
/// <returns>The displayed body index [0-5], or -1 if all bodies are displayed.</returns>
public int GetDisplayedBodyIndex()
{
if(sensorData != null)
{
return sensorData.selectedBodyIndex != 255 ? sensorData.selectedBodyIndex : -1;
}
return -1;
}
/// <summary>
/// Sets the body index [0-5], if a single body must be displayed on the user map, or -1 if all bodies must be displayed.
/// </summary>
/// <returns><c>true</c>, if the change was successful, <c>false</c> otherwise.</returns>
/// <param name="iBodyIndex">The single body index, or -1 if all bodies must be displayed.</param>
public bool SetDisplayedBodyIndex(int iBodyIndex)
{
if(sensorData != null)
{
sensorData.selectedBodyIndex = (byte)(iBodyIndex >= 0 ? iBodyIndex : 255);
}
return false;
}
/// <summary>
/// Gets the last body frame timestamp.
/// </summary>
/// <returns>The last body frame timestamp.</returns>
public long GetBodyFrameTimestamp()
{
return bodyFrame.liRelativeTime;
}
// do not change the data in the structure directly
/// <summary>
/// Gets the user body data (for internal purposes only).
/// </summary>
/// <returns>The user body data.</returns>
/// <param name="userId">User ID</param>
internal KinectInterop.BodyData GetUserBodyData(Int64 userId)
{
if(dictUserIdToIndex.ContainsKey(userId))
{
int index = dictUserIdToIndex[userId];
if(index >= 0 && index < sensorData.bodyCount)
{
return bodyFrame.bodyData[index];
}
}
return new KinectInterop.BodyData();
}
/// <summary>
/// Gets the kinect to world matrix.
/// </summary>
/// <returns>The kinect to world matrix.</returns>
public Matrix4x4 GetKinectToWorldMatrix()
{
return kinectToWorld;
}
/// <summary>
/// Updates the kinect to world transform matrix, according to the current values of SensorHeight, SensorAngle and FlipLeftRight.
/// </summary>
public void UpdateKinectToWorldMatrix()
{
//create the transform matrix - kinect to world
Vector3 vSensorPos = new Vector3(0f, sensorHeight, 0f);
Quaternion qSensorRot = Quaternion.Euler(-sensorAngle, 0f, 0f);
Vector3 vSensorScale = !flipLeftRight ? Vector3.one : new Vector3(-1f, 1f, 1f);
kinectToWorld.SetTRS(vSensorPos, qSensorRot, vSensorScale);
}
/// <summary>
/// Sets the kinect to world matrix.
/// </summary>
/// <param name="sensorPos">Sensor position.</param>
/// <param name="sensorRot">Sensor rotation.</param>
/// <param name="sensorScale">Position scale (could be used to flip left-right).</param>
public void SetKinectToWorldMatrix(Vector3 sensorPos, Quaternion sensorRot, Vector3 sensorScale)
{
kinectToWorld.SetTRS(sensorPos, sensorRot, sensorScale);
sensorHeight = sensorPos.y;
sensorAngle = -sensorRot.eulerAngles.x;
flipLeftRight = sensorScale.x < 0f;
// enable or disable getting height and angle info
autoHeightAngle = AutoHeightAngle.DontUse;
sensorData.hintHeightAngle = (autoHeightAngle != AutoHeightAngle.DontUse);
}
/// <summary>
/// Gets the user position, relative to the sensor, in meters.
/// </summary>
/// <returns>The user position.</returns>
/// <param name="userId">User ID</param>
public Vector3 GetUserPosition(Int64 userId)
{
if(dictUserIdToIndex.ContainsKey(userId))
{
int index = dictUserIdToIndex[userId];
if(index >= 0 && index < sensorData.bodyCount &&
bodyFrame.bodyData[index].bIsTracked != 0)
{
return bodyFrame.bodyData[index].position;
}
}
return Vector3.zero;
}
/// <summary>
/// Gets the user orientation.
/// </summary>
/// <returns>The user rotation.</returns>
/// <param name="userId">User ID</param>
/// <param name="flip">If set to <c>true</c>, this means non-mirrored rotation.</param>
public Quaternion GetUserOrientation(Int64 userId, bool flip)
{
if(dictUserIdToIndex.ContainsKey(userId))
{
int index = dictUserIdToIndex[userId];
if(index >= 0 && index < sensorData.bodyCount &&
bodyFrame.bodyData[index].bIsTracked != 0)
{
if(flip)
return bodyFrame.bodyData[index].normalRotation;
else
return bodyFrame.bodyData[index].mirroredRotation;
}
}
return Quaternion.identity;
}
/// <summary>
/// Gets the tracking state of the joint.
/// </summary>
/// <returns>The joint tracking state.</returns>
/// <param name="userId">User ID</param>
/// <param name="joint">Joint index</param>
public KinectInterop.TrackingState GetJointTrackingState(Int64 userId, int joint)
{
if(dictUserIdToIndex.ContainsKey(userId))
{
int index = dictUserIdToIndex[userId];
if(index >= 0 && index < sensorData.bodyCount &&
bodyFrame.bodyData[index].bIsTracked != 0)
{
if(joint >= 0 && joint < sensorData.jointCount)
{
return bodyFrame.bodyData[index].joint[joint].trackingState;
}
}
}
return KinectInterop.TrackingState.NotTracked;
}
/// <summary>
/// Determines whether the given joint of the specified user is being tracked.
/// </summary>
/// <returns><c>true</c> if this instance is joint tracked the specified userId joint; otherwise, <c>false</c>.</returns>
/// <param name="userId">User ID</param>
/// <param name="joint">Joint index</param>
public bool IsJointTracked(Int64 userId, int joint)
{
if(dictUserIdToIndex.ContainsKey(userId))
{
int index = dictUserIdToIndex[userId];
if(index >= 0 && index < sensorData.bodyCount &&
bodyFrame.bodyData[index].bIsTracked != 0)
{
if(joint >= 0 && joint < sensorData.jointCount)
{
KinectInterop.JointData jointData = bodyFrame.bodyData[index].joint[joint];
return ignoreInferredJoints ? (jointData.trackingState == KinectInterop.TrackingState.Tracked) :
(jointData.trackingState != KinectInterop.TrackingState.NotTracked);
}
}
}
return false;
}
/// <summary>
/// Gets the joint position of the specified user, in Kinect coordinate system, in meters.
/// </summary>
/// <returns>The joint kinect position.</returns>
/// <param name="userId">User ID</param>
/// <param name="joint">Joint index</param>
public Vector3 GetJointKinectPosition(Int64 userId, int joint)
{
if(dictUserIdToIndex.ContainsKey(userId))
{
int index = dictUserIdToIndex[userId];
if(index >= 0 && index < sensorData.bodyCount &&
bodyFrame.bodyData[index].bIsTracked != 0)
{
if(joint >= 0 && joint < sensorData.jointCount)
{
KinectInterop.JointData jointData = bodyFrame.bodyData[index].joint[joint];
return jointData.kinectPos;
}
}
}
return Vector3.zero;
}
/// <summary>
/// Gets the joint position of the specified user, in meters.
/// </summary>
/// <returns>The joint position.</returns>
/// <param name="userId">User ID</param>
/// <param name="joint">Joint index</param>
public Vector3 GetJointPosition(Int64 userId, int joint)
{
if(dictUserIdToIndex.ContainsKey(userId))
{
int index = dictUserIdToIndex[userId];
if(index >= 0 && index < sensorData.bodyCount &&
bodyFrame.bodyData[index].bIsTracked != 0)
{
if(joint >= 0 && joint < sensorData.jointCount)
{
KinectInterop.JointData jointData = bodyFrame.bodyData[index].joint[joint];
return jointData.position;
}
}
}
return Vector3.zero;
}
/// <summary>
/// Gets the joint position of the specified user with flipped x-coordinate, in meters.
/// </summary>
/// <returns>The joint position.</returns>
/// <param name="userId">User ID</param>
/// <param name="joint">Joint index</param>
public Vector3 GetJointPositionFlipX(Int64 userId, int joint)
{
if(dictUserIdToIndex.ContainsKey(userId))
{
int index = dictUserIdToIndex[userId];
if(index >= 0 && index < sensorData.bodyCount &&
bodyFrame.bodyData[index].bIsTracked != 0)
{
if(joint >= 0 && joint < sensorData.jointCount)
{
KinectInterop.JointData jointData = bodyFrame.bodyData[index].joint[joint];
Vector3 jointPos = jointData.position;
jointPos.x = -jointPos.x;
return jointPos;
}
}
}
return Vector3.zero;
}
/// <summary>
/// Gets the joint velocity for the specified user and joint, in meters/s.
/// </summary>
/// <returns>The joint velocity.</returns>
/// <param name="userId">User ID.</param>
/// <param name="joint">Joint index.</param>
public Vector3 GetJointVelocity(Int64 userId, int joint)
{
if(dictUserIdToIndex.ContainsKey(userId))
{
int index = dictUserIdToIndex[userId];
if(index >= 0 && index < sensorData.bodyCount &&
bodyFrame.bodyData[index].bIsTracked != 0)
// NOTE(review): the lines below are stray article/blog text that was accidentally pasted into the
// source; the remainder of GetJointVelocity() (and the rest of the class) is missing here and
// must be restored from the original K2-asset source.
//学习kinect 最重要的脚本——3000行代码——KinectManager 注释
//最新推荐文章于 2022-04-15 19:19:07 发布
//本文详细解读了使用Kinect进行开发时的关键脚本KinectManager,通过3000行代码的注释,深入剖析其工作原理和关键功能,包括肢体跟踪、深度映射等技术。
//摘要由CSDN通过智能技术生成