Leap Motion swipe gesture design (left / right / up / down / forward / back), dynamic gestures

using System.Collections;
using System.Collections.Generic;
using UnityEngine;

using Leap;
using Leap.Unity;

public class LeapGestures : MonoBehaviour {

	public static bool Gesture_left = false;
	public static bool Gesture_right = false;
	public static bool Gesture_up = false;
	public static bool Gesture_down = false;
	public static bool Gesture_zoom = false;
	public static float movePOs = 0.0f;

	private LeapProvider mProvider;
	private Frame mFrame;
	private Hand mHand;



	private Vector leftPosition;
	private Vector rightPosition;
	public static float zoom = 1.0f;
	[Tooltip ("Velocity (m/s) of Palm ")]

	public float smallestVelocity = 1.45f;//手掌移动的最小速度

	[Tooltip ("Velocity (m/s) of Single Direction ")]
	[Range(0,1)]
	public float deltaVelocity = 1.0f;//单方向上手掌移动的速度

	// Use this for initialization
	void Start () {
		mProvider = FindObjectOfType<LeapProvider> ();	// grab the LeapProvider in the scene
	}
	
	// Update is called once per frame
	void Update () {

		mFrame = mProvider.CurrentFrame;	// get the current tracking frame
		// number of tracked hands
		//print ("hand num are " + mFrame.Hands.Count);

		if (mFrame.Hands.Count > 0) {
			// two hands: zoom gesture; one hand: swipe gestures
			if (mFrame.Hands.Count == 2) 
				zoom = CalculateDistance(mFrame);

			if (mFrame.Hands.Count == 1) 
				LRUDGestures(mFrame,ref movePOs);
		}
	}

	 
	// two-hand gesture: distance between the two palms, used as a zoom factor
	float CalculateDistance(Frame mFrame)
	{
		Gesture_zoom = true;
		Gesture_left = false;
		Gesture_right = false;

		float distance = 0f;
		//print ("Two hands");
		foreach (var itemHands in mFrame.Hands) {
			if (itemHands.IsLeft) {
				leftPosition = itemHands.PalmPosition;
				//print ("leftPosition" + leftPosition);
			}
			if (itemHands.IsRight) {
				rightPosition = itemHands.PalmPosition;
				//print ("rightPosition" + rightPosition);
			}
		}

		if (leftPosition != Vector.Zero && rightPosition != Vector.Zero ) {

			Vector3 leftPos = new Vector3 (leftPosition.x, leftPosition.y,leftPosition.z);
			Vector3 rightPos = new Vector3 (rightPosition.x, rightPosition.y,rightPosition.z);

			distance = 10*Vector3.Distance (leftPos, rightPos);
			print ("distance" + distance);
		}

		// fall back to 1 when one of the palm positions was unavailable
		if (distance != 0)
			return distance;
		else
			return 1f;
	}




	// single-hand swipe gestures: left / right / up / down / forward / back
	void LRUDGestures(Frame mFrame, ref float movePOs)
	{
		Gesture_zoom = false;
		foreach (var item in mFrame.Hands) {
			int numFinger = item.Fingers.Count;
			//print ("item is  " + numFinger);

			//print("hand are " + isOpenFullHand (item));
			// print ("isOpenFullHands is  " + isOpenFullHands(item));


			if (item.GrabStrength == 1) {
				// closed fist: no fingers extended, ignored here
				//print ("fist");

			} else if (item.GrabStrength == 0) {
				// open hand: check which direction the palm is moving
				//print("PalmVelocity" + item.PalmVelocity);
				//print("PalmPosition" + item.PalmPosition);
				movePOs = item.PalmPosition.x;
				if (isMoveLeft (item)) {
					Gesture_left = true;
					Gesture_right = false;
					Gesture_up = false;
					Gesture_down = false;
					print ("move left");

				} else if (isMoveRight (item)) {
					Gesture_left = false;
					Gesture_right = true;
					Gesture_up = false;
					Gesture_down = false;
					print ("move right");

				} else if (isMoveUp (item)) {
					Gesture_left = false;
					Gesture_right = false;
					Gesture_up = true;
					Gesture_down = false;
					print ("move up");

				} else if (isMoveDown (item)) {
					Gesture_left = false;
					Gesture_right = false;
					Gesture_up = false;
					Gesture_down = true;
					print ("move down");

				} else if (isMoveForward (item)) {
					Gesture_left = false;
					Gesture_right = false;
					print ("move forward");

				} else if (isMoveBack (item)) {
					Gesture_left = false;
					Gesture_right = false;
					print ("move back");

				}
			}
		}
	}



	// closed fist, judged from the grab angle
	private bool isStone(Hand hand)
	{
		//print ("hand.GrabAngle" + hand.GrabAngle);
		return hand.GrabAngle > 2.0f;
	}
	// is the hand grabbing
	public bool isGrabHand (Hand hand)
	{
		return hand.GrabStrength > 0.8f;	// grab strength threshold
	}


	// palm swipes to the right
	public bool isMoveRight (Hand hand)
	{
		return hand.PalmVelocity.x > deltaVelocity && !isStationary (hand);
	}

	// palm swipes to the left
	public bool isMoveLeft (Hand hand)
	{
		//print (hand.PalmVelocity.x );
		return hand.PalmVelocity.x < -deltaVelocity && !isStationary (hand);
	}

	// palm swipes up
	public bool isMoveUp (Hand hand)   	
	{
		//print ("hand.PalmVelocity.y" + hand.PalmVelocity.y);

		return hand.PalmVelocity.y > deltaVelocity && !isStationary (hand);
	}

	// palm swipes down
	public bool isMoveDown (Hand hand) 		
	{
		return hand.PalmVelocity.y < -deltaVelocity && !isStationary (hand);
	}


	// palm swipes forward
	public bool isMoveForward (Hand hand)   	
	{
		//print (hand.PalmVelocity.z);
		return hand.PalmVelocity.z > deltaVelocity && !isStationary (hand);
	}

	// palm swipes back
	public bool isMoveBack (Hand hand) 		
	{
		return hand.PalmVelocity.z < -deltaVelocity && !isStationary (hand);
	}

	// the palm is (almost) stationary
	public bool isStationary (Hand hand)
	{
		return hand.PalmVelocity.Magnitude < smallestVelocity;	// Magnitude is the length of the velocity vector
	}


}


A reasonable value for deltaVelocity can be chosen by printing hand.PalmVelocity.x/y/z and tuning the threshold to your setup (see the sketch below). The sign of the x, y and z components differs with the direction the palm moves, and that sign difference is the basis for these simple swipe gestures.
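A minimal debug logger for this tuning step, assuming only that a LeapProvider is present in the scene (the class name PalmVelocityLogger is hypothetical, not part of the original):

using UnityEngine;
using Leap;
using Leap.Unity;

public class PalmVelocityLogger : MonoBehaviour {

	private LeapProvider mProvider;

	void Start () {
		mProvider = FindObjectOfType<LeapProvider> ();
	}

	void Update () {
		if (mProvider == null) return;
		foreach (var hand in mProvider.CurrentFrame.Hands) {
			Vector v = hand.PalmVelocity;
			// swipe in one direction at a time and note which axis changes and with what sign
			Debug.Log (string.Format ("palm velocity x:{0:F2} y:{1:F2} z:{2:F2} |v|:{3:F2}",
				v.x, v.y, v.z, v.Magnitude));
		}
	}
}

Attach it to any GameObject in the same scene as the LeapProvider and watch the Console while swiping.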

In practice, the four up / down / left / right swipe gestures are quite stable.
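Because the flags are public static, other scripts can read them directly. A minimal consumer sketch, where the class name GestureRotator and the rotate/scale behaviour are my own assumptions rather than part of the original, might look like this:

using UnityEngine;

public class GestureRotator : MonoBehaviour {

	public Transform target;		// object driven by the gestures
	public float rotateSpeed = 90f;	// degrees per second

	void Update () {
		if (target == null) return;

		// spin while a left/right swipe is active
		if (LeapGestures.Gesture_left)
			target.Rotate (Vector3.up, rotateSpeed * Time.deltaTime);
		else if (LeapGestures.Gesture_right)
			target.Rotate (Vector3.up, -rotateSpeed * Time.deltaTime);

		// scale with the two-hand distance
		if (LeapGestures.Gesture_zoom)
			target.localScale = Vector3.one * LeapGestures.zoom;
	}
}

Attach it to any GameObject, assign a target in the Inspector, and it will react to whatever LeapGestures recognises each frame.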

