Gesture-Based Virtual Keyboard

Define a HandDetector class

import cv2
import mediapipe as mp
import math


class HandDetector:
    """
    Finds Hands using the mediapipe library. Exports the landmarks
    in pixel format. Adds extra functionalities like finding how
    many fingers are up or the distance between two fingers. Also
    provides bounding box info of the hand found.
    """

    def __init__(self, mode=False, maxHands=2, detectionCon=0.5, minTrackCon=0.5):
        """
        :param mode: In static mode, detection is done on each image: slower
        :param maxHands: Maximum number of hands to detect
        :param detectionCon: Minimum Detection Confidence Threshold
        :param minTrackCon: Minimum Tracking Confidence Threshold
        """
        self.mode = mode
        self.maxHands = maxHands
        self.detectionCon = detectionCon
        self.minTrackCon = minTrackCon

        self.mpHands = mp.solutions.hands
        self.hands = self.mpHands.Hands(static_image_mode=self.mode,
                                        max_num_hands=self.maxHands,
                                        min_detection_confidence=self.detectionCon,
                                        min_tracking_confidence=self.minTrackCon)
        self.mpDraw = mp.solutions.drawing_utils
        self.tipIds = [4, 8, 12, 16, 20]  # landmark ids of the five fingertips
        self.fingers = []
        self.lmList = []
        self.results = None  # populated by findHands(); findPosition() relies on it
        
    def findPosition(self, img, draw=True):
        """
        Returns the landmarks of all detected hands as a flat list of
        [id, x, y] entries in pixel coordinates. Must be called after
        findHands(), which populates self.results.
        """
        self.lmList = []
        if self.results and self.results.multi_hand_landmarks:
            for handLms in self.results.multi_hand_landmarks:
                for id, lm in enumerate(handLms.landmark):
                    h, w, c = img.shape
                    cx, cy = int(lm.x * w), int(lm.y * h)
                    # print(id, cx, cy)
                    self.lmList.append([id, cx, cy])
                    if draw:
                        cv2.circle(img, (cx, cy), 12, (255, 0, 255), cv2.FILLED)
        return self.lmList

    def findHands(self, img, draw=True, flipType=True):
        """
        Finds hands in a BGR image.
        :param img: Image to find the hands in.
        :param draw: Flag to draw the output on the image.
        :return: Image with or without drawings
        """
        imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        self.results = self.hands.process(imgRGB)
        allHands = []
        h, w, c = img.shape
        if self.results.multi_hand_landmarks:
            for handType, handLms in zip(self.results.multi_handedness, self.results.multi_hand_landmarks):
                myHand={}
                ## lmList
                mylmList = []
                xList = []
                yList = []
                for id, lm in enumerate(handLms.landmark):
                    px, py = int(lm.x * w), int(lm.y * h)
                    mylmList.append([px, py])
                    xList.append(px)
                    yList.append(py)

                ## bbox
                xmin, xmax = min(xList), max(xList)
                ymin, ymax = min(yList), max(yList)
                boxW, boxH = xmax - xmin, ymax - ymin
                bbox = xmin, ymin, boxW, boxH
                cx, cy = bbox[0] + (bbox[2] // 2), \
                         bbox[1] + (bbox[3] // 2)

                myHand["lmList"] = mylmList
                myHand["bbox"] = bbox
                myHand["center"] =  (cx, cy)

                if flipType:
                    if handType.classification[0].label =="Right":
                        myHand["type"] = "Left"
                    else:
                        myHand["type"] = "Right"
                else:myHand["type"] = handType.classification[0].label
                allHands.append(myHand)

                ## draw
                if draw:
                    self.mpDraw.draw_landmarks(img, handLms,
                                               self.mpHands.HAND_CONNECTIONS)
                    cv2.rectangle(img, (bbox[0] - 20, bbox[1] - 20),
                                  (bbox[0] + bbox[2] + 20, bbox[1] + bbox[3] + 20),
                                  (255, 0, 255), 2)
                    cv2.putText(img,myHand["type"],(bbox[0] - 30, bbox[1] - 30),cv2.FONT_HERSHEY_PLAIN,
                                2,(255, 0, 255),2)
        if draw:
            return allHands, img
        else:
            return allHands

    def fingersUp(self, myHand):
        """
        Finds how many fingers are open and returns them in a list.
        Considers left and right hands separately.
        :return: List of which fingers are up (1 = up, 0 = down)
        """
        fingers = []  # defined up front so an empty list is returned when no hand is tracked
        myHandType = myHand["type"]
        myLmList = myHand["lmList"]
        if self.results.multi_hand_landmarks:
            # Thumb
            if myHandType == "Right":
                if myLmList[self.tipIds[0]][0] > myLmList[self.tipIds[0] - 1][0]:
                    fingers.append(1)
                else:
                    fingers.append(0)
            else:
                if myLmList[self.tipIds[0]][0] < myLmList[self.tipIds[0] - 1][0]:
                    fingers.append(1)
                else:
                    fingers.append(0)

            # 4 Fingers
            for id in range(1, 5):
                if myLmList[self.tipIds[id]][1] < myLmList[self.tipIds[id] - 2][1]:
                    fingers.append(1)
                else:
                    fingers.append(0)
        return fingers

    def findDistance(self, p1, p2, img=None):
        """
        Find the distance between two points given as (x, y) pixel coordinates.
        :param p1: Point 1 (x, y)
        :param p2: Point 2 (x, y)
        :param img: Optional image to draw the output on.
        :return: Distance between the points
                 Line information (x1, y1, x2, y2, cx, cy)
                 Image with output drawn (only when img is given)
        """

        x1, y1 = p1
        x2, y2 = p2
        cx, cy = (x1 + x2) // 2, (y1 + y2) // 2
        length = math.hypot(x2 - x1, y2 - y1)
        info = (x1, y1, x2, y2, cx, cy)
        if img is not None:
            cv2.circle(img, (x1, y1), 15, (255, 0, 255), cv2.FILLED)
            cv2.circle(img, (x2, y2), 15, (255, 0, 255), cv2.FILLED)
            cv2.line(img, (x1, y1), (x2, y2), (255, 0, 255), 3)
            cv2.circle(img, (cx, cy), 15, (255, 0, 255), cv2.FILLED)
            return length, info, img
        else:
            return length, info


        


def main():
    cap = cv2.VideoCapture(0)
    detector = HandDetector(detectionCon=0.8, maxHands=2)
    while True:
        # Get image frame
        success, img = cap.read()
        # Find the hand and its landmarks
        hands, img = detector.findHands(img)  # with draw
        # hands = detector.findHands(img, draw=False)  # without draw

        if hands:
            # Hand 1
            hand1 = hands[0]
            lmList1 = hand1["lmList"]  # List of 21 Landmark points
            bbox1 = hand1["bbox"]  # Bounding box info x,y,w,h
            centerPoint1 = hand1['center']  # center of the hand cx,cy
            handType1 = hand1["type"]  # Handtype Left or Right

            fingers1 = detector.fingersUp(hand1)

            if len(hands) == 2:
                # Hand 2
                hand2 = hands[1]
                lmList2 = hand2["lmList"]  # List of 21 Landmark points
                bbox2 = hand2["bbox"]  # Bounding box info x,y,w,h
                centerPoint2 = hand2['center']  # center of the hand cx,cy
                handType2 = hand2["type"]  # Hand Type "Left" or "Right"

                fingers2 = detector.fingersUp(hand2)

                # Find Distance between two Landmarks. Could be same hand or different hands
                length, info, img = detector.findDistance(lmList1[8], lmList2[8], img)  # with draw
                # length, info = detector.findDistance(lmList1[8], lmList2[8])  # without draw
        # Display
        cv2.imshow("Image", img)
        cv2.waitKey(1)


if __name__ == "__main__":
    main()
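
The HandDetector class above can also be saved as a local module and reused by the virtual-keyboard script in the next section. A minimal sketch, assuming the class is saved in a file named HandTrackingModule.py (the filename is an assumption, not from the original article):

# Minimal sketch: read one frame and detect hands with the class above,
# assuming it is saved as HandTrackingModule.py (assumed filename)
import cv2
from HandTrackingModule import HandDetector

cap = cv2.VideoCapture(0)
detector = HandDetector(detectionCon=0.8, maxHands=2)
success, img = cap.read()
if success:
    hands, img = detector.findHands(img)  # draw=True, so (hands, img) is returned
    print(len(hands), "hand(s) detected")
cap.release()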

Virtual keyboard input

import cv2
from cvzone.HandTrackingModule import HandDetector
from time import sleep
import pyautogui
import cvzone
from pynput.keyboard import Key, Controller

cap = cv2.VideoCapture(0)
cap.set(3, 1280)  # frame width
cap.set(4, 720)   # frame height

# Hand gesture detector
detector = HandDetector(detectionCon=0.8)
keyboard = Controller()

# Keyboard key layout
keys = [['Q', 'W', 'E', 'R', 'T', 'Y', 'U', 'I', 'O', 'P'],
        ['A', 'S', 'D', 'F', 'G', 'H', 'J', 'K', 'L', ';'],
        ['Z', 'X', 'C', 'V', 'B', 'N', 'M', ',', '.', '/']]



class Button():
    def __init__(self, pos, text, size=[50, 50]):
        self.pos = pos
        self.text = text
        self.size = size
   
   
buttonList = []
finalText = ''
for j in range(len(keys)):
    for x, key in enumerate(keys[j]):
        # Create a Button object for every key and collect them in buttonList
        buttonList.append(Button([60 * x + 20, 100 + j * 60], key))


def drawAll(img,buttonList):
    for button in buttonList:
        x, y = button.pos
        w, h = button.size
        cvzone.cornerRect(img, (x, y, w, h), 20, rt=0)
        cv2.rectangle(img, button.pos, (x + w, y + h), (255, 0, 255), cv2.FILLED)
        cv2.putText(img, button.text, (x + 10, y + 40),
                    cv2.FONT_HERSHEY_PLAIN, 3, (255, 255, 255), 2)
    return img


while True:
    success, img = cap.read()
    # Detect hands. findPosition() exists in the HandDetector class defined above
    # (and in older cvzone releases); newer cvzone versions expose landmarks via findHands() instead.
    hand = detector.findHands(img)
    lmList = detector.findPosition(img, True)

    # img = mybutton.draw(img)
    img = drawAll(img, buttonList)
    if lmList:
        for button in buttonList:
            x, y = button.pos
            w, h = button.size
            # Index fingertip (landmark 8) hovering over this key?
            if x < lmList[8][1] < x + w and y < lmList[8][2] < y + h:
                cv2.rectangle(img, (x-5,y-5), (x + w + 5, y + h + 5), (175, 0, 175), cv2.FILLED)
                cv2.putText(img, button.text, (x + 10, y + 40),
                            cv2.FONT_HERSHEY_PLAIN, 3, (255, 255, 255), 2)

                # Distance between index fingertip (8) and middle fingertip (12):
                # a "pinch" below the threshold is treated as a key press
                l, _, _ = detector.findDistance(lmList[8][1:], lmList[12][1:], img)
                print('Distance between middle finger (12) and index finger (8):', l)
                if l < 120:
                    # Note: keyboard.press() and pyautogui.typewrite() both send the key,
                    # so the character may be typed twice; one of the two is enough
                    keyboard.press(button.text)
                    pyautogui.typewrite(f"{button.text}", 0.5)
                    cv2.rectangle(img, button.pos, (x + w, y + h), (0, 255, 0), cv2.FILLED)
                    cv2.putText(img, button.text, (x + 10, y + 40),
                                cv2.FONT_HERSHEY_PLAIN, 3, (255, 255, 255), 2)
                    finalText += button.text
                    print('Currently selected key:', button.text)
                    sleep(0.2)
    cv2.rectangle(img, (20,350), (600, 400), (175, 0, 175), cv2.FILLED)
    cv2.putText(img, finalText, (20, 390),
                cv2.FONT_HERSHEY_PLAIN, 3, (255, 255, 255), 4)
    cv2.imshow("Image",img)
    cv2.waitKey(1)

 

Finally: the code is fully open source and ready to use; you only need to install the required Python packages.
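
As a rough guide (package names inferred from the imports above; versions are not pinned), the dependencies can be installed with:

pip install opencv-python mediapipe cvzone pyautogui pynput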

Here is another simple gesture-controlled virtual keyboard example in Python; it requires the OpenCV and pyautogui libraries:

```python
import cv2
import numpy as np
import pyautogui

# Position of the virtual keyboard window
keyboard_x, keyboard_y = 100, 100

# HSV colour range used to detect skin
lower_skin = np.array([0, 20, 70], dtype=np.uint8)
upper_skin = np.array([20, 255, 255], dtype=np.uint8)

# Open the camera
cap = cv2.VideoCapture(0)

while True:
    # Read a frame
    ret, frame = cap.read()

    # Flip the frame horizontally (mirror view)
    frame = cv2.flip(frame, 1)

    # Convert the colour space to HSV
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

    # Build a mask that keeps skin-coloured pixels
    mask = cv2.inRange(hsv, lower_skin, upper_skin)

    # Morphological opening to remove noise
    kernel = np.ones((3, 3), np.uint8)
    mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)

    # Find the largest contour
    contours, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    if len(contours) > 0:
        contour = max(contours, key=cv2.contourArea)

        # Build the convex hull and run convexity-defect detection
        hull = cv2.convexHull(contour)
        defects = cv2.convexityDefects(contour, cv2.convexHull(contour, returnPoints=False))

        if defects is not None:
            for i in range(defects.shape[0]):
                s, e, f, d = defects[i, 0]
                start = tuple(contour[s][0])
                end = tuple(contour[e][0])
                far = tuple(contour[f][0])

                # Lengths of the two sides and the hypotenuse of the triangle
                a = np.sqrt((end[0] - start[0])**2 + (end[1] - start[1])**2)
                b = np.sqrt((far[0] - start[0])**2 + (far[1] - start[1])**2)
                c = np.sqrt((end[0] - far[0])**2 + (end[1] - far[1])**2)
                angle = np.arccos((b**2 + c**2 - a**2) / (2*b*c)) * 180 / np.pi

                # Angles below 90 degrees are treated as fingers
                if angle < 90:
                    # Draw a circle at the finger position
                    cv2.circle(frame, far, 5, (0, 255, 0), -1)

                    # Move the virtual mouse according to the finger position
                    x, y = far
                    x = int(x * 1366 / 640) + keyboard_x
                    y = int(y * 768 / 480) + keyboard_y
                    pyautogui.moveTo(x, y)

    # Show the virtual keyboard
    cv2.imshow('Keyboard', cv2.imread('keyboard.png'))
    cv2.moveWindow('Keyboard', keyboard_x, keyboard_y)

    # Show the frame and wait for a key event
    cv2.imshow('Frame', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# Release the camera and close all windows
cap.release()
cv2.destroyAllWindows()
```

This code captures gestures from the camera and moves the mouse over the virtual keyboard according to the finger position. Make sure the keyboard position set in the code matches the correct position on your screen, and replace `keyboard.png` with your own virtual keyboard image.
