最近看了几篇 FPS 游戏自动爆头的视频,于是自己也用 Python 写了一个类似的程序。
程序先做一次预热推理;之后按下空格键会在屏幕固定区域截屏,分析画面中的人体姿态,并把鼠标移动到第一个人的头部位置。
程序简单,大家可以在此基础上自由发挥。
import sys
import os
import cv2
from PIL import ImageGrab
import keyboard
from datetime import datetime
import pyautogui
# Load OpenPose's Python bindings from a local build. Both sys.path (for
# the pyopenpose module) and PATH (for its DLLs) must point at the build
# output before the import is attempted.
try:
    # NOTE: change this to your own OpenPose directory
    openpose_dir_path = 'D:/human-pose/openpose'
    sys.path.append(openpose_dir_path + '/bin/python/openpose/Release')
    # Make the native DLLs next to the bindings discoverable at import time.
    os.environ['PATH'] = (os.environ['PATH'] + ';'
                          + openpose_dir_path + '/bin;'
                          + openpose_dir_path + '/x64/Release;')
    import pyopenpose as op
except ImportError as e:
    # The bindings only exist if OpenPose was built with BUILD_PYTHON.
    print('Error: OpenPose library could not be found. '
          'Did you enable `BUILD_PYTHON` in CMake and have '
          'this Python script in the right folder?')
    # raise e
    print(e)
    sys.exit(-1)
# Configure and start the OpenPose wrapper. Only the body model is
# enabled; the optional detectors below are kept disabled for speed.
params = {"model_folder": openpose_dir_path + '/models/'}
# params["face"] = True
# params["face_detector"] = 2
# params["hand"] = True
# params["body"] = 0

opWrapper = op.WrapperPython()
opWrapper.configure(params)
opWrapper.start()
# --- Warm-up: run one inference so that later hotkey-triggered calls are
# fast. (The original script assigned image_path three times in a row;
# only the last assignment took effect, so the dead ones are removed.)
image_path = 'person/zidane.jpg'

imageToProcess = cv2.imread(image_path)
# cv2.imread returns None (instead of raising) when the file is missing
# or unreadable; feeding None to OpenPose would fail later with a much
# less helpful error, so bail out early.
if imageToProcess is None:
    print('Error: could not read warm-up image:', image_path)
    sys.exit(-1)

datum = op.Datum()
datum.cvInputData = imageToProcess
# faceRectangles = [
#     op.Rectangle(330.119385, 277.532715, 48.717274, 48.717274),
#     op.Rectangle(24.036991, 267.918793, 65.175171, 65.175171),
#     op.Rectangle(151.803436, 32.477852, 108.295761, 108.295761),
# ]
# datum.faceRectangles = faceRectangles
opWrapper.emplaceAndPop(op.VectorDatum([datum]))

# Dump the detected keypoints of the warm-up image.
print("Body keypoints: \n" + str(datum.poseKeypoints))  # Pose Output Format (BODY_25)
print("Face keypoints: \n" + str(datum.faceKeypoints))
print("Left hand keypoints: \n" + str(datum.handKeypoints[0]))
print("Right hand keypoints: \n" + str(datum.handKeypoints[1]))
# cv2.imshow("OpenPose 1.7.0 - test image", datum.cvOutputData)
def keyboard_callback(key):
    """Global key-press handler.

    SPACE: grab a 640x640 screenshot, run pose inference on it, and move
    the mouse to the first detected person's nose keypoint (BODY_25
    keypoint 0, i.e. the "head").
    ESC: exit the program.

    key: a keyboard.KeyboardEvent delivered by keyboard.on_press.
    """
    print('keyboard code:', key.scan_code)
    if key.scan_code == 57:  # space
        sd = datetime.now().timestamp()
        # Save the screenshot to disk so each capture can be inspected later.
        image_path = 'person/screenshot_' + str(datetime.now().timestamp()) + '.jpg'
        img = ImageGrab.grab(bbox=(0, 0, 640, 640))  # take a screenshot
        img.save(image_path, "JPEG")
        datum = op.Datum()
        imageToProcess = cv2.imread(image_path)
        datum.cvInputData = imageToProcess
        opWrapper.emplaceAndPop(op.VectorDatum([datum]))
        ed = datetime.now().timestamp()
        print('cost time:', ed - sd)
        points = datum.poseKeypoints
        # BUG FIX: poseKeypoints is None when no person is detected;
        # indexing it unguarded crashed the callback.
        if points is None or len(points) == 0:
            return
        # points[0][0] is (x, y, confidence) of the first person's nose.
        # Confidence 0 means the keypoint was not actually detected.
        x, y, conf = points[0][0]
        if conf == 0:
            return
        print(x, y)
        pyautogui.moveTo(float(x), float(y))
    elif key.scan_code == 1:
        # ESC. BUG FIX: the original compared the KeyboardEvent object
        # itself to the int 27 (`key == 27`), which never matched, so ESC
        # never exited. scan code 1 is ESC on Windows — confirm on other
        # layouts.
        sys.exit(0)
# Install the global key hook and keep the process alive.
keyboard.on_press(keyboard_callback)
# BUG FIX: keyboard.on_press only registers the hook on a background
# (daemon) thread — without a blocking call here the script falls off the
# end and exits immediately, so the hotkey never fires. Block the main
# thread until ESC is pressed (the callback handles ESC as well).
keyboard.wait('esc')