# Run the main entry point: the camera opens automatically; hold the middle mouse button to track faces.
import cv2 as cv
import pyautogui as ui
"""封装鼠标检测人脸"""
model_bin = "C:/zk/work/code/python/opencv4/opencv_tutorial_data-master/models/face_detector/opencv_face_detector_uint8.pb"
config_text = "C:/zk/work/code/python/opencv4/opencv_tutorial_data-master/models/face_detector/opencv_face_detector.pbtxt";
net = cv.dnn.readNetFromTensorflow(model=model_bin, config=config_text)
cap = cv.VideoCapture(0)
def testvodeo():
    """Show a live camera preview and attach the face-tracking mouse callback.

    Reads frames from the global ``cap`` and displays them in the 'Video'
    window until ESC (27) is pressed, then releases the camera and closes
    all windows.  Face tracking itself is triggered from ``mouse_callback``
    (middle mouse button).  Uses the module-level ``net``; the redundant
    per-call model reload of the original has been removed (it was an
    unused local).
    """
    # Create the window first so the mouse callback can be attached to it.
    cv.namedWindow('Video')
    cv.setMouseCallback('Video', mouse_callback)
    try:
        while True:
            ret, frame = cap.read()
            if not ret:
                # Camera read failed (device busy/unplugged) — stop cleanly
                # instead of passing None to imshow.
                break
            cv.imshow('Video', frame)
            # ESC exits the preview loop.
            if cv.waitKey(1) == 27:
                break
    finally:
        # Always release the camera and destroy windows, even on error.
        cap.release()
        cv.destroyAllWindows()
# 定义鼠标回调函数
def mouse_callback(event, x, y, flags, param):
    """OpenCV mouse callback for the 'Video' window.

    Middle-button press: enter a face-tracking loop — each frame is run
    through the global DNN ``net``; detections with confidence > 0.5 are
    boxed in red and the system cursor is moved toward the face.
    Middle-button release: fall back to a plain preview loop.
    Either loop exits on ESC (27).

    Parameters follow the cv.setMouseCallback contract
    (event code, pointer x/y, modifier flags, user param).
    """
    if event == cv.EVENT_MBUTTONDOWN:
        print('Middle button pressed at: ({}, {})'.format(x, y))
        while True:
            ret, frame = cap.read()
            if not ret:
                # Check BEFORE touching frame: a failed read returns None
                # and the original crashed on frame.shape here.
                break
            h, w, c = frame.shape
            # Preprocess to the 300x300 mean-subtracted input the SSD
            # face model expects (NCHW blob).
            blob = cv.dnn.blobFromImage(frame, 1.0, (300, 300),
                                        (104.0, 177.0, 123.0), False, False)
            net.setInput(blob)
            outs = net.forward()  # output shape: 1x1xNx7
            for detection in outs[0, 0, :, :]:
                score = float(detection[2])
                if score > 0.5:  # confidence threshold; closer to 1 = more face-like
                    # Detection coords are normalized [0,1]; scale to pixels.
                    left = detection[3] * w
                    top = detection[4] * h
                    right = detection[5] * w
                    bottom = detection[6] * h
                    # Offsets (280, 380) map window-local coordinates to
                    # screen coordinates — presumably the window position on
                    # the author's desktop; TODO confirm / compute dynamically.
                    ui.moveTo(left + 280, top + 380, duration=0.001)
                    cv.rectangle(frame, (int(left), int(top)),
                                 (int(right), int(bottom)), (0, 0, 255), 2, 8, 0)
            cv.imshow("Video", frame)
            if cv.waitKey(1) == 27:  # ESC stops tracking
                break
    elif event == cv.EVENT_MBUTTONUP:
        print('Middle button released at: ({}, {})'.format(x, y))
        # Plain preview loop (no detection) until ESC.
        while True:
            ret, frame = cap.read()
            if not ret:
                break
            cv.imshow('Video', frame)
            if cv.waitKey(1) == 27:
                break
if __name__ == '__main__':
    # Script entry point: open the camera preview and wire up face tracking.
    testvodeo()