使用摄像头追踪人脸,通过检测血液流动引起的面部色素的微小变化,实现实时脉搏评估。
效果如下(演示视频):
由于这是通过比较面部色素的变化来评估脉搏,所以光线、人体移动、不同角度、不同电脑摄像头等因素均会影响评估效果。实验原理是面部色素对比,识别效果存在一定误差,各位小伙伴且当娱乐。代码如下:
import cv2
import numpy as np
import dlib
import time
from scipy import signal
# Module-level tuning constants for the pulse-detection pipeline.
WINDOW_TITLE = 'Pulse Observer'  # Display-window name (presumably passed to cv2 window calls — consumers are outside this chunk)
BUFFER_MAX_SIZE = 500 # Max number of recent ROI average values to keep buffered
MAX_VALUES_TO_GRAPH = 50 # Number of recent ROI average values to show in the pulse graph
MIN_HZ = 0.83 # 50 BPM - minimum allowed heart rate (band-pass low cutoff)
MAX_HZ = 3.33 # 200 BPM - maximum allowed heart rate (band-pass high cutoff)
MIN_FRAMES = 100 # Minimum number of frames required before heart rate is computed. Higher values are slower, but
# more accurate.
DEBUG_MODE = False  # Debug/diagnostics flag; its consumers are not visible in this chunk
# Creates the specified Butterworth filter and applies it.
def butterworth_filter(data, low, high, sample_rate, order=5):
    """Band-pass filter *data* with a Butterworth filter.

    Args:
        data: 1-D sequence of samples to filter.
        low: Low cutoff frequency in Hz.
        high: High cutoff frequency in Hz.
        sample_rate: Sampling rate of *data* in Hz.
        order: Filter order (default 5).

    Returns:
        Filtered signal as a NumPy array (same length as *data*).
    """
    # scipy.signal.butter expects cutoffs normalized to the Nyquist frequency.
    nyquist_rate = sample_rate * 0.5
    numerator, denominator = signal.butter(
        order, [low / nyquist_rate, high / nyquist_rate], btype='band')
    # Apply as a causal (forward-only) IIR filter.
    return signal.lfilter(numerator, denominator, data)
# Gets the region of interest for the forehead.
# Gets the region of interest for the forehead.
def get_forehead_roi(face_points):
    """Return the forehead bounding box as ``(left, right, top, bottom)``.

    Args:
        face_points: dlib full_object_detection; ``parts()`` yields the
            68 facial landmarks (assumed — TODO confirm against the caller).
    """
    # Gather landmark coordinates into an array so min/max slicing is easy.
    coords = np.array([(p.x, p.y) for p in face_points.parts()], dtype=float)
    # Landmarks 21 and 22 bound the gap between the inner eyebrow ends;
    # the forehead box spans that gap horizontally.
    min_x = int(coords[21, 0])
    max_x = int(coords[22, 0])
    min_y = int(min(coords[21, 1], coords[22, 1]))
    max_y = int(max(coords[21, 1], coords[22, 1]))
    # Extend upward by the box width to cover the forehead area.
    top = min_y - (max_x - min_x)
    # NOTE(review): bottom scales the absolute y coordinate by 0.98 rather
    # than shrinking the box height — preserved as-is from the original.
    bottom = max_y * 0.98
    return int(min_x), int(max_x), int(top), int(bottom)
# Gets the region of interest for the nose.
def get_nose_roi(face_points):
    """Return the nose/cheek bounding box as ``(left, right, top, bottom)``.

    Args:
        face_points: dlib full_object_detection; ``parts()`` yields the
            68 facial landmarks (assumed — TODO confirm against the caller).
    """
    coords = np.array([(p.x, p.y) for p in face_points.parts()], dtype=float)
    # Nose and cheeks: landmarks 36 and 45 give the horizontal extent,
    # 28 (nose bridge) and 33 (nose tip) the vertical extent.
    left = int(coords[36, 0])
    right = int(coords[45, 0])
    min_y = int(coords[28, 1])
    max_y = int(coords[33, 1])
    # NOTE(review): the 2% nudge scales the absolute y coordinates, not the
    # box height — preserved exactly as in the original.
    top = min_y + (min_y * 0.02)
    bottom = max_y + (max_y * 0.02)
    return int(left), int(right), int(top), int(bottom)
# Gets region of interest that includes forehead, eyes, and nose.
# Note: Combination of forehead and nose performs better. This is probably because this ROI includes eyes,
# and eye blinking adds noise.
def get_full_roi(face_points):
points = np.zeros((len(face_points.parts()), 2))
for i, part in enu