[OpenCV] dlib-based facial landmark detection and eye-closure detection

Landmark localization

# Import packages
from collections import OrderedDict
import numpy as np
import argparse
import dlib
import cv2
#https://ibug.doc.ic.ac.uk/resources/facial-point-annotations/
#http://dlib.net/files/

0. Facial landmark index dictionaries and other utility functions

# Index ranges of the key facial regions; an OrderedDict keeps the top-to-bottom traversal order fixed
FACIAL_LANDMARKS_68_IDXS = OrderedDict([
	("mouth", (48, 68)),
	("right_eyebrow", (17, 22)),
	("left_eyebrow", (22, 27)),
	("right_eye", (36, 42)),
	("left_eye", (42, 48)),
	("nose", (27, 36)),
	("jaw", (0, 17))
])

FACIAL_LANDMARKS_5_IDXS = OrderedDict([
	("right_eye", (2, 3)),
	("left_eye", (0, 1)),
	("nose", (4))
])
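Each entry maps a region name to a (start, end) index range into the 68 landmarks, so a region can be sliced directly out of the landmark array. A minimal sketch, assuming shape is already a 68x2 NumPy array of (x, y) coordinates as produced by the shape_to_np helper defined below:

# Look up the index range of a region and slice out its points
(start, end) = FACIAL_LANDMARKS_68_IDXS["right_eye"]
right_eye_pts = shape[start:end]  # rows 36..41: the six right-eye landmarks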

def shape_to_np(shape, dtype="int"):
	'''Convert a dlib shape object to a NumPy ndarray'''
	# Create a 68x2 array of zeros
	coords = np.zeros((shape.num_parts, 2), dtype=dtype)
	# Loop over every landmark and store its (x, y) coordinates
	for i in range(0, shape.num_parts):
		coords[i] = (shape.part(i).x, shape.part(i).y)  # (x, y) of landmark i
	return coords  # 68x2 array of coordinates
 
def visualize_facial_landmarks(image, shape, colors=None, alpha=0.75):
	'''Draw every facial region as a colored, semi-transparent overlay'''
	# Create two copies of the image:
	# one overlay to draw on and one for the final output image
	overlay = image.copy()  # the regions are painted onto this copy
	output = image.copy()   # the overlay is blended back onto this untouched copy
	# Set up one color per region
	if colors is None:
		# Fixed default colors
		colors = [(19, 199, 109), (79, 76, 240), (230, 159, 23),
			(168, 100, 168), (158, 163, 32),
			(163, 38, 32), (180, 42, 220)]

	# Loop over every region
	for (i, name) in enumerate(FACIAL_LANDMARKS_68_IDXS.keys()):
		# Get the index range and slice out the points of this region
		(j, k) = FACIAL_LANDMARKS_68_IDXS[name]
		pts = shape[j:k]
		# The jawline is not a closed region,
		if name == "jaw":
			# so connect its points with line segments
			for l in range(1, len(pts)):
				ptA = tuple(pts[l - 1])
				ptB = tuple(pts[l])
				cv2.line(overlay, ptA, ptB, colors[i], 2)
		# For every other region, draw the filled convex hull
		else:
			hull = cv2.convexHull(pts)
			cv2.drawContours(overlay, [hull], -1, colors[i], -1)

	# Blend the overlay onto the original with the given ratio; output is the untouched copy
	cv2.addWeighted(overlay, alpha, output, 1 - alpha, 0, output)
	return output

1. Face detector

# Frontal face detector
detector = dlib.get_frontal_face_detector()

2. Landmark predictor

predictor = dlib.shape_predictor(args["shape_predictor"])
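The args dictionary is used here and below but is never built in this first script; a minimal argparse block near the top of the script would be needed, for example (the --image flag is an assumption, mirroring the style of the fatigue-detection script further down):

# Hypothetical argument parsing for the landmark-localization script
ap = argparse.ArgumentParser()
ap.add_argument("-p", "--shape-predictor", required=True,
	help="path to facial landmark predictor")
ap.add_argument("-i", "--image", required=True,
	help="path to input image")
args = vars(ap.parse_args())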

3. Read the image and preprocess

image = cv2.imread(args["image"])
(h,w) = image.shape[:2]
width = 500
r = width/float(w)
dim = (width,int(h*r))
image = cv2.resize(image,dim,interpolation = cv2.INTER_AREA)
gray = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)

4. Process each detected face

rects = detector(gray,1)
# Loop over the detected face boxes
for (i, rect) in enumerate(rects):
	# Localize the landmarks inside the face box,
	# then convert them to an ndarray
	shape = predictor(gray, rect)  # the landmark predictor defined above
	shape = shape_to_np(shape)     # the helper function written above

	# Loop over every facial region
	for (name, (i, j)) in FACIAL_LANDMARKS_68_IDXS.items():
		clone = image.copy()
		cv2.putText(clone, name, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)

		# Draw a dot at every landmark of this region
		for (x, y) in shape[i:j]:
			cv2.circle(clone, (x, y), 3, (0, 0, 255), -1)
		# Extract the ROI of this region
		(x, y, w, h) = cv2.boundingRect(np.array([shape[i:j]]))
		
		roi = image[y:y+h,x:x+w]
		(h,w) = roi.shape[:2]
		width = 250
		r = width/float(w)
		dim = (width,int(h*r))
		roi = cv2.resize(roi,dim,interpolation=cv2.INTER_AREA)

		# Show this region and the annotated image
		cv2.imshow("ROI", roi)
		cv2.imshow("Image", clone)
		cv2.waitKey(0)

	# Show all regions at once
	output = visualize_facial_landmarks(image, shape)
	cv2.imshow("Image", output)
	cv2.waitKey(0)	

Fatigue detection

The EAR (eye aspect ratio) is computed from Euclidean distances between eye landmarks, and a closure is decided over consecutive frames.
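For reference, with p1..p6 denoting the six landmarks of one eye in dlib's order (p1 and p4 are the corners, p2/p3 the upper lid, p5/p6 the lower lid), the EAR defined in the paper linked in the code below is

EAR = (‖p2 − p6‖ + ‖p3 − p5‖) / (2 · ‖p1 − p4‖)

It stays roughly constant while the eye is open and drops toward zero when the eye closes.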


# Imports needed by this standalone fatigue-detection script
from collections import OrderedDict
from scipy.spatial import distance as dist
import numpy as np
import argparse
import time
import dlib
import cv2

FACIAL_LANDMARKS_68_IDXS = OrderedDict([
	("mouth", (48, 68)),
	("right_eyebrow", (17, 22)),
	("left_eyebrow", (22, 27)),
	("right_eye", (36, 42)),
	("left_eye", (42, 48)),
	("nose", (27, 36)),
	("jaw", (0, 17))
])

# http://vision.fe.uni-lj.si/cvww2016/proceedings/papers/05.pdf
def eye_aspect_ratio(eye):
	# Vertical distances (upper vs. lower eyelid landmarks)
	A = dist.euclidean(eye[1], eye[5])
	B = dist.euclidean(eye[2], eye[4])
	# Horizontal distance (between the eye corners)
	C = dist.euclidean(eye[0], eye[3])
	# Eye aspect ratio
	ear = (A + B) / (2.0 * C)
	return ear
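A quick sanity check of the function on hypothetical coordinates (the points are made up, ordered the way dlib returns them: corner, two upper-lid points, corner, two lower-lid points):

# Synthetic "open eye": corners 6 px apart, lids 4 px apart
open_eye = np.array([(0, 3), (2, 1), (4, 1), (6, 3), (4, 5), (2, 5)])
print(eye_aspect_ratio(open_eye))  # (4 + 4) / (2 * 6) = 0.67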
 
# Command-line arguments
ap = argparse.ArgumentParser()
ap.add_argument("-p", "--shape-predictor", required=True,
	help="path to facial landmark predictor")
ap.add_argument("-v", "--video", type=str, default="",
	help="path to input video file")
args = vars(ap.parse_args())
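Assuming the 68-point model shape_predictor_68_face_landmarks.dat has been downloaded from the dlib.net link above, the script would typically be launched with something like python detect_blinks.py --shape-predictor shape_predictor_68_face_landmarks.dat --video test.mp4 (the script and video file names here are only placeholders).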
 
# Decision parameters; what is actually counted is the transition from closed eyes back to open
EYE_AR_THRESH = 0.3
EYE_AR_CONSEC_FRAMES = 3  # at least 3 consecutive below-threshold frames count as one closure

# Initialize the counters
COUNTER = 0
TOTAL = 0

# Detection and localization tools
print("[INFO] loading facial landmark predictor...")
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(args["shape_predictor"])

# Get the index ranges of the two eyes; the other facial regions are not needed here
(lStart, lEnd) = FACIAL_LANDMARKS_68_IDXS["left_eye"]
(rStart, rEnd) = FACIAL_LANDMARKS_68_IDXS["right_eye"]

# Open the video
print("[INFO] starting video stream thread...")
vs = cv2.VideoCapture(args["video"])
#vs = FileVideoStream(args["video"]).start()
time.sleep(1.0)

def shape_to_np(shape, dtype="int"):
	# Create a 68x2 array
	coords = np.zeros((shape.num_parts, 2), dtype=dtype)
	# Loop over every landmark
	# and store its (x, y) coordinates
	for i in range(0, shape.num_parts):
		coords[i] = (shape.part(i).x, shape.part(i).y)
	return coords

# Loop over every frame
while True:
	# Preprocess the frame
	frame = vs.read()[1]
	if frame is None:
		break
	
	(h, w) = frame.shape[:2]
	width = 1200  # resize to a fixed width so the face region appears larger
	r = width / float(w)
	dim = (width, int(h * r))
	frame = cv2.resize(frame, dim, interpolation=cv2.INTER_AREA)
	gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

	# Detect faces
	rects = detector(gray, 0)

	# Loop over every detected face
	for rect in rects:
		# Get the landmark coordinates and convert to an ndarray
		shape = predictor(gray, rect)
		shape = shape_to_np(shape)

		# Compute the EAR of each eye
		leftEye = shape[lStart:lEnd]
		rightEye = shape[rStart:rEnd]
		leftEAR = eye_aspect_ratio(leftEye)
		rightEAR = eye_aspect_ratio(rightEye)

		# Average the two eyes
		ear = (leftEAR + rightEAR) / 2.0

		# Draw the eye contours
		leftEyeHull = cv2.convexHull(leftEye)
		rightEyeHull = cv2.convexHull(rightEye)
		cv2.drawContours(frame, [leftEyeHull], -1, (0, 255, 0), 1)
		cv2.drawContours(frame, [rightEyeHull], -1, (0, 255, 0), 1)

		# Check against the threshold (EYE_AR_THRESH = 0.3)
		if ear < EYE_AR_THRESH:
			COUNTER += 1

		else:
			# If the eyes were closed for enough consecutive frames, count one blink/closure
			if COUNTER >= EYE_AR_CONSEC_FRAMES:  # at least 3 consecutive frames
				TOTAL += 1

			# Reset the consecutive-frame counter
			COUNTER = 0

		# Display the blink count and the current EAR
		cv2.putText(frame, "Blinks: {}".format(TOTAL), (10, 30),
			cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
		cv2.putText(frame, "EAR: {:.2f}".format(ear), (300, 30),
			cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)

	cv2.imshow("Frame", frame)
	key = cv2.waitKey(100) & 0xFF
 
	if key == 27:
		break

vs.release()
cv2.destroyAllWindows()
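Note that TOTAL is only incremented on the frame where the EAR rises back above the threshold, so one long eye closure counts as a single event rather than once per frame; COUNTER is then reset so the next closure starts a fresh streak.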