
 
# USAGE
# python detect_blinks.py --shape-predictor shape_predictor_68_face_landmarks.dat --video blink_detection_demo.mp4
# python detect_blinks.py --shape-predictor shape_predictor_68_face_landmarks.dat

# import the necessary packages
from scipy.spatial import distance as dist
# from imutils.video import FileVideoStream
# from imutils.video import VideoStream
from imutils import face_utils
import numpy as np
import argparse
import imutils
import time
import dlib
import cv2
import pandas as pd
import os
import sys
import warnings
if not sys.warnoptions:
    warnings.simplefilter("ignore")




def eye_aspect_ratio(eye):
    # compute the euclidean distances between the two sets of
    # vertical eye landmark (x, y)-coordinates
    A = dist.euclidean(eye[1], eye[5])
    B = dist.euclidean(eye[2], eye[4])
    # compute the euclidean distance between the horizontal
    # eye landmark (x, y)-coordinates
    C = dist.euclidean(eye[0], eye[3])
    ear = (A + B) / (2.0 * C)
    return ear
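
# Illustrative sketch (not part of the original script): sanity-check the EAR
# formula on hypothetical landmark coordinates. An open eye gives a moderate
# EAR; closing the eye collapses the two vertical distances toward zero.
def _demo_eye_aspect_ratio():
    eye = np.array([(0, 0), (1, -1), (2, -1), (3, 0), (2, 1), (1, 1)])
    print(eye_aspect_ratio(eye))  # (2 + 2) / (2 * 3) ~= 0.667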
 

def mouth_aspect_ratio_old(mouth):
    # compute the euclidean distances between the three sets of
    # vertical mouth landmark (x, y)-coordinates, and the horizontal
    # corner-to-corner distance
    A = dist.euclidean(mouth[1], mouth[7])
    B = dist.euclidean(mouth[2], mouth[6])
    C = dist.euclidean(mouth[3], mouth[5])
    D = dist.euclidean(mouth[0], mouth[4])
    mar = (A + B + C) / (2.0 * D)
    return mar
 

def mouth_aspect_ratio(mouth):
    # simplified variant: keep only the central vertical distance B,
    # scaled by 10, over the horizontal corner-to-corner distance D
    A = dist.euclidean(mouth[1], mouth[7])
    B = dist.euclidean(mouth[2], mouth[6])
    C = dist.euclidean(mouth[3], mouth[5])
    D = dist.euclidean(mouth[0], mouth[4])
    mar = B * 10 / (2.0 * D)
    return mar
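
# Illustrative sketch with hypothetical inner-mouth coordinates: compare the
# classic 3-distance MAR with the simplified single-distance variant above.
# Points 0 and 4 are the mouth corners.
def _demo_mouth_aspect_ratio():
    mouth = np.array([(0, 0), (1, -1), (2, -1.5), (3, -1),
                      (4, 0), (3, 1), (2, 1.5), (1, 1)])
    print(mouth_aspect_ratio_old(mouth))  # (2 + 3 + 2) / (2 * 4) = 0.875
    print(mouth_aspect_ratio(mouth))      # 3 * 10 / (2 * 4) = 3.75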

 

 
def fetchTimePairs(path):
    # fall back to a hardcoded stub pair when no timestamp file is present
    # (the original code returned this unconditionally, which left the
    # file parser below unreachable)
    if not os.path.exists(path):
        return [(0.0, 4.5)]
    with open(path, 'r') as f:
        line = f.readline().strip()
        timestamps = line.split(' ')
        if len(timestamps) % 2 != 0:
            raise ValueError('odd number of timestamps, error!')

        timePairs = []
        for idx in range(0, len(timestamps), 2):
            timePairs.append((float(timestamps[idx]), float(timestamps[idx + 1])))
        return timePairs
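
# Illustrative sketch: the timestamp file is assumed to hold one line of
# space-separated start/end seconds, e.g. '0.0 4.5 6.0 9.5'.
def _demo_fetchTimePairs():
    import tempfile
    with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False) as f:
        f.write('0.0 4.5 6.0 9.5')
        tmp_path = f.name
    print(fetchTimePairs(tmp_path))  # -> [(0.0, 4.5), (6.0, 9.5)]
    os.remove(tmp_path)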


def getTimeVerticalLine(timeSecond, setting):
    # map a timestamp to an x-offset proportional to its frame position
    height, width, capReader = setting.height, setting.width, setting.capReader
    correspond_framePos = capReader.time2frameIdx(timeSecond)
    x_offset = (correspond_framePos / capReader.frameCounts) * width

    # build a vertical polyline of 100 points at that x-offset
    verticalLine_list = []
    for i in np.linspace(0, height, 100):
        verticalLine_list.append((x_offset, i))
    vertical_data = np.array([verticalLine_list]).astype(np.int32)
    return vertical_data


def drawLines(frame, pairs, setting):
    # cycle through three colors (note: OpenCV expects BGR channel order)
    colors = [(0, 0, 255), (0, 255, 0), (255, 0, 0)]
    for idx, pair in enumerate(pairs):
        vert_data_start = getTimeVerticalLine(timeSecond=pair[0], setting=setting)
        cv2.polylines(frame, vert_data_start, False, colors[idx % 3], 2)
        vert_data_end = getTimeVerticalLine(timeSecond=pair[1], setting=setting)
        cv2.polylines(frame, vert_data_end, False, colors[idx % 3], 2)
    return frame

 
class Draw_Setting:
    def __init__(self, height, width, capReader):
        self.height = height
        self.width = width
        self.capReader = capReader
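

# Illustrative sketch on a blank canvas, using a stub reader so no video file
# is needed; the stub's attributes mirror the VideoCapture class defined below.
def _demo_drawLines():
    class _StubReader:
        frameCounts = 100
        time_total = 10.0
        def time2frameIdx(self, timeSecond):
            return int(self.frameCounts * timeSecond / self.time_total)
    canvas = np.zeros((200, 400, 3), dtype=np.uint8)
    setting = Draw_Setting(height=200, width=400, capReader=_StubReader())
    drawLines(canvas, [(2.0, 5.0)], setting)  # draws two vertical marker lines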


def getTimeMask(timeSeries, timeRange):
    start, end = timeRange
    return (timeSeries < end)  &  (timeSeries > start)
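
# Illustrative sketch (hypothetical values): note both endpoints are exclusive.
def _demo_getTimeMask():
    ts = pd.Series([1.0, 5.0, 7.0])
    print(getTimeMask(ts, (4.0, 8.0)).tolist())  # -> [False, True, True]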
 
 
def ifPassedWithinRange(full_df_info, timePairs, MOUTH_AR_THRESH):
    print('-----------------------------------------------------')
    df_info_list = []
    pairs_num = len(timePairs)
    for idx, pair in enumerate(timePairs):
        mask_single = getTimeMask(full_df_info.time, pair)
        # copy the slice so the column assignments below do not touch the original
        df_info_selected = full_df_info.loc[mask_single].copy()
        df_info_selected.loc[:, 'rangeID'] = idx
        df_info_selected.loc[:, 'GreaterThanThreshcount'] = (df_info_selected.MAR > MOUTH_AR_THRESH).sum()
        df_info_selected.loc[:, 'SmallerThanThreshcount'] = (df_info_selected.MAR < MOUTH_AR_THRESH).sum()
        df_info_list.append(df_info_selected)
    df = pd.concat(df_info_list, axis=0)
    # a range "passes" when it contains MAR values both above and below the
    # threshold; the whole check passes when more than half of the ranges do
    # (the original always returned True regardless of this test)
    countLargerThanThresh = (df.groupby(by='rangeID').mean().GreaterThanThreshcount > 0).sum()
    countSmallerThanThresh = (df.groupby(by='rangeID').mean().SmallerThanThreshcount > 0).sum()
    passed = countLargerThanThresh > pairs_num * 0.5 and countSmallerThanThresh > pairs_num * 0.5
    if passed:
        print(True, 'passed')
    return passed
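
# Illustrative sketch (hypothetical readings): each of the two ranges contains
# MAR values on both sides of the threshold, so the check passes.
def _demo_ifPassedWithinRange():
    df = pd.DataFrame({'time': [0.5, 1.0, 2.5, 3.0],
                       'MAR':  [0.2, 1.1, 0.3, 1.2]})
    print(ifPassedWithinRange(df, [(0.0, 1.5), (2.0, 3.5)], MOUTH_AR_THRESH=0.80))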

def drawStatistics(frame, current_time, blink_times, mouth_times, MAR):
    # the original referenced undefined names (cur_time, TOTAL_blink,
    # TOTAL_Mouth); use the function parameters instead
    cv2.putText(frame, 'Time: {:.3}'.format(current_time), (10, 110), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
    cv2.putText(frame, "Blinks: {}".format(blink_times), (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
    cv2.putText(frame, "MAR: {:.2f}".format(MAR), (300, 60), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
    cv2.putText(frame, "Mouth Opens: {}".format(mouth_times), (10, 60), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
    return frame
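
# Illustrative sketch: render the overlay text onto a blank frame.
def _demo_drawStatistics():
    canvas = np.zeros((200, 400, 3), dtype=np.uint8)
    drawStatistics(canvas, current_time=1.23, blink_times=2, mouth_times=1, MAR=0.85)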

def savePlotFigure(args, df):
    from matplotlib import pyplot as plt
    fig = plt.figure()
    plt.plot( df.time, df.MAR)
    fig.savefig(args['purename']+'.png', dpi=fig.dpi)

def list_fullPath(dirPath):
    paths = []
    for i in os.listdir(dirPath):
        paths.append(os.path.join(dirPath, i))
    return paths

'''
ap = argparse.ArgumentParser()
ap.add_argument("-p", "--shape-predictor", required=True,
    help="path to facial landmark predictor")
ap.add_argument("-v", "--video", type=str, default="",
    help="path to input video file")
ap.add_argument("--visualize", type=bool, default=True,
    help="visualize the detection")
args = vars(ap.parse_args())
'''




import skvideo.io

# example input left over from development (unused at module level;
# verifyByMAR opens its own reader):
# videoPath = r'D:\BaiduYunDownload\server\trunk\public\358695524688244\video\2017922113932.mp4'
# reader = skvideo.io.vreader(videoPath)


 
class VideoCapture():
    def __init__(self, videoPath):
        self.reader = skvideo.io.vreader(videoPath)
        self.metadata = skvideo.io.ffprobe(videoPath)
        self.frameCounts = self.getFrameCounts()
        self.curFrameId = 0
        self.time_total = self.getTimeTotal()

    def getTimeTotal(self):
        return float(self.metadata['video']['@duration'])

    def getFrameCounts(self):
        return int(self.metadata['video']['@nb_frames'])

    def getFPS(self):
        # ffprobe reports r_frame_rate as a fraction string, e.g. '30000/1001',
        # so it cannot be passed to float() directly
        num, den = self.metadata['video']['@r_frame_rate'].split('/')
        return float(num) / float(den)

    def time2frameIdx(self, timeSecond):
        return int(self.frameCounts * timeSecond / self.time_total)

    def frameIdx2time(self, framePos):
        return float(self.time_total * framePos / self.frameCounts)
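
# Illustrative sketch: probe a local clip ('sample.mp4' is a hypothetical path)
# and convert between timestamps and frame indices.
def _demo_VideoCapture():
    cap = VideoCapture('sample.mp4')
    print(cap.frameCounts, cap.time_total, cap.getFPS())
    print(cap.time2frameIdx(1.0))  # frame index at t = 1 s
    print(cap.frameIdx2time(30))   # timestamp of frame 30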
    
    

def verifyByMAR(videoPath):

    predictor = dlib.shape_predictor('shape_predictor_68_face_landmarks.dat')
    args = {}
    args['video'] = os.path.basename(videoPath)
    args['video_path'] = videoPath
    args['shape_predictor'] = 'shape_predictor_68_face_landmarks.dat'
    args['visualize'] = True
    args['purename'] = args['video'].split('.')[0]
    timestamp_path = args['purename'] + '.txt'
    pairs = fetchTimePairs(timestamp_path)

    # initialize the frame counters and the total number of mouth opens
    # (the unused blink counters from the original were dropped; only the
    # mouth is tracked here)
    MOUTH_AR_THRESH = 0.80
    MOUTH_AR_CONSEC_FRAMES = 3  # renamed from EYE_AR_CONSEC_FRAMES: it gates mouth events
    COUNTER_Mouth = 0
    TOTAL_Mouth = 0

    # initialize dlib's face detector (HOG-based) and then create
    # the facial landmark predictor
    print("[INFO] loading facial landmark predictor...")
    detector = dlib.get_frontal_face_detector()

    # loop over frames from the video stream
    cap_my = VideoCapture(args['video_path'])
    start = time.time()
    mar_list = []
    frame_list = []
    time_list = []

    frame_pos = 0
    for frame in cap_my.reader:
        # count each frame exactly once (the original incremented twice),
        # and process only every third frame to save time
        frame_pos += 1
        if frame_pos % 3 == 0:
            height, width = frame.shape[:2]
            new_height = 500
            new_width = int(width / height * new_height)
            height, width = new_height, new_width
            frame = cv2.resize(frame, (width, height))

            # skvideo yields RGB frames, so convert from RGB (not BGR)
            gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
            rects = detector(gray, 0)
            for rect in rects:
                shape = predictor(gray, rect)
                shape = face_utils.shape_to_np(shape)
                outerMouth = shape[48:60]
                innerMouth = shape[60:68]
                mar = mouth_aspect_ratio(innerMouth)

                mar_list.append(mar)
                frame_list.append(frame_pos)
                time_list.append(cap_my.frameIdx2time(frame_pos))
                if mar > MOUTH_AR_THRESH:
                    COUNTER_Mouth += 1
                else:
                    # count one mouth-open event after enough consecutive
                    # frames, and reset the counter whenever the mouth closes
                    if COUNTER_Mouth >= MOUTH_AR_CONSEC_FRAMES:
                        TOTAL_Mouth += 1
                    COUNTER_Mouth = 0

    # collect per-frame MAR readings and check them against the time ranges
    df_info = pd.DataFrame(data=np.vstack([mar_list, frame_list, time_list]).T, columns=['MAR', 'frame_ID', 'time'])
    pass_Result = ifPassedWithinRange(df_info, pairs, MOUTH_AR_THRESH)

    end = time.time()
    print('time spent', end - start)
    print('---------------------finished---------------------')

    return {'mouthTimes': TOTAL_Mouth, 'ifPassed': pass_Result}


resultDict = verifyByMAR('./i7_0_2_8_9_161609.mp4')
print(resultDict)
