Iris Recognition System: Python Implementation

First, a couple of demo screenshots of the running system:

[demo screenshots]

main.py


"""
Demonstration of the GazeTracking library.
Check the README.md for complete documentation.
"""
import pandas as pd
import cv2
import dlib
import numpy as np
import saccademodel
import fixationmodel
from imutils import resize
import os
from GazeTracking_master.gaze_tracking.gaze_tracking import GazeTracking
import matplotlib.pyplot as plt
from GazeTracking_master.gaze_tracking.FaceAligner import FaceAligner
# import calibration_
import collections
def gaze_():
# calibration_matrix = calibration_.main()
# def gaze_(path, path_csv):
    gaze = GazeTracking()
    webcam = cv2.VideoCapture(0)
    frame_cnt = 0
    Frame = []
    Gaze = []
    Left_gaze = []
    Right_gaze = []
    calibration_points = []
    X_gaze = []
    # The face detector, landmark predictor and aligner only need to be created
    # once, not rebuilt on every frame.
    _face_detector = dlib.get_frontal_face_detector()
    _predictor = dlib.shape_predictor("C:/Users/zhangjing/Anaconda3/envs/Django_1/GazeTracking_master/gaze_tracking/trained_models/shape_predictor_68_face_landmarks.dat")
    Fa = FaceAligner(_face_detector, _predictor, (0.35, 0.35), desiredFaceWidth=720, desiredFaceHeight=1280)
    while True:
        # Get a new frame from the webcam
        ret, frame = webcam.read()
        if not ret:
            print('no frame')
            break
        # Send this frame to GazeTracking to analyze it
        # frame = cv2.rotate(frame, cv2.ROTATE_90_CLOCKWISE)  # rotate by 90 degrees
        # frame = cv2.resize(src=frame, dsize=(720, 1280))  # shrink the frame
        cv2.imshow('frame', frame)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = _face_detector(gray)
        if len(faces) == 0:
            # Skip frames in which no face is detected
            continue
        aligned_face = Fa.align(frame, gray, faces[0])
        frame = aligned_face

        gaze.refresh(frame)
        frame, left, right = gaze.annotated_frame()
        Left_gaze.append(left)
        Right_gaze.append(right)

        text = ""

        if gaze.is_blinking():
            text = "Blinking"
        elif gaze.is_right():
            text = "Looking right"
        elif gaze.is_left():
            text = "Looking left"
        elif gaze.is_center():
            text = "Looking center"

        cv2.putText(frame, text, (20, 60), cv2.FONT_HERSHEY_DUPLEX, 1.4, (147, 58, 31), 2)

        left_pupil = gaze.pupil_left_coords()
        right_pupil = gaze.pupil_right_coords()
        cv2.putText(frame, "Left pupil:  " + str(left_pupil), (20, 130), cv2.FONT_HERSHEY_DUPLEX, 0.9, (147, 58, 31), 1)
        cv2.putText(frame, "Right pupil: " + str(right_pupil), (20, 165), cv2.FONT_HERSHEY_DUPLEX, 0.9, (147, 58, 31), 1)

        if left_pupil is None:
            continue
        left_pupil = list(left_pupil)
        right_pupil = list(right_pupil)
        Gaze.append(left_pupil)
        # point = [left_pupil[0],left_pupil[1], left_pupil[0]*left_pupil[1], 1]
        # calibration_point = np.dot(calibration_matrix[0][0], point)
        # print(calibration_point)
        # calibration_point_1 = np.transpose(calibration_point)
        # print(calibration_point_1)
        # calibration_point_2 = np.asarray(calibration_point_1)
        # print(calibration_point_2)
        # calibration_point = list(calibration_point)
        # calibration_points.append([np.float(calibration_point[0]),abs(np.float(calibration_point[1]))])
        Frame.append(frame_cnt)

        # cv2.moveWindow("trans:" + frame, 1000, 100)
        cv2.imshow('Demo', frame)
        # cv2.imwrite("E:/ffmpeg-latest-win64-static/eye_frame_2/face/"+str(frame_cnt)+'.png', frame)
        frame_cnt = frame_cnt + 1
        # print(frame_cnt)

        if cv2.waitKey(1) == 27:
             break
        if frame_cnt % 10 == 0:
            # Every 10 frames, print a frequency-weighted estimate of the left-eye
            # pupil x, y and radius (value * count, normalized by the total frame count).
            Left_gaze = [x for x in Left_gaze if x]  # drop empty entries
            x_left = [x[0] for x in Left_gaze]
            x_left = collections.Counter(x_left)
            left_x = []; left_y = []; left_r = []
            for c in x_left:
                left_x.append((c / frame_cnt) * (x_left[c]))
            y_left = [x[1] for x in Left_gaze]
            y_left = collections.Counter(y_left)
            for c in y_left:
                left_y.append((c / frame_cnt) * (y_left[c]))
            r_left = [x[2] for x in Left_gaze]
            r_left = collections.Counter(r_left)
            for c in r_left:
                left_r.append((c / frame_cnt) * (r_left[c]))
            print(int(np.sum(left_x)), int(np.sum(left_y)), int(np.sum(left_r)))

    # Release the camera and close the windows once the capture loop exits
    webcam.release()
    cv2.destroyAllWindows()





"""
        if frame_cnt%30 == 0:
            framerate = 30.0
            # for i in range(len(calibration_points)):
                # a = calibration_points[0][0]/640
                # b = calibration_points[0][1] / 360
                # x = calibration_points[i][0] / a
                # y = calibration_points[i][1] / b
                # print(x,y)
                # Gaze.append([x,y])
                # Y_gaze.append(y)

            saccad = saccademodel.fit(Gaze)
            fixation = fixationmodel.fit(Gaze)
            source_points = saccad.get('source_points')
            saccade_points = saccad.get('saccade_points')
            target_points = saccad.get('target_points')
            mean_squared_error_s = saccad.get('mean_squared_error')
            centroid = fixation.get('centroid')
            mean_squared_error_f = fixation.get('mean_squared_error')
            print(saccad)
            print('source_points:',source_points)
            print('saccade_points:',saccade_points)
            print('target_points:',target_points)
            print('mean_squared_error:',mean_squared_error_s)
            print(fixation)
            print('centroid:',centroid)
            print('mean_squared_error:', mean_squared_error_f)
            saccade_X=[];saccade_Y=[];target_X=[];target_Y=[]
            for i in range(len(saccade_points)):
                saccade_x = saccade_points[i][0]
                saccade_y = saccade_points[i][1]
                saccade_X.append(saccade_x)
                saccade_Y.append(saccade_y)
            for j in range(len(target_points)):
                target_x = target_points[j][0]
                target_y = target_points[j][1]
                target_X.append(target_x)
                target_Y.append(target_y)
            plt.figure(figsize=(12.8, 7.2))
            # plt.xlim([0,1280])
            # plt.ylim([0, 720])
            plt.plot(saccade_X,saccade_Y)
            plt.scatter(target_X,target_Y, marker='o', label='target', s=20., c='b')
            plt.scatter(centroid[0],centroid[1],marker='o', label='centroid', s=35., c='r')
            # plt.savefig('C:/Users/zhangjing/Documents/Bandicam/0_left/'+str(frame_cnt)+'.png')
            Gaze.clear()
"""

def calibration(gaze_x_points, gaze_y_points, calibration_matrix):
    calibration_points = []
    for x, y in zip(gaze_x_points, gaze_y_points):
        point = [x, y, 1, 1]
        calibration_point = np.dot(calibration_matrix, point)
        calibration_point = np.transpose(calibration_point)
        calibration_point = np.asarray(calibration_point)
        calibration_points.append([int(calibration_point[0][0]), int(calibration_point[1][0])])
    return calibration_points
    # data = {'Frame':Frame,'L_pupil_X': L_pupil_X,'L_pupil_Y': L_pupil_Y, 'R_pupil_X': R_pupil_X,'R_pupil_Y': R_pupil_Y}
    # cols = ['Frame','L_pupil_X','L_pupil_Y', 'R_pupil_X','R_pupil_Y']
    # result = pd.DataFrame(data, columns=cols,)
    # result.to_csv(path_csv + 'gaze.csv',index=False)

if __name__ == '__main__':
    gaze_()
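
A note on calibration: the calibration() helper above expects a calibration_matrix that maps pupil coordinates to screen coordinates, but the calibration_ module that would produce it is commented out and not included in this post. As a minimal sketch only, such a 2x4 matrix could be fitted by least squares from a few known fixation targets using the bilinear feature vector [x, y, x*y, 1] that appears in the commented-out code (the active calibration() builds [x, y, 1, 1] instead). The helper name estimate_calibration_matrix and the four corner targets below are illustrative assumptions, not part of the original project.

import numpy as np

def estimate_calibration_matrix(pupil_points, screen_points):
    """Fit a 2x4 matrix A so that screen ~= A @ [px, py, px*py, 1] (sketch only).

    pupil_points  : list of (px, py) pupil centres recorded while the user
                    fixates known on-screen targets
    screen_points : list of (sx, sy) pixel positions of those targets
    """
    P = np.array([[px, py, px * py, 1.0] for px, py in pupil_points])  # N x 4 design matrix
    S = np.array(screen_points, dtype=float)                           # N x 2 target positions
    # Least-squares solution of P @ X = S; needs at least four non-degenerate targets
    X, _, _, _ = np.linalg.lstsq(P, S, rcond=None)
    return np.matrix(X.T)  # 2 x 4, usable with np.dot(calibration_matrix, point)

# Hypothetical usage with four corner targets on a 1280x720 screen:
# pupil_points  = [(310, 240), (370, 238), (312, 268), (372, 266)]
# screen_points = [(0, 0), (1280, 0), (0, 720), (1280, 720)]
# calibration_matrix = estimate_calibration_matrix(pupil_points, screen_points)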

pupil.py


import numpy as np
import cv2
import matplotlib.pyplot as plt
# from scipy.misc import imresize  # imresize was removed from SciPy and is not used in this file
from skimage.morphology import erosion
from PIL import Image, ImageDraw

np.set_printoptions(threshold=np.inf)
from math import sqrt
import time
class Pupil(object):
    """
    This class detects the iris of an eye and estimates
    the position of the pupil
    """

    def __init__(self, eye_frame, threshold):
        self.iris_frame = None
        self.threshold = threshold
        self.x = None
        self.y = None
        self.r = None

        self.detect_iris(eye_frame)

    @staticmethod
    def image_processing(eye_frame, threshold):
        """Performs operations on the eye frame to isolate the iris

        Arguments:
            eye_frame (numpy.ndarray): Frame containing an eye and nothing else
            threshold (int): Threshold value used to binarize the eye frame

        Returns:
            A frame with a single element representing the iris
        """

        cv2.imshow("eye", eye_frame)
        kernel = np.ones((3, 3), np.uint8)
        new_frame = cv2.bilateralFilter(eye_frame, 10, 15, 15)
        clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
        new_frame = clahe.apply(new_frame)
        new_frame = cv2.erode(new_frame, kernel, iterations=3)
        new_frame = cv2.threshold(new_frame, threshold, 255, cv2.THRESH_BINARY)[1]
        # HoughCircles: the input must be an 8-bit single-channel (grayscale) image.
        # dp is the inverse ratio of accumulator resolution to image resolution,
        # minDist is the minimum distance between detected circle centres,
        # param1 is the upper Canny edge threshold and param2 is the accumulator
        # threshold for circle centres (the smaller it is, the more circles are found).
        # The result is a vector of 3-element floats (x, y, radius), or None.
        circles = cv2.HoughCircles(eye_frame, cv2.HOUGH_GRADIENT, 1, 1, param1=50, param2=30,
                                   minRadius=0, maxRadius=0)

        # eye_frame = cv2.resize(eye_frame, (0, 0), fx=4, fy=4, interpolation=cv2.INTER_CUBIC)

        return new_frame, circles, eye_frame

    def detect_iris(self, eye_frame):
        """Detects the iris and estimates the position of the iris by
        calculating the centroid.

        Arguments:
            eye_frame (numpy.ndarray): Frame containing an eye and nothing else
        """
        self.iris_frame,circles,pupil_frame = self.image_processing(eye_frame, self.threshold)
        contours, _ = cv2.findContours(self.iris_frame, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)[-2:]
        contours = sorted(contours, key=cv2.contourArea)
        if circles is None:
            try:
                moments = cv2.moments(contours[-2])
                self.x = int(moments['m10'] / moments['m00'])
                self.y = int(moments['m01'] / moments['m00'])
                self.r = 15
                mask = np.zeros(pupil_frame.shape[:2], np.uint8)  # blank mask the size of the eye frame
                mask = cv2.circle(mask, (self.x, self.y), 16, (255, 255, 255), -1)
                image = cv2.add(pupil_frame, np.zeros(np.shape(pupil_frame), dtype=np.uint8), mask=mask)
                m_hist = cv2.calcHist([pupil_frame], [0], mask, [256], [0, 256])  # grey-level histogram of pixels inside the mask
                plt.imshow(image, 'gray')
                plt.plot(m_hist, 'red')
                plt.savefig('E:/ffmpeg-latest-win64-static/eye_frame_2/hist/' + str(time.time()) + '.png')
                plt.clf()
                pupil_frame = cv2.threshold(pupil_frame, 25, 255, cv2.THRESH_BINARY)[1]
                cv2.imwrite('E:/ffmpeg-latest-win64-static/eye_frame_2/eye/' + str(time.time()) + '.png',
                            pupil_frame)
                # ellipse = np.zeros((row, col, dim))
                # cv2.circle(ellipse, (center_row, center_col), radius, (255, 255, 255), 2)
                # print('no circle found, iris:', self.x, self.y)
            except (IndexError, ZeroDivisionError):
                pass
        else:
            circles = np.uint16(np.around(circles))  # round the circle centres and radii to integers
            if len(circles[0, :]) == 1:

                if circles[0][0][0] == 0:
                    # no valid circle: fall back to the original contour-based method
                    try:
                        moments = cv2.moments(contours[-2])
                        self.x = int(moments['m10'] / moments['m00'])
                        self.y = int(moments['m01'] / moments['m00'])
                        self.r = 15
                        mask = np.zeros(pupil_frame.shape[:2], np.uint8)  # blank mask the size of the eye frame
                        mask = cv2.circle(mask, (self.x, self.y), 16, (255, 255, 255), -1)
                        image = cv2.add(pupil_frame, np.zeros(np.shape(pupil_frame), dtype=np.uint8), mask=mask)
                        m_hist = cv2.calcHist([pupil_frame], [0], mask, [256], [0, 256])  # grey-level histogram of pixels inside the mask
                        plt.imshow(image, 'gray')
                        plt.plot(m_hist, 'red')
                        plt.savefig('E:/ffmpeg-latest-win64-static/eye_frame_2/hist/' + str(time.time()) + '.png')
                        plt.clf()
                        pupil_frame = cv2.threshold(pupil_frame, 25, 255, cv2.THRESH_BINARY)[1]
                        cv2.imwrite('E:/ffmpeg-latest-win64-static/eye_frame_2/eye/' + str(time.time()) + '.png',
                                    pupil_frame)
                        # print('no circle found, iris:', self.x, self.y)
                    except (IndexError, ZeroDivisionError):
                        pass
                else:
                    # exactly one valid circle was found

                    for i in circles[0, :]:
                        # cv2.circle(eye_frame, (i[0], i[1]), i[2], (0, 255, 0), 2)  # draw the circle
                        # cv2.circle(eye_frame, (i[0], i[1]), 2, (0, 255, 0), 2)  # draw the circle centre

                        try:
                            moments = cv2.moments(contours[-2])
                            self.x = int(i[0])
                            self.y = int(i[1])
                            self.r = int(i[2])
                            mask = np.zeros(pupil_frame.shape[:2], np.uint8)  # blank mask the size of the eye frame
                            mask = cv2.circle(mask, (self.x, self.y), 16, (255, 255, 255), -1)
                            image = cv2.add(pupil_frame, np.zeros(np.shape(pupil_frame), dtype=np.uint8), mask=mask)
                            m_hist = cv2.calcHist([pupil_frame], [0], mask, [256], [0, 256])  # grey-level histogram of pixels inside the mask
                            plt.imshow(image, 'gray')
                            plt.plot(m_hist, 'red')
                            plt.savefig('E:/ffmpeg-latest-win64-static/eye_frame_2/hist/' + str(time.time()) + '.png')
                            plt.clf()
                            pupil_frame = cv2.threshold(pupil_frame, 25, 255, cv2.THRESH_BINARY)[1]
                            cv2.imwrite('E:/ffmpeg-latest-win64-static/eye_frame_2/eye/' + str(time.time()) + '.png',
                                        pupil_frame)

                        except (IndexError, ZeroDivisionError):
                            pass
            else:
                # multiple circles were found: average their centres and radii
                X = []; Y = []; R = []
                for i in circles[0, :]:
                    # cv2.circle(eye_frame, (i[0], i[1]), i[2], (0, 255, 0), 2)  # draw the circle
                    # cv2.circle(eye_frame, (i[0], i[1]), 2, (0, 255, 0), 2)  # draw the circle centre
                    X.append(i[0])
                    Y.append(i[1])
                    R.append(i[2])
                x_mean = np.mean(X)
                y_mean = np.mean(Y)
                r_mean = np.mean(R)
                try:
                    moments = cv2.moments(contours[-2])
                    self.x = int(x_mean)
                    self.y = int(y_mean)
                    self.r = int(r_mean)
                    mask = np.zeros(pupil_frame.shape[:2], np.uint8)  # blank mask the size of the eye frame
                    mask = cv2.circle(mask, (self.x, self.y), 16, (255, 255, 255), -1)
                    image = cv2.add(pupil_frame, np.zeros(np.shape(pupil_frame), dtype=np.uint8), mask=mask)
                    m_hist = cv2.calcHist([pupil_frame], [0], mask, [256], [0, 256])  # grey-level histogram of pixels inside the mask
                    plt.imshow(image, 'gray')
                    plt.plot(m_hist,'red')
                    plt.savefig('E:/ffmpeg-latest-win64-static/eye_frame_2/hist/' + str(time.time()) + '.png')
                    plt.clf()
                    pupil_frame = cv2.threshold(pupil_frame, 25, 255, cv2.THRESH_BINARY)[1]
                    cv2.imwrite('E:/ffmpeg-latest-win64-static/eye_frame_2/eye/' + str(time.time()) + '.png',
                                pupil_frame)

                    # print('multiple circles, iris:', self.x, self.y)
                except (IndexError, ZeroDivisionError):
                    pass

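For quick testing, the Pupil class above only needs a grayscale eye crop and a binarization threshold; its estimate ends up in pupil.x, pupil.y and pupil.r. A minimal usage sketch follows, assuming the class is importable from pupil.py; the file name eye_crop.png and the threshold value 20 are placeholders, and the hard-coded plt.savefig/cv2.imwrite paths inside detect_iris must exist (or be edited) for it to run unchanged.

import cv2
from pupil import Pupil  # the class defined above

eye_frame = cv2.imread('eye_crop.png', cv2.IMREAD_GRAYSCALE)  # tight grayscale crop around one eye
pupil = Pupil(eye_frame, 20)                                  # 20 is a placeholder threshold
print('pupil centre:', pupil.x, pupil.y, 'estimated radius:', pupil.r)
cv2.waitKey(0)  # image_processing() calls cv2.imshow("eye", ...), so keep the window alive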

The rest of the code can be downloaded from the resource library.
