利用D415读取 需要标记的人脸face_recognition的距离 Python + win10

1、下载安装face_recognition

face_recognition 链接

2、文件结构如下

3、上代码

read_data.py

#-*-coding:utf8-*-
import os
#import cv2
#import numpy as np
import face_recognition
from read_img import endwith
import scipy.misc
 
def read_file(path):
    """Build per-person face encodings from a dataset directory.

    ``path`` is expected to contain one subdirectory per person; every
    ``*.jpg`` inside a subdirectory is encoded with face_recognition.

    Returns a tuple ``(img_encoding, label_list, dir_counter)`` where
    ``img_encoding[i]`` is the list of 128-d encodings for person ``i``
    (same order as ``read_name_list``), ``label_list`` holds one label per
    successfully encoded image, and ``dir_counter`` is the number of
    subdirectories seen.
    """
    label_list = []
    dir_counter = 0

    # One sub-list per person directory, grown dynamically instead of the
    # previous fixed-size-5 list (which raised IndexError for >5 people).
    img_encoding = []
    for child_dir in os.listdir(path):
        child_path = os.path.join(path, child_dir)
        img_encoding.append([])

        for dir_image in os.listdir(child_path):
            if endwith(dir_image, 'jpg'):
                # load_image_file is face_recognition's own RGB loader;
                # it replaces the removed scipy.misc.imread.
                img = face_recognition.load_image_file(
                    os.path.join(child_path, dir_image))
                encodings = face_recognition.face_encodings(img)
                if not encodings:
                    # Skip images where no face was detected instead of
                    # crashing on encodings[0].
                    continue
                img_encoding[dir_counter].append(encodings[0])
                label_list.append(dir_counter)
        dir_counter += 1

    return img_encoding, label_list, dir_counter

 
def read_name_list(path):
    """Return the entry names directly inside *path*.

    Each subdirectory of the dataset corresponds to one person, so the
    returned list doubles as the label -> person-name lookup table.
    """
    return [entry for entry in os.listdir(path)]


 
if __name__ == '__main__':
    # Smoke test: load the dataset encodings and the person-name list.
    # (Fixed the 'label_lsit' typo and the opaque 'tt' local name.)
    img_list, label_list, counter = read_file('./dataset')
    name_list = read_name_list('./dataset')
    print(img_list)
    print(name_list)

read_img.py

import os
import cv2
 
def readAllImg(path, *suffix):
    """Read every image under *path* whose name matches one of *suffix*.

    Returns a list whose first element is the directory's base name,
    followed by the cv2 images that were read; returns None (after
    printing "Error") when listing/reading raises IOError.
    """
    try:
        collected = [os.path.basename(path)]
        for entry in os.listdir(path):
            # suffix is forwarded as a single tuple, matching the original
            # call convention (str.endswith accepts a tuple of suffixes).
            if endwith(entry, suffix):
                collected.append(cv2.imread(os.path.join(path, entry)))
    except IOError:
        print ("Error")
    else:
        print ("读取成功")
        return collected

 
def endwith(s, *endstring):
    """Return True when *s* ends with any of the given suffixes.

    Each element of *endstring* may be a str or a tuple of str, since
    str.endswith accepts either form.
    """
    return any(s.endswith(suffix) for suffix in endstring)


camera_face.py

# -*- coding: utf-8 -*-
import face_recognition
import cv2
from read_data import read_name_list
from read_data import read_file
# Open the default webcam (device 0); a video file path could be used instead.
video_capture = cv2.VideoCapture(0)
#video_capture = cv2.VideoCapture('images/2.mp4')


 

# Load the known face encodings (grouped per person directory) and the
# matching directory names; index i into all_encoding lines up with
# name_list[i].
all_encoding, lable_list, counter = read_file('./dataset')
name_list = read_name_list('./dataset')
# Detection state shared across loop iterations; results are reused on the
# frames where process_this_frame is False.
face_locations = []
face_encodings = []
face_names = []
process_this_frame = True

# Main capture loop: grab a frame, run detection/recognition on every other
# frame, and draw the most recent results onto the current frame.
while True:
    ret, frame = video_capture.read()

#    small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
    # NOTE(review): fx=1/fy=1 makes this resize a no-op copy; the commented
    # 0.25 variant above was the downscale-for-speed version.
    small_frame = cv2.resize(frame, (0, 0), fx=1, fy=1)

    if process_this_frame:
        # Locate faces and compute a 128-d encoding for each one.
        face_locations = face_recognition.face_locations(small_frame)
        face_encodings = face_recognition.face_encodings(small_frame, face_locations)
#        print(face_locations)
#        print(face_encodings)
        
        face_names = []
        
        for face_encoding in face_encodings:
            # i tracks which person directory is being compared;
            # j flags whether any known encoding matched this face.
            i = 0
            j = 0
            for t in all_encoding:
                for k in t:
                    match = face_recognition.compare_faces([k], face_encoding)
                    if match[0]:
                        # If several people match, the last matching
                        # directory wins.
                        name = name_list[i]
                        j=1
                i = i+1
            if j == 0:
                name = "unknown"

            face_names.append(name)
            print(i,j)     

    # Toggle so the heavy recognition work runs only every other frame.
    process_this_frame = not process_this_frame

    # Draw a box and name label for each detected face; face_locations
    # tuples are (top, right, bottom, left).
    for (top, right, bottom, left), name in zip(face_locations, face_names):
#        top *= 4
#        right *= 4
#        bottom *= 4
#        left *= 4
        
        # No-op scaling kept from the 0.25-downscale variant above.
        top *= 1
        right *= 1
        bottom *= 1
        left *= 1

        cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255),  2)

        cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), 2)
        font = cv2.FONT_HERSHEY_DUPLEX
        cv2.putText(frame, name, (left+6, bottom-6), font, 1.0, (255, 255, 255), 1)

    cv2.imshow('Video', frame)

    # Quit on 'q'.
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

video_capture.release()
cv2.destroyAllWindows()

rgb_dface.py

import pyrealsense2 as rs
import numpy as np
import cv2
import face_recognition
from read_data import read_name_list
from read_data import read_file


#video_capture = cv2.VideoCapture(0)


# Read frames from the RealSense camera, recognize the faces in view against
# the known dataset, and estimate the distance to the detected face.

# Known encodings grouped per person directory; index i into all_encoding
# lines up with name_list[i].
all_encoding, lable_list, counter = read_file('./dataset')
name_list = read_name_list('./dataset')
# Detection state shared across loop iterations; results are reused on the
# frames where process_this_frame is False.
face_locations = []
face_encodings = []
face_names = []
process_this_frame = True


 
# Configure depth and color streams
pipeline = rs.pipeline()
config = rs.config()
config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)


# Start streaming
pipeline.start(config)
try:
    while True:
 
        # Wait for a coherent pair of frames: depth and color
        frames = pipeline.wait_for_frames()
        
        depth_frame = frames.get_depth_frame()
        color_frame = frames.get_color_frame()
       
        if not depth_frame or not color_frame:
            continue
 
        # Convert images to numpy arrays
        depth_image = np.asanyarray(depth_frame.get_data())
        color_image = np.asanyarray(color_frame.get_data())
        
        # Apply colormap on depth image (image must be converted to 8-bit per pixel first)
        depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_JET)
        
        
        
        if process_this_frame:
            # Locate faces and compute a 128-d encoding for each one.
            face_locations = face_recognition.face_locations(color_image)
            face_encodings = face_recognition.face_encodings(color_image, face_locations)
            
            print(face_locations)
            # NOTE(review): 'sum' shadows the builtin; it accumulates raw
            # z16 depth values (millimetres) over the sampled patch.
            sum=0
#            distance=0
#            centerdistance=0
            if face_locations:
                # face_locations[0] is (top, right, bottom, left), so
                # w = bottom - top, i.e. the face-box height in pixels.
                w=face_locations[0][2]-face_locations[0][0]
                
                # NOTE(review): both ll0 and ll2 derive from the row
                # coordinates (top/bottom) yet are used as the two indices
                # into depth_image; depth_image[row][col] would need a
                # column (left/right based) second index — verify.
                ll0=face_locations[0][0]+w//2
                ll2=face_locations[0][2]+w//2
                cdistance=depth_image[ll0][ll2]
                # z16 depth is in millimetres; convert to metres.
                cdistance=cdistance/1000
                print('the center of distace is',cdistance)
                
                
                # Average the depth over a central patch of the face box.
                # NOTE(review): the loop bounds mix row- and column-derived
                # coordinates (l3/l1 come from left/right but index rows,
                # l0/l2 come from top/bottom but index columns) — confirm
                # against depth_image's (row, col) layout.
                l0=face_locations[0][0]+w//3
                l1=face_locations[0][1]-w//3
                l2=face_locations[0][2]-w//3
                l3=face_locations[0][3]+w//3
                w1=w//3
                for num in range(l3,l1):
                    for i in range(l0,l2):
               
                        sum=sum+depth_image[num][i].astype(np.float32)
                       
#                        print(num,i)
#                        print(depth_image[num][i] )
                # NOTE(review): divides by w1*w1 but the sampled patch is
                # (l1-l3) x (l2-l0) pixels — these are only equal when the
                # face box is square; confirm.
                distance=sum/w1/w1
                # Millimetres -> metres.
                distance=distance/1000
                print('the object of distance is ',distance)
        
    
                
                
            
         
            face_names = []
            # Match each encoding against the known people and assign a name.
            for face_encoding in face_encodings:
                # i tracks which person directory is being compared;
                # j flags whether any known encoding matched this face.
                i = 0
                j = 0
                for t in all_encoding:
                    for k in t:
                        match = face_recognition.compare_faces([k], face_encoding)
                        if match[0]:
                            # If several people match, the last match wins.
                            name = name_list[i]
                            j=1
                    i = i+1
                if j == 0:
                    name = "unknown"
    
                face_names.append(name)
                
        # Toggle so the heavy recognition work runs only every other frame.
        process_this_frame = not process_this_frame
        
        
        
        # Draw a box and name label for each detected face; face_locations
        # tuples are (top, right, bottom, left).
        for (top, right, bottom, left), name in zip(face_locations, face_names):
        
            # No-op scaling kept from an earlier downscaled variant.
            top *= 1
            right *= 1
            bottom *= 1
            left *= 1
    
            cv2.rectangle(color_image, (left, top), (right, bottom), (0, 0, 255),  2)
    
            cv2.rectangle(color_image, (left, bottom - 35), (right, bottom), (0, 0, 255), 2)
            font = cv2.FONT_HERSHEY_DUPLEX
            cv2.putText(color_image, name, (left+6, bottom-6), font, 1.0, (255, 255, 255), 1)
        
        
#        cv2.imshow('RealSense', depth_colormap)
 
        # Stack the color and colorized-depth images side by side.
        images = np.hstack((color_image, depth_colormap))
 
        # Show the combined image.
        cv2.namedWindow('RealSense', cv2.WINDOW_AUTOSIZE)
        cv2.imshow('RealSense', images)
#
 
        key = cv2.waitKey(1)
        # Press esc or 'q' to close the image window
        if key & 0xFF == ord('q') or key == 27:
            cv2.destroyAllWindows()
            break
 
 
finally:
 
    # Stop streaming
    pipeline.stop()
 

4.实测图片

 

欢迎留言讨论 

  • 0
    点赞
  • 7
    收藏
    觉得还不错? 一键收藏
  • 1
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论 1
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值