远程人脸识别之树莓派篇

树莓派python环境的安装

1、打开配置sources.list文件
sudo nano /etc/apt/sources.list
2、用#注释掉原文件内容,用以下内容取代(Ctrl + O 保存再 Ctrl + X 退出):

deb http://mirrors.tuna.tsinghua.edu.cn/raspbian/raspbian/ stretch main contrib non-free rpi
deb-src http://mirrors.tuna.tsinghua.edu.cn/raspbian/raspbian/ stretch main contrib non-free rpi

3、打开配置raspi.list文件

sudo nano /etc/apt/sources.list.d/raspi.list

4、用#注释掉原文件内容,用以下内容取代(Ctrl + O 保存再 Ctrl + X 退出):
deb http://mirror.tuna.tsinghua.edu.cn/raspberrypi/ stretch main ui
deb-src http://mirror.tuna.tsinghua.edu.cn/raspberrypi/ stretch main ui
安装opencv相关的工具

// 安装build-essential、cmake、git和pkg-config
sudo apt-get install build-essential cmake git pkg-config 
// 安装jpeg格式图像工具包
sudo apt-get install libjpeg8-dev 
// 安装tif格式图像工具包
sudo apt-get install libtiff5-dev 
// 安装JPEG-2000图像工具包
sudo apt-get install libjasper-dev 
// 安装png图像工具包
sudo apt-get install libpng12-dev
//视频IO包
sudo apt-get install libavcodec-dev libavformat-dev libswscale-dev libv4l-dev
//gtk2.0
sudo apt-get install libgtk2.0-dev
//优化函数包
sudo apt-get install libatlas-base-dev gfortran
// 下载OpenCV
wget -O opencv-3.4.1.zip https://github.com/Itseez/opencv/archive/3.4.1.zip
// 解压OpenCV
unzip opencv-3.4.1.zip
// 下载OpenCV_contrib库:
wget -O opencv_contrib-3.4.1.zip https://github.com/Itseez/opencv_contrib/archive/3.4.1.zip
// 解压OpenCV_contrib库:
unzip opencv_contrib-3.4.1.zip
// 打开源码文件夹,这里以我修改文章时最新的3.4.1为例
cd opencv-3.4.1
// 新建release文件夹
mkdir release
// 进入release文件夹
cd release
sudo cmake -D CMAKE_BUILD_TYPE=RELEASE \
    -D CMAKE_INSTALL_PREFIX=/usr/local \
    -D OPENCV_EXTRA_MODULES_PATH=~/opencv_contrib-3.4.1/modules \
    -D INSTALL_PYTHON_EXAMPLES=ON \
    -D BUILD_EXAMPLES=ON ..
// 编译,以管理员身份,否则容易出错
sudo make
// 更新动态链接库
sudo ldconfig
// 安装
sudo make install
// 更新动态链接库
sudo ldconfig

现在OpenCV安装好了,开始测试一下吧

# -*- coding:utf-8 -*-
import cv2
import numpy as np

# Smoke test for the OpenCV install: draw a white diagonal line on a
# blank grayscale image and display it until the user presses 'q'.
cv2.namedWindow("gray")
img = np.zeros((512, 512), np.uint8)  # blank 512x512 8-bit grayscale image
cv2.line(img, (0, 0), (511, 511), 255, 5)  # white diagonal line, thickness 5
cv2.imshow("gray", img)  # show the image

# Poll for key presses; exit on 'q'.
while True:
    key = cv2.waitKey(1)
    if key == ord("q"):
        break
# BUG FIX: the original called cv2.destoryWindow (misspelled), which raises
# AttributeError at exit; the correct API name is cv2.destroyWindow.
cv2.destroyWindow("gray")

不知道怎么创建文件的,有两种方法,第一文本文件创建 nano test.py,注意后缀文件的类型.写好之后,用命令python test.py运行.第二种方法:进入文件管理器找到自己的目录之后选择在这里插入图片描述
在这里插入图片描述
配置摄像头
在这里我用的是usb摄像头,确保能用

import numpy as np
import cv2


# Capture from the default USB camera, record every frame to output.mp4,
# and preview both the color feed and a grayscale version. Press 'q' to stop.
capture = cv2.VideoCapture(0)

# Round the camera-reported frame size to the nearest integer for the writer.
frame_w = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH) + 0.5)
frame_h = int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT) + 0.5)

codec = cv2.VideoWriter_fourcc(*'mp4v')
writer = cv2.VideoWriter('output.mp4', codec, 20.0, (frame_w, frame_h))

while capture.isOpened():
    ok, frame = capture.read()
    if not ok:
        break
    writer.write(frame)
    grayscale = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    cv2.imshow('gray', grayscale)
    cv2.imshow('My Camera', frame)
    if (cv2.waitKey(1) & 0xFF) == ord('q'):
        break

writer.release()
capture.release()
cv2.destroyAllWindows()


在这里插入图片描述然后人脸检测

import numpy as np
import cv2

# Record the camera to output.mp4 while drawing a rectangle around every
# face found by the Haar cascade. Press 'q' to stop.
face_detector = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_default.xml')
video = cv2.VideoCapture(0)

frame_w = int(video.get(cv2.CAP_PROP_FRAME_WIDTH) + 0.5)
frame_h = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT) + 0.5)
codec = cv2.VideoWriter_fourcc(*'mp4v')
recorder = cv2.VideoWriter('output.mp4', codec, 20.0, (frame_w, frame_h))

while video.isOpened():
    ok, frame = video.read()
    if not ok:
        break
    recorder.write(frame)
    grayscale = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    detections = face_detector.detectMultiScale(
        grayscale,
        scaleFactor=1.2,
        minNeighbors=5,
        minSize=(20, 20),
    )
    for (x, y, w, h) in detections:
        # Blue box around each detected face (BGR color order).
        cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
        face_gray = grayscale[y:y + h, x:x + w]
        face_color = frame[y:y + h, x:x + w]
    cv2.imshow('gray', frame)
    if (cv2.waitKey(1) & 0xFF) == ord('q'):
        break

recorder.release()
video.release()
cv2.destroyAllWindows()



在这里插入图片描述训练数据集
要注意几个地方:1 创建dataset文件夹 2 把haarcascade_frontalface_default.xml和dataset文件夹放在一起. 3 新建trainer文件夹

import cv2
import numpy as np
from PIL import Image
import os
 
# Path for face image database: directory holding the captured face samples,
# one image file per sample.
path = './dataset'
 
# LBPH recognizer to be trained; Haar cascade locates the face in each image.
recognizer = cv2.face.LBPHFaceRecognizer_create()
detector = cv2.CascadeClassifier("haarcascade_frontalface_default.xml");
 
# function to get the images and label data
def getImagesAndLabels(path):
    """Load every face sample under *path* and return (faceSamples, ids).

    Expects file names of the form ``User.<id>.<count>.jpg`` so that the
    numeric label can be parsed from the second dot-separated field — TODO
    confirm against the capture script that produced the dataset. Each image
    is converted to grayscale and every face the cascade finds in it becomes
    one training sample tagged with that id.
    """
    imagePaths = [os.path.join(path, f) for f in os.listdir(path)]
    faceSamples = []
    ids = []

    for imagePath in imagePaths:
        try:
            # 8-bit grayscale ('L'); the LBPH recognizer trains on gray images.
            PIL_img = Image.open(imagePath).convert('L')
        except (IOError, OSError):
            # ROBUSTNESS FIX: the original crashed on any non-image file in
            # the dataset directory (e.g. .DS_Store, stray text files).
            # Skip unreadable entries instead of aborting the training run.
            continue
        img_numpy = np.array(PIL_img, 'uint8')

        # Label is the second dot-separated field of the file name.
        # (Renamed from `id`, which shadowed the builtin.)
        face_id = int(os.path.split(imagePath)[-1].split(".")[1])
        faces = detector.detectMultiScale(img_numpy)

        for (x, y, w, h) in faces:
            faceSamples.append(img_numpy[y:y + h, x:x + w])
            ids.append(face_id)

    return faceSamples, ids
 
# Gather the samples and train the LBPH model on them.
print ("\n [INFO] Training faces. It will take a few seconds. Wait ...")
samples, labels = getImagesAndLabels(path)
recognizer.train(samples, np.array(labels))

# Persist the trained model into trainer/trainer.yml.
# (write() works on the Pi; save() only worked on Mac.)
recognizer.write('trainer/trainer.yml')

# Report how many distinct people were trained, then exit.
print("\n [INFO] {0} faces trained. Exiting Program".format(len(np.unique(labels))))

在这里插入图片描述在这里插入图片描述训练好之后会在trainer中有一个yml文件
开始识别

import cv2
import numpy as np
import os
import datetime

# Real-time recognition: load the trained LBPH model, find faces in the
# camera feed, and label each with the predicted name plus a confidence
# percentage. Press ESC to quit.
recognizer = cv2.face.LBPHFaceRecognizer_create()
recognizer.read('trainer/trainer.yml')
cascadePath = "haarcascade_frontalface_default.xml"
faceCascade = cv2.CascadeClassifier(cascadePath)

font = cv2.FONT_HERSHEY_SIMPLEX

# Names indexed by the label ids used during training (label 0 -> 'SHENG', ...).
names = ['SHENG', 'MJX'] 

# Initialize and start realtime video capture.
cam = cv2.VideoCapture(0)  # open the USB camera
cam.set(3, 640)  # set video width
cam.set(4, 480)  # set video height

# Define min window size to be recognized as a face: at least 10% of the
# frame in each dimension.
minW = 0.1 * cam.get(3)
minH = 0.1 * cam.get(4)

count = 0
while True:
    ret, img = cam.read()
    # BUG FIX: the original ignored read()'s status flag, so a failed grab
    # (camera unplugged, driver hiccup) crashed cvtColor on an empty frame.
    if not ret:
        break

    # Process only every 3rd frame so the Pi keeps up with the feed.
    count = count + 1
    if count != 3:
        continue
    count = 0

    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor = 1.2,
        minNeighbors = 5,
        minSize = (int(minW), int(minH)),
    )

    for (x, y, w, h) in faces:
        # Green box around the detected face.
        cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)

        # predict() returns (label, distance); 0 is a perfect match and
        # larger values mean a worse match.
        label, confidence = recognizer.predict(gray[y:y + h, x:x + w])

        # BUG FIX: the original indexed names[label] whenever the distance
        # was < 100, which raises IndexError if the model predicts a label
        # with no entry in `names`. Guard the lookup.
        if confidence < 100 and label < len(names):
            shown_name = names[label]
        else:
            shown_name = "unknown"
        # Both branches formatted the confidence identically — do it once.
        shown_conf = "  {0}%".format(round(100 - confidence))

        cv2.putText(img, str(shown_name), (x + 5, y - 5), font, 1, (255, 255, 255), 2)
        cv2.putText(img, str(shown_conf), (x + 5, y + h - 5), font, 1, (255, 255, 0), 1)

    cv2.imshow('camera', img)

    # Press ESC (27) to exit the video loop.
    k = cv2.waitKey(10) & 0xff
    if k == 27:
        break

# Do a bit of cleanup
print("\n [INFO] Exiting Program and cleanup stuff")
cam.release()
cv2.destroyAllWindows()

在这里插入图片描述更加详细全部的代码,在这里.关于人脸识别这里只是把整个过程走了一下,其实每一个函数都还需要再花时间去消化.

  • 6
    点赞
  • 38
    收藏
    觉得还不错? 一键收藏
  • 4
    评论
评论 4
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值