博主的课程设计选题,开始在网上参考了诸多教程踩了很多坑最终得以实现,所以写下心得总结,以便后人乘凉~
https://blog.csdn.net/kyokozan/article/details/79192646
https://blog.csdn.net/LC_1994/article/details/52971408
https://blog.csdn.net/coolwriter/article/details/77825375
https://blog.csdn.net/leaves_joe/article/details/67656340
http://shumeipai.nxez.com/2017/03/16/raspberry-pi-face-recognition-system.html
https://courses.cit.cornell.edu/ece5990/ECE5990_Fall15_FinalProjects/Andre_Heil/ece5990_final_report/avh34_jr986.html
https://www.cnblogs.com/Pyrokine/p/8921285.html
以上是我主要参考的几个帖子,感谢前辈们~~
安装OpenCV的最佳方式是
照 Adrian Rosebrock 的教程来进行:《Raspbian Stretch: Install OpenCV 3 + Python on your Raspberry Pi》
(https://www.pyimagesearch.com/2017/09/04/raspbian-stretch-install-opencv-3-python-on-your-raspberry-pi/)
来进行,不过如果仅限于实现简单的人脸识别有一部分东西并不需要安装
另外,推荐使用交叉编译进行,博主在树莓派3B+上编译整整用了近4小时,很痛苦
另外,注意
树莓派摄像头模块(PiCam)千万不能热插拔,很容易烧坏
另外,你可能需要搭个梯子,或者更换apt源
下面开始:
首先安装OpenCV及其依赖的各种工具和图像视频库:
打开终端
sudo apt-get update
扩大TF卡的空间,如果不做这步操作,后面会出问题,比如卡死
sudo raspi-config
sudo apt-get upgrade
// 安装build-essential、cmake、git和pkg-config
sudo apt-get install build-essential cmake git pkg-config
sudo apt-get install build-essential cmake git pkg-config
// 安装jpeg格式图像工具包
sudo apt-get install libjpeg8-dev
// 安装tif格式图像工具包
sudo apt-get install libtiff5-dev
// 安装JPEG-2000图像工具包
sudo apt-get install libjasper-dev
// 安装png图像工具包
sudo apt-get install libpng12-dev
sudo apt-get install libjpeg8-dev
// 安装tif格式图像工具包
sudo apt-get install libtiff5-dev
// 安装JPEG-2000图像工具包
sudo apt-get install libjasper-dev
// 安装png图像工具包
sudo apt-get install libpng12-dev
sudo apt-get install libavcodec-dev libavformat-dev libswscale-dev libv4l-dev
安装gtk2.0(树莓派很可能下载错误,更换中科大或者清华源即可):
sudo apt-get install libgtk2.0-dev
sudo apt-get install libatlas-base-dev gfortran
执行到这里就把OpenCV的依赖包全部安装好了,之后要开始编译OpenCV源代码了,请大家用wget工具下载到用户目录下(源码要放在有执行权限的位置,不是安装位置),命令如下:
// 使用wget下载OpenCV源码,觉得慢的话可以到https://github.com/opencv/opencv/releases下载OpenCV的源代码
(tar.gz格式,需要解压好)放到用户目录下,但是OpenCV_contrib请大家使用wget,亲测直接到Github下载zip文件的话,
会有编译问题
// 下载OpenCV
wget -O opencv-3.4.1.zip https://github.com/Itseez/opencv/archive/3.4.1.zip
// 解压OpenCV
unzip opencv-3.4.1.zip
// 下载OpenCV_contrib库:
wget -O opencv_contrib-3.4.1.zip https://github.com/Itseez/opencv_contrib/archive/3.4.1.zip
// 解压OpenCV_contrib库:
unzip opencv_contrib-3.4.1.zip
找到你下载的源码文件夹并打开,tar.gz解压后文件夹名应该是opencv-3.4.1(版本号可能会变化),git方式下载的文件夹名应该是opencv。
// 打开源码文件夹
cd opencv-3.4.1
之后我们新建一个名为release的文件夹用来存放cmake编译时产生的临时文件:
// 新建release文件夹
mkdir release
// 进入release文件夹
cd release
设置cmake编译参数,安装目录默认为/usr/local ,注意参数名、等号和参数值之间不能有空格,但每行末尾“\”之前有空格,参数值最后是两个英文的点:
// CMAKE_BUILD_TYPE是编译方式,CMAKE_INSTALL_PREFIX是安装目录,OPENCV_EXTRA_MODULES_PATH是加载额外模块,INSTALL_PYTHON_EXAMPLES是安装官方python例程,BUILD_EXAMPLES是编译例程(这两个可以不加,不加编译稍微快一点点,想要C语言的例程的话,在最后一行前加参数INSTALL_C_EXAMPLES=ON \)
sudo cmake -D CMAKE_BUILD_TYPE=RELEASE \
-D CMAKE_INSTALL_PREFIX=/usr/local \
-D OPENCV_EXTRA_MODULES_PATH=~/opencv_contrib-3.4.1/modules \
-D INSTALL_PYTHON_EXAMPLES=ON \
-D BUILD_EXAMPLES=ON ..
注意:OPENCV_EXTRA_MODULES_PATH、INSTALL_PYTHON_EXAMPLES 和 BUILD_EXAMPLES 这三个参数都可以不加(不加编译会快一些,但额外模块和官方例程就不会被安装)
之后开始正式编译过程
// 编译,以管理员身份,否则容易出错
sudo make
// 安装
sudo make install
// 更新动态链接库
sudo ldconfig
整个编译过程大约4小时,做好心理准备!推荐在Ubuntu下进行交叉编译,半小时左右!
// 安装
sudo make install
// 更新动态链接库
sudo ldconfig
// 更新动态链接库
sudo ldconfig
到这里,OpenCV的编译完成
安装PiCamera
sudo apt-get install python-pip
sudo apt-get install python-dev
sudo pip install picamera
至此人脸识别所需要的准备工作已经完成,
以下是人脸识别代码
from picamera.array import PiRGBArray
from picamera import PiCamera
from functools import partial
import multiprocessing as mp
import cv2
import os
import time
### Setup #####################################################################
# Route SDL video output to the framebuffer so cv2.imshow works on the
# Pi's console without an X session.
os.putenv( 'SDL_FBDEV', '/dev/fb0' )

# Capture resolution and its centre point; (cx, cy) is the target the
# servo loop steers detected faces toward.
resX = 320
resY = 240
cx = resX / 2
cy = resY / 2

# Centre both servos via the ServoBlaster device file
# (channel 0 = pan, channel 1 = tilt); mirror their positions in
# xdeg/ydeg so later nudges stay in sync with the hardware.
os.system( "echo 0=150 > /dev/servoblaster" )
os.system( "echo 1=150 > /dev/servoblaster" )
xdeg = 150
ydeg = 150

# Setup the camera
camera = PiCamera()
camera.resolution = ( resX, resY )
camera.framerate = 60

# Use this as our output
rawCapture = PiRGBArray( camera, size=( resX, resY ) )

# The face cascade file to be used (LBP frontal-face cascade).
# NOTE(review): this path points at opencv-2.4.9, but the guide above
# installs OpenCV 3.4.1 — confirm the cascade actually exists here
# (for 3.4.1 it would live under opencv-3.4.1/data/lbpcascades/).
face_cascade = cv2.CascadeClassifier('/home/pi/opencv-2.4.9/data/lbpcascades/lbpcascade_frontalface.xml')

# Start time and frame counter used by draw_frame's FPS overlay.
t_start = time.time()
fps = 0
### Helper Functions ##########################################################
def get_faces( img ):
    """Run the LBP face cascade on a BGR frame.

    Returns a (detections, frame) pair, where detections is the sequence
    of (x, y, w, h) rectangles from detectMultiScale and frame is the
    unmodified input image (handed back so pool workers return it).
    """
    # The cascade operates on a single-channel image.
    grayscale = cv2.cvtColor( img, cv2.COLOR_BGR2GRAY )
    detections = face_cascade.detectMultiScale( grayscale )
    return detections, img
def draw_frame( img, faces ):
    """Annotate a frame with face boxes and FPS, steer the servos, and show it.

    Args:
        img:   BGR frame (numpy array) to draw on in place.
        faces: sequence of (x, y, w, h) rectangles from detectMultiScale.

    Side effects: writes pan/tilt commands to /dev/servoblaster, updates
    the module-level xdeg/ydeg/fps counters, and displays the frame in
    an OpenCV window.
    """
    global xdeg
    global ydeg
    global fps
    # Fix: the original declared "global time_t", a name that does not
    # exist anywhere; the value actually read below is t_start, which is
    # only read (never assigned) and therefore needs no global statement.

    # Draw a rectangle around every face
    for ( x, y, w, h ) in faces:
        cv2.rectangle( img, ( x, y ),( x + w, y + h ), ( 200, 255, 0 ), 2 )
        cv2.putText(img, "Face No." + str( len( faces ) ), ( x, y ), cv2.FONT_HERSHEY_SIMPLEX, 0.5, ( 0, 0, 255 ), 2 )

        # Centre of this face; the servos are nudged one degree at a time
        # toward bringing it to the frame centre (cx, cy).
        tx = x + w/2
        ty = y + h/2

        # Pan servo (channel 0): 15-pixel dead zone, travel clamped to
        # the [110, 190] range.
        if ( cx - tx > 15 and xdeg <= 190 ):
            xdeg += 1
            os.system( "echo 0=" + str( xdeg ) + " > /dev/servoblaster" )
        elif ( cx - tx < -15 and xdeg >= 110 ):
            xdeg -= 1
            os.system( "echo 0=" + str( xdeg ) + " > /dev/servoblaster" )

        # Tilt servo (channel 1): same dead zone and clamping as pan.
        if ( cy - ty > 15 and ydeg >= 110 ):
            ydeg -= 1
            os.system( "echo 1=" + str( ydeg ) + " > /dev/servoblaster" )
        elif ( cy - ty < -15 and ydeg <= 190 ):
            ydeg += 1
            os.system( "echo 1=" + str( ydeg ) + " > /dev/servoblaster" )

    # Calculate and show the average FPS since t_start
    fps = fps + 1
    sfps = fps / (time.time() - t_start)
    cv2.putText(img, "FPS : " + str( int( sfps ) ), ( 10, 10 ), cv2.FONT_HERSHEY_SIMPLEX, 0.5, ( 0, 0, 255 ), 2 )

    cv2.imshow( "Frame", img )
    cv2.waitKey( 1 )
### Main ######################################################################
if __name__ == '__main__':
    # Four worker processes run face detection in parallel while the main
    # process captures frames and draws results — a 4-deep pipeline, so
    # the displayed detections lag the live frame by a few frames.
    pool = mp.Pool( processes=4 )
    fcount = 0

    # Prime the pipeline: run detection on one still capture so all four
    # result slots (r1..r4) hold a finished result before the loop starts.
    camera.capture( rawCapture, format="bgr" )
    r1 = pool.apply_async( get_faces, [ rawCapture.array ] )
    r2 = pool.apply_async( get_faces, [ rawCapture.array ] )
    r3 = pool.apply_async( get_faces, [ rawCapture.array ] )
    r4 = pool.apply_async( get_faces, [ rawCapture.array ] )
    f1, i1 = r1.get()
    f2, i2 = r2.get()
    f3, i3 = r3.get()
    f4, i4 = r4.get()
    # Reset the capture buffer so it can be reused for the next frame.
    rawCapture.truncate( 0 )

    for frame in camera.capture_continuous( rawCapture, format="bgr", use_video_port=True ):
        image = frame.array

        # Round-robin over the four slots: submit the new frame to slot N
        # while collecting and drawing the (already in-flight) result of
        # slot N+1.
        # NOTE(review): on the very first loop pass fcount is 0, so that
        # frame is neither submitted nor drawn — confirm this one-frame
        # skip is intended.
        if fcount == 1:
            r1 = pool.apply_async( get_faces, [ image ] )
            f2, i2 = r2.get()
            draw_frame( i2, f2 )
        elif fcount == 2:
            r2 = pool.apply_async( get_faces, [ image ] )
            f3, i3 = r3.get()
            draw_frame( i3, f3 )
        elif fcount == 3:
            r3 = pool.apply_async( get_faces, [ image ] )
            f4, i4 = r4.get()
            draw_frame( i4, f4 )
        elif fcount == 4:
            r4 = pool.apply_async( get_faces, [ image ] )
            f1, i1 = r1.get()
            draw_frame( i1, f1 )
            # Wrap around: next increment makes fcount 1 again.
            fcount = 0

        fcount += 1
        # Must clear the buffer before the next capture_continuous frame.
        rawCapture.truncate( 0 )
保存为.py文件直接在树莓派图形界面下打开执行