OpenMV4H7教程(二)人脸识别点亮LED
1、通电后,开始录入人脸,面对摄像头,绿灯亮,录入成功
3、摄像头拍摄图像,匹配成功时,红灯亮
#人脸识别开关
import sensor, time, image #导入模块
from pyb import LED #导入模块
# Reset sensor 设定图像基本参数
# Reset sensor and configure the basic image parameters used for face detection.
sensor.reset() # initialize the camera
sensor.set_contrast(3) # contrast, range -3 to 3 (original comment said "brightness" — this sets contrast)
sensor.set_gainceiling(16) # sensor gain ceiling
sensor.set_framesize(sensor.VGA) # full VGA frame
sensor.set_windowing((320,240)) # crop a 320x240 window out of the VGA frame
sensor.set_pixformat(sensor.GRAYSCALE) # grayscale pixel format
sensor.set_vflip(True) # flip the image vertically
# Skip a few frames to allow the sensor settle down
sensor.skip_frames(time = 2000)
# Load Haar Cascade
# By default this will use all stages, lower stages is faster but less accurate.
face_cascade = image.HaarCascade("frontalface", stages=25) # use 25 cascade stages (not "25 feature points")
print(face_cascade)
# First set of keypoints: the reference face captured at enrollment time.
kpts1 = None
# Enrollment loop: keep grabbing frames until a face is detected and its
# keypoints are successfully extracted.
while kpts1 is None:
    img = sensor.snapshot()
    img.draw_string(0, 0, "Looking for a face...")
    # Find faces
    objects = img.find_features(face_cascade, threshold=0.5, scale=1.25)
    if objects:
        # Expand the ROI by 31 pixels in every direction
        face = (objects[0][0] - 31, objects[0][1] - 31,
                objects[0][2] + 31 * 2, objects[0][3] + 31 * 2)
        # Extract keypoints using the detected face region as the ROI.
        # find_keypoints returns None on failure, which keeps the loop running.
        kpts1 = img.find_keypoints(threshold=10, scale_factor=1.1,
                                   max_keypoints=100, roi=face)
        # Draw a rectangle around the first face
        img.draw_rectangle(objects[0])

# Enrollment succeeded: show the keypoints and flash the green LED.
print(kpts1)
img.draw_keypoints(kpts1, size=24)
img = sensor.snapshot()
LED(2).on()  # LED(2) is the green LED on OpenMV boards
# NOTE(review): legacy OpenMV firmware's time.sleep() took milliseconds; on
# current firmware this sleeps 2000 *seconds* — use time.sleep_ms(2000) there.
time.sleep(2000)
LED(2).off()
# FPS clock
clock = time.clock()
# Matching loop: extract keypoints from each live frame and compare them
# against the enrolled reference set; red LED signals a recognized face.
while True:
    clock.tick()
    img = sensor.snapshot()
    # Extract keypoints from the whole frame
    kpts2 = img.find_keypoints(threshold=10, scale_factor=1.1,
                               max_keypoints=100, normalized=True)
    if kpts2:
        # Match the first set of keypoints with the second one
        c = image.match_descriptor(kpts1, kpts2, threshold=85)
        match = c[6]  # c[6] contains the number of matches.
        if match > 7:
            # Enough matches: treat as the enrolled face.
            img.draw_rectangle(c[2:6])
            img.draw_cross(c[0], c[1], size=10)
            print(kpts2, "matched:%d dt:%d" % (match, c[7]))
            LED(1).on()  # LED(1) is the red LED on OpenMV boards
        else:
            LED(1).off()
    # Draw FPS
    img.draw_string(0, 0, "FPS:%.2f" % (clock.fps()))