# Final build!
# Hardware: STM32F103C8 + OpenMV H7 Plus
# Note: the most successful competition version of the OpenMV code was left
# on a USB drive (passed on to junior students). This is a reconstruction.
# Project: mask detection with face recognition ("mask with mask face" project)
#我们需要先建立自己的图像库。我们先在OpenMV的U盘(注意一定要插sd卡)中新建一个文件夹,命名为singtown,然后在singtown文件夹中再新
#建n个名为s1,s2,s3...sn的子文件夹,其中n为整个图像库中的人数。
#人脸采集,然后,我们可以运行以下代码来采集不同的人脸样本。注意采集时尽量让背景均匀,并且让人脸尽量充满整个画面。人脸可以微笑,不笑,
#正脸,歪头,戴眼镜,不戴眼镜等。可以选择每人10-20张图片。
### 3个#号是注释的可能有用的代码import sensor, image, pyb
import sensor,image,time,pyb,os,tf,lcd
from pyb import Pin
#sensor.reset() # Initialize the camera sensor.
#sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.GRAYSCALE
#sensor.set_framesize(sensor.B128X128) # or sensor.QQVGA (or others)
#sensor.set_windowing((92,112))
#sensor.skip_frames(10) # Let new settings take affect.
#sensor.skip_frames(time = 2000)
clock = time.clock()
pin1 = Pin('P1', Pin.IN, Pin.PULL_UP)
pin5 = Pin('P5', Pin.IN, Pin.PULL_UP)
P9_Out = Pin('P9',Pin.OUT_PP) # P7设置成推挽输出
pin4 = Pin('P4', Pin.IN, Pin.PULL_UP)
def maskdetection():
###import sensor, image, time, os, tf
sensor.reset() # Reset and initialize the sensor.
sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE)
sensor.set_framesize(sensor.QQVGA2) # Set frame size to QVGA (320x240)
sensor.set_windowing((240, 240)) # Set 240x240 window.
sensor.skip_frames() # Let the camera adjust.
net = "trained.tflite"
labels = [line.rstrip('\n') for line in open("labels.txt")]
clock = time.clock()
lcd.init()
# while(True):
clock.tick()
img = sensor.snapshot()
#default settings just do one detection... change them to search the image...
for obj in tf.classify(net, img, min_scale=1.0, scale_mul=0.8, x_overlap=0.5, y_overlap=0.5):
print("**********\nPredictions at [x=%d,y=%d,w=%d,h=%d]" % obj.rect())
img.draw_rectangle(obj.rect())
# This combines the labels and confidence values into a list of tuples
predictions_list = list(zip(labels, obj.output()))
for i in range(len(predictions_list)):
print("%s = %7.2f" % (predictions_list[i][0], predictions_list[i][1]))
print(clock.fps(), "fps")
m = (predictions_list[1][1])#mask值
if m < 0.50:
time.sleep(1)
P9_Out.high()#设置p_out引脚为高
time.sleep(1)
P9_Out.low()#设置p_out引脚为低
lcd.display(img)
#执行人脸识别
'''
key9=0
def callback_PIN9(line):
global key9
key9=1
pyb.delay(5)
extint = pyb.ExtInt(pin9, pyb.ExtInt.IRQ_FALLING, pyb.Pin.PULL_UP, callback_PIN9)
while(True):
clock.tick()
img = sensor.snapshot()
if key9==1 and pin9.value()==0:
print("key9 ok!")
num = num + 1
key9=0
按一次返回一次的按键功能
'''
def facestore():#人脸储存
RED_LED_PIN = 1
BLUE_LED_PIN = 3
sensor.reset() # Initialize the camera sensor.
sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.GRAYSCALE
sensor.set_framesize(sensor.B128X128) # or sensor.QQVGA (or others)
sensor.set_windowing((92,112))
sensor.skip_frames(10) # Let new settings take affect.
sensor.skip_frames(time = 2000)
num = 4 #设置被拍摄者序号X,第一个人的图片保存到s1文件夹,第二个人的图片保存到s2文件夹,以此类推。每次更换拍摄者时,修改num值。
n = 20 #设置每个人拍摄图片数量。
#连续拍摄n张照片,每间隔3s拍摄一次。
while(n):
#红灯亮
pyb.LED(RED_LED_PIN).on()
sensor.skip_frames(time = 3000) # Give the user time to get ready.等待3s,准备一下表情。
#红灯灭,蓝灯亮
pyb.LED(RED_LED_PIN).off()
pyb.LED(BLUE_LED_PIN).on()
#保存截取到的图片到SD卡
print(n)
sensor.snapshot().save("singtown/s%s/%s.pgm" % (num, n) ) # or "example.bmp" (or others)
n -= 1
pyb.LED(BLUE_LED_PIN).off()
print("Done! Reset the camera to see the saved image.")
#最后,我们运行以下代码,来识别当前摄像头视野内的人脸,并输出与当前对象最匹配的人。
# Face recognition with LBP descriptors.
# See Timo Ahonen's "Face Recognition with Local Binary Patterns".
# Before running the example:
# 1) Download the AT&T faces database http://www.cl.cam.ac.uk/Research/DTG/attarchive/pub/data/att_faces.zip
# 2) Exract and copy the orl_faces directory to the SD card root.
#import sensor, time, image, pyb
def facedetection():#人脸识别
sensor.reset() # Initialize the camera sensor.
sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.GRAYSCALE
sensor.set_framesize(sensor.B128X128) # or sensor.QQVGA (or others)
sensor.set_windowing((92,112))
sensor.skip_frames(10) # Let new settings take affect.
sensor.skip_frames(time = 5000) #等待5s
#SUB = "s1"
NUM_SUBJECTS = 3 #图像库中不同人数,一共6人
NUM_SUBJECTS_IMGS = 60 #每人有20张样本图片
# 拍摄当前人脸。
img = sensor.snapshot()
#img = image.Image("singtown/%s/1.pgm"%(SUB))
d0 = img.find_lbp((0, 0, img.width(), img.height()))
#d0为当前人脸的lbp特征
img = None
pmin = 999999
num=0
def min(pmin, a, s):
global num
if a<pmin:
pmin=a
num=s
return pmin
for s in range(1, NUM_SUBJECTS+1):
dist = 0
for i in range(2, NUM_SUBJECTS_IMGS+1):
img = image.Image("singtown/s%d/%d.pgm"%(s, i))
d1 = img.find_lbp((0, 0, img.width(), img.height()))
#d1为第s文件夹中的第i张图片的lbp特征
dist += image.match_descriptor(d0, d1)#计算d0 d1即样本图像与被检测人脸的特征差异度。
print("Average dist for subject %d: %d"%(s, dist/NUM_SUBJECTS_IMGS))
pmin = min(pmin, dist/NUM_SUBJECTS_IMGS, s)#特征差异度越小,被检测人脸与此样本更相似更匹配。
print(pmin)
print(num) # num为当前最匹配的人的编号。
while(True):
clock.tick()
key1 = pin1.value()
key5 = pin5.value() ##按键控制
key4 = pin4.value()
facedetection()#人脸识别
if key1 == 0:
time.sleep_ms(20)
if key1 == 0:
state = 1
#print(state)
if(state == 1):
maskdetection()#口罩检测 按键改为电平反转
if key5 == 0:
time.sleep_ms(20)
if key5 == 0:
state = 2
print(state)
if(state == 2):
facestore()#人脸储存
if key4 ==0:
time.sleep_ms(20)
if key4 == 0:
state = 3
print(state)
if(state == 3):
# The code is incomplete, but this is the gist of it and it can mostly be
# reconstructed. I was stuck on displaying characters on the LCD back then,
# only to discover later that there was a built-in function for it (facepalm).
# The STM32 code is attached directly as a file!