jetbot 06: Real-Time Facial Expression Detection

Building on real-time face detection, real-time expression recognition is entirely doable. But good results take, first, a good model, and second, solid image preprocessing.

Third, the person being detected needs to actually make expressive faces; a glassy-eyed 🤪 code monkey will probably leave the algorithm helpless. You may want to find someone with an expressive face to judge the results.

This post only covers applying the models. Let's get started.

Project GitHub repo:

https://github.com/walletiger/jetson_facial_emotion_examples
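
To follow along, clone it first:

git clone https://github.com/walletiger/jetson_facial_emotion_examples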

 

1. Wrapping face detection into a library

darknet_fd.py

#!/usr/bin/python3
# -*- coding:utf-8 -*-
from ctypes import POINTER, c_float

import cv2
import numpy as np

from darknet import detect_image
from darknet import load_net_custom
from darknet import load_meta
from darknet import IMAGE
from darknet import network_width, network_height


class DarkNetFD(object):
    """Thin wrapper around darknet's C API for yoloface-500k face detection."""

    def __init__(self, config_path='yoloface-500k-v2.cfg',
                 weight_path='yoloface-500k-v2.weights',
                 meta_path='face.data'):
        self.net = load_net_custom(config_path.encode('utf-8'),
                                   weight_path.encode('utf-8'), 0, 1)
        self.meta = load_meta(meta_path.encode('utf-8'))
        self.thresh = .5
        self.hier_thresh = .5
        self.nms = .45

    def detect(self, img):
        """Detect faces in a BGR frame.

        Returns a list of (label, confidence, (x1, y1, x2, y2)) tuples with
        box corners in the original frame's coordinates.
        """
        pred_height, pred_width, c = img.shape
        net_width, net_height = network_width(self.net), network_height(self.net)

        # BGR -> RGB, resize to the network input size, then HWC -> CHW.
        custom_image = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        custom_image = cv2.resize(custom_image, (net_width, net_height),
                                  interpolation=cv2.INTER_NEAREST)
        custom_image = custom_image.transpose(2, 0, 1)

        # Flatten into a contiguous float32 buffer in [0, 1] and wrap it in
        # darknet's IMAGE struct without an extra copy.
        arr = np.ascontiguousarray(custom_image.flat, dtype=np.float32) / 255.0
        data = arr.ctypes.data_as(POINTER(c_float))
        im = IMAGE(net_width, net_height, c, data)

        ret_lst = detect_image(self.net, self.meta, im,
                               self.thresh, self.hier_thresh, self.nms)
        ret_out_lst = []

        for ret in ret_lst or []:
            # darknet reports (center_x, center_y, w, h) in network input
            # coordinates; scale back to the frame and convert to corners.
            x, y, w, h = ret[2]
            x = x * pred_width / net_width
            y = y * pred_height / net_height
            w = w * pred_width / net_width
            h = h * pred_height / net_height

            ret_out_lst.append((ret[0], ret[1],
                                (int(x - w / 2), int(y - h / 2),
                                 int(x + w / 2), int(y + h / 2))))

        return ret_out_lst
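
A minimal smoke test for the wrapper (a sketch: the resolution and fps are assumptions, and camera.py is the helper from https://github.com/walletiger/jetson_nano_py/blob/master/camera.py used throughout this post):

#!/usr/bin/python3
# -*- coding:utf-8 -*-
# Grab a single frame and print the detected face boxes.
from darknet_fd import DarkNetFD
from camera import JetCamera

cam = JetCamera(640, 360, 10)
cam.open()
ret, frame = cam.read()
if ret:
    for label, conf, (x1, y1, x2, y2) in DarkNetFD().detect(frame):
        print(label, conf, (x1, y1, x2, y2))
cam.close()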

2. Using a Keras model

How to install Keras on the Jetson Nano:

https://blog.csdn.net/walletiger/article/details/109832423
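
The linked guide has the details. Once installed, a quick sanity check (a minimal sketch, assuming a TensorFlow-backed Keras):

#!/usr/bin/python3
# Both imports should succeed and report versions if the install worked.
import tensorflow as tf
import keras

print('tensorflow', tf.__version__)
print('keras', keras.__version__)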

 

live_emotion.py

Loading a Keras + TensorFlow model takes a lot of memory, and loading is slow on the Jetson Nano (detection speed itself is fine). As for accuracy: with good lighting, low image noise, and slightly exaggerated expressions, it recognizes them well enough. No screenshots here.

The weights used are facial_expression_model_weights.h5.
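
If the memory pressure bites, one knob worth trying is stopping TensorFlow from pre-allocating the whole GPU up front before the model loads. A sketch assuming a TF 1.x backend (on TF 2.x, tf.config.experimental.set_memory_growth plays the same role):

# Allocate GPU memory on demand; the Nano's RAM is shared between CPU and GPU.
import tensorflow as tf
from keras import backend as K

config = tf.ConfigProto()
config.gpu_options.allow_growth = True
K.set_session(tf.Session(config=config))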

#!/usr/bin/python3
# -*- coding:utf-8 -*-
import sys

sys.path.append('/workspace/hugo_py')

import time
import traceback

import cv2
import numpy as np
from keras.models import model_from_json
from keras.preprocessing.image import img_to_array

from darknet_fd import DarkNetFD
# https://github.com/walletiger/jetson_nano_py/blob/master/camera.py
from camera import JetCamera

cap_w = 640
cap_h = 360
cap_fps = 10

# FER2013 class order used by this model.
EMOTIONS = ('angry', 'disgust', 'fear', 'happy', 'sad', 'surprise', 'neutral')


def emotion_detect(model, img, x1, y1, x2, y2):
    try:
        detected_face = img[y1:y2, x1:x2]                                # crop the detected face
        detected_face = cv2.cvtColor(detected_face, cv2.COLOR_BGR2GRAY)  # to grayscale
        detected_face = cv2.resize(detected_face, (48, 48))              # model expects 48x48 input
        img_pixels = img_to_array(detected_face)
        img_pixels = np.expand_dims(img_pixels, axis=0)
    except Exception:
        # Degenerate boxes (e.g. partly outside the frame) make the crop/resize fail.
        return

    img_pixels /= 255

    predictions = model.predict(img_pixels)

    # Pick the class with the highest score.
    max_index = np.argmax(predictions[0])
    emotion = EMOTIONS[max_index]

    cv2.putText(img, emotion, (x1, y1), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)


def main():
    model = model_from_json(open("keras_models/facial_expression_model_structure.json", "r").read())
    model.load_weights('keras_models/facial_expression_model_weights.h5')

    cam = JetCamera(cap_w, cap_h, cap_fps)
    fd = DarkNetFD()

    cam.open()

    cnt = 0
    while True:
        try:
            ret, frame = cam.read()
            if not ret:
                break

            t0 = time.time()
            res = fd.detect(frame)
            t1 = time.time()

            cnt += 1

            for det in res:
                x1, y1, x2, y2 = det[2]
                emotion_detect(model, frame, x1, y1, x2, y2)
                cv2.rectangle(frame, (x1, y1), (x2, y2), (255, 255, 0))
            t2 = time.time()

            if cnt % 10 == 0:
                print("frame cnt [%d] yoloface detect delay = %.1fms" % (cnt, (t1 - t0) * 1000))
                print("emotion detect delay = %.1fms" % ((t2 - t1) * 1000))

            cv2.imshow('haha', frame)
            cv2.waitKey(1)
        except Exception:
            traceback.print_exc()
            break

    cam.close()


if __name__ == '__main__':
    main()

3. The CNN-based facial_emotion_recognition package

Installation:

pip3 install facial_emotion_recognition

This model is likewise slow to load.
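
For reference, the package's API is essentially a single call: hand it a frame and it detects the face, classifies the emotion, and returns the annotated frame. A minimal standalone sketch (the cv2.VideoCapture index is an assumption; the live script below uses JetCamera instead):

# Annotate one webcam frame with the recognized emotion and save it.
import cv2
from facial_emotion_recognition import EmotionRecognition

er = EmotionRecognition(device='cpu')  # device='gpu', gpu_id=0 also works
cam = cv2.VideoCapture(0)
ok, frame = cam.read()
if ok:
    frame = er.recognise_emotion(frame, return_type='BGR')
    cv2.imwrite('annotated.jpg', frame)
cam.release()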

live_emotion1.py 

#!/usr/bin/python3
# -*- coding:utf-8 -*-
import sys

sys.path.append('/workspace/hugo_py')

import time
import traceback

import cv2

from darknet_fd import DarkNetFD
from facial_emotion_recognition import EmotionRecognition
# https://github.com/walletiger/jetson_nano_py/blob/master/camera.py
from camera import JetCamera

cap_w = 640
cap_h = 360
cap_fps = 20


def emotion_detect(model, img, x1, y1, x2, y2):
    try:
        # Crop with a small margin, clamped so a negative index can't wrap around.
        y1 = max(y1 - 4, 0)
        x1 = max(x1 - 4, 0)
        detected_face = img[y1: y2 + 4, x1: x2 + 4]
        # recognise_emotion annotates the crop and returns it (README-style usage).
        out = model.recognise_emotion(detected_face, return_type='BGR')
        if out is not None:
            img[y1: y2 + 4, x1: x2 + 4] = out  # paste the annotated crop back
    except Exception:
        traceback.print_exc()
        return


def main():
    model = EmotionRecognition(device='gpu', gpu_id=0)
    cam = JetCamera(cap_w, cap_h, cap_fps)
    fd = DarkNetFD()

    cam.open()

    cnt = 0
    while True:
        try:
            ret, frame = cam.read()
            if not ret:
                break

            t0 = time.time()
            res = fd.detect(frame)
            t1 = time.time()

            cnt += 1
            if cnt % 10 == 0:
                print("frame cnt [%d] yoloface detect delay = %.1fms" % (cnt, (t1 - t0) * 1000))

            for det in res:
                x1, y1, x2, y2 = [int(v) for v in det[2]]
                emotion_detect(model, frame, x1, y1, x2, y2)
                cv2.rectangle(frame, (x1, y1), (x2, y2), (255, 255, 0))

            cv2.imshow('haha', frame)
            cv2.waitKey(1)
        except Exception:
            traceback.print_exc()
            break

    cam.close()


if __name__ == '__main__':
    main()

4. The ONNX model emotion-ferplus-8.onnx

live_emotion2.py 

Here the ONNX model is loaded with OpenCV's dnn module. It loads quickly and the results are fairly stable.

 

#!/usr/bin/python3
# -*- coding: utf-8 -*-
import sys

sys.path.append('/workspace/hugo_py')

import time
import traceback

import cv2
import numpy as np

from darknet_fd import DarkNetFD
# https://github.com/walletiger/jetson_nano_py/blob/master/camera.py
from camera import JetCamera

cap_w = 640
cap_h = 360
cap_fps = 10

# FER+ output order: neutral, happy, surprise, sad, angry, disgust, fear, contempt.
emotions = ['面无表情', '高兴', '吃惊', '伤心', '生气', '厌恶', '害怕', '不屑一顾']

# cv2.freetype (an OpenCV contrib module) renders the CJK labels that
# cv2.putText cannot.
ft2 = cv2.freetype.createFreeType2()
ft2.loadFontData("DroidSansFallback.ttf", 0)


def detect_emotion(net, img, x1, y1, x2, y2):
    padding = 4

    # Skip boxes too close to the border; a negative index would wrap around.
    if y1 < padding or x1 < padding:
        return
    face = img[y1 - padding: y2 + padding, x1 - padding: x2 + padding]

    # FER+ expects a 1x1x64x64 grayscale blob with raw (unscaled) pixel values.
    gray = cv2.cvtColor(face, cv2.COLOR_BGR2GRAY)
    resized_face = cv2.resize(gray, (64, 64))
    processed_face = resized_face.reshape(1, 1, 64, 64).astype(np.float32)

    t0 = time.time()
    net.setInput(processed_face)
    output = net.forward()
    t1 = time.time()
    #print("emotion detect cost = %.1fms" % ((t1 - t0) * 1000))

    # The model outputs raw scores; apply a numerically stable softmax.
    expanded = np.exp(output - np.max(output))
    probabilities = expanded / expanded.sum()

    prob = np.squeeze(probabilities)
    predicted_emotion = emotions[prob.argmax()]

    # Write the predicted emotion above the face.
    ft2.putText(img, predicted_emotion, (x1, y1 + 2), fontHeight=25,
                color=(0, 0, 255), thickness=-1,
                line_type=cv2.LINE_4, bottomLeftOrigin=False)

    # Outline the detected face.
    cv2.rectangle(img, (x1, y1), (x2, y2), (0, 0, 255), 2)


def main():
    cam = JetCamera(cap_w, cap_h, cap_fps)
    cam.open()

    model = 'onnx_models/emotion-ferplus-8.onnx'
    net = cv2.dnn.readNetFromONNX(model)

    fd = DarkNetFD()

    while True:
        ret, img = cam.read()
        if not ret:
            break

        t0 = time.time()
        faces = fd.detect(img)
        t1 = time.time()
        #print("fd cost = %.1fms" % ((t1 - t0) * 1000))

        for det in faces:
            x1, y1, x2, y2 = det[2]
            try:
                detect_emotion(net, img, x1, y1, x2, y2)
            except Exception:
                traceback.print_exc()
                break

            cv2.rectangle(img, (x1, y1), (x2, y2), (255, 255, 0))

        cv2.imshow('haha', img)
        cv2.waitKey(1)

    cam.close()


if __name__ == '__main__':
    main()
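
One caveat: cv2.freetype only exists in OpenCV builds that include the freetype contrib module. If yours lacks it, a fallback sketch is to swap in English labels (in the same FER+ output order) so plain cv2.putText can render them:

# Fallback when cv2.freetype is unavailable: English labels + cv2.putText.
import cv2

emotions_en = ['neutral', 'happy', 'surprise', 'sad',
               'anger', 'disgust', 'fear', 'contempt']

def put_label(img, text, x1, y1):
    cv2.putText(img, text, (x1, y1), cv2.FONT_HERSHEY_SIMPLEX,
                0.8, (0, 0, 255), 2)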

 

There are plenty more examples of this on GitHub; if you want to experiment further, go dig around:

https://github.com/search?q=facial-expression-recognition

 
