Flask + Face Pose Detection

Learning objectives:

1. Send the eye-tracking data from the virtual-streamer (VTuber) experiment (eye movement, head pose, etc.) to a Flask web front end.

2. Detect head pose (left/right turning and up/down nodding) and use it to drive a Vue 3 bar chart (or another chart type), producing an interactive head-motion data visualization.

3. Detect head pose (left/right turning and up/down nodding) and use it to switch the dataset shown in a visualization (a Vue 3 page or a plain web page both work).


Complete code:

1. Connecting to the database

# Import the pymysql module
import pymysql

# Connect to the database
conn = pymysql.connect(
    host="localhost",
    port=3306,
    user="root",
    password="123456",
    database="flask2022",
    charset="utf8")
# Get a cursor object that can execute SQL statements
cursor = conn.cursor()

sql1 = "select * from map_enword"
# Conditional query; use a parameterized placeholder instead of string
# formatting so pymysql escapes the value: cursor.execute(sql, (1,))
sql = "SELECT * FROM course WHERE cid > %s"
# Exception handling
try:
    cursor.execute(sql1)
    results = cursor.fetchall()
    print(results)
except Exception:
    conn.rollback()
# Close the cursor
cursor.close()
# Close the database connection
conn.close()
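
The snippet above is standalone. To make a query result available to a web page, one option is to wrap it in a Flask route that returns JSON. The sketch below is an assumption, not part of the original project: the query_all helper and the /words route name are invented here, and the connection settings simply reuse the ones above.

# A minimal sketch of exposing the query through Flask as JSON.
from flask import Flask, jsonify
import pymysql

app = Flask(__name__)

def query_all(sql, args=None):
    """Open a short-lived connection, run one query, and return all rows."""
    conn = pymysql.connect(host="localhost", port=3306, user="root",
                           password="123456", database="flask2022",
                           charset="utf8")
    try:
        with conn.cursor() as cursor:
            cursor.execute(sql, args)
            return cursor.fetchall()
    finally:
        conn.close()

@app.route('/words')
def words():
    rows = query_all("select * from map_enword")
    # tuples of tuples serialize as JSON arrays of arrays
    return jsonify(list(rows))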

2. Pose detection

"""
detect face
"""
import cv2
import numpy as np
# to detect face key point
import dlib

DETECTOR = dlib.get_frontal_face_detector()
# 人脸模型数据
PREDICTOR = dlib.shape_predictor(
    r"D:\software\anaconda\python\Lib\site-packages\shape_predictor_68_face_landmarks.dat")


def face_positioning(img):
    """
    Locate faces and return the one with the largest bounding-box area.
    """
    dets = DETECTOR(img, 0)
    if not dets:
        return None
    return max(dets, key=lambda det: (det.right() - det.left()) * (det.bottom() - det.top()))


def extract_key_points(img, position):
    """
    Extract the 68 facial landmarks as float32 (x, y) points.
    """
    landmark_shape = PREDICTOR(img, position)
    key_points = []
    for i in range(68):
        pos = landmark_shape.part(i)
        key_points.append(np.array([pos.x, pos.y], dtype=np.float32))
    return key_points


def generate_points(key_points):
    """
    Generate the construction points: brow center, chin center, nose center.
    """
    def center(array):
        return sum([key_points[i] for i in array]) / len(array)
    left_brow = [18, 19, 20, 21]
    right_brow = [22, 23, 24, 25]
    # chin
    chin = [6, 7, 8, 9, 10]
    nose = [29, 30]
    return center(left_brow + right_brow), center(chin), center(nose)


def generate_features(construction_points):
    """
    Generate the two pose features from the construction points.
    """
    brow_center, chin_center, nose_center = construction_points
    mid_edge = brow_center - chin_center
    # hypotenuse of the brow-nose-chin triangle
    bevel_edge = brow_center - nose_center
    mid_edge_length = np.linalg.norm(mid_edge)
    # cross product / squared base length: the nose's sideways offset
    horizontal_rotation = np.cross(
        mid_edge, bevel_edge) / mid_edge_length ** 2
    # @ is the dot product: the nose's position along the brow-chin axis
    vertical_rotation = mid_edge @ bevel_edge / mid_edge_length**2
    return np.array([horizontal_rotation, vertical_rotation])
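
To see what the two features measure: mid_edge is the brow-to-chin vector B - C and bevel_edge is the brow-to-nose vector B - N. The cross product divided by |B - C|^2 is the nose's signed sideways offset from the brow-chin line (head turned left or right), and the dot product divided by |B - C|^2 is how far along that line the nose sits (head raised or lowered). A quick numeric check with made-up coordinates, not real landmarks:

import numpy as np

brow = np.array([256.0, 200.0])  # brow center B
chin = np.array([256.0, 320.0])  # chin center C
nose = np.array([262.0, 260.0])  # nose center N, 6 px right of the B-C line

# cross((0, -120), (-6, -60)) = -720, divided by |B - C|^2 = 14400 -> -0.05
# dot((0, -120), (-6, -60)) = 7200, divided by 14400 -> 0.5
print(generate_features((brow, chin, nose)))  # roughly [-0.05, 0.5]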


def draw_image(h_rotation, v_rotation):
    """
    Draw the cartoon face.

    Args:
        h_rotation: horizontal rotation amount
        v_rotation: vertical rotation amount
    """
    img = np.ones([512, 512], dtype=np.float32)
    face_length = 200
    center = 256, 256
    left_eye = int(220 - h_rotation *
                   face_length), int(249 + v_rotation * face_length)
    right_eye = int(292 - h_rotation *
                    face_length), int(249 + v_rotation * face_length)
    mouth = int(256 - h_rotation * face_length /
                2), int(310 + v_rotation * face_length / 2)
    cv2.circle(img, center, 100, 0, 1)
    cv2.circle(img, left_eye, 15, 0, 1)
    cv2.circle(img, right_eye, 15, 0, 1)
    cv2.circle(img, mouth, 5, 0, 1)
    return img


def extract_img_features(img):
    """
    Extract pose features from one frame, annotating landmarks on the image.
    """
    face_position = face_positioning(img)
    if face_position is None:
        cv2.imshow('self', img)
        cv2.waitKey(1)
        return None
    key_points = extract_key_points(img, face_position)
    for i, (p_x, p_y) in enumerate(key_points):
        cv2.putText(img, str(i), (int(p_x), int(p_y)),
                    cv2.FONT_HERSHEY_COMPLEX, 0.25, (255, 255, 255))
    construction_points = generate_points(key_points)
    for i, (p_x, p_y) in enumerate(construction_points):
        cv2.putText(img, str(i), (int(p_x), int(p_y)),
                    cv2.FONT_HERSHEY_COMPLEX, 0.25, (255, 255, 255))
    rotation = generate_features(construction_points)
    cv2.putText(img, str(rotation),
                (int(construction_points[-1][0]),
                 int(construction_points[-1][1])),
                cv2.FONT_HERSHEY_COMPLEX, 0.5, (255, 255, 255))
    cv2.imshow('self', img)
    return rotation

def list2dict(l):
    """Convert a value list into [{"x": i, "y": v}, ...] records for D3."""
    # cast to plain float so Jinja's tojson filter can serialize numpy values
    return [{"x": i, "y": float(v)} for i, v in enumerate(l)]

if __name__ == '__main__':
    CAP = cv2.VideoCapture(0)
    # baseline feature group, measured while facing the camera straight on
    ORIGIN_FEATURE_GROUP = [-0.00899233, 0.39529446]
    FEATURE_GROUP = [0, 0]
    headh = []
    headv = []
    while True:
        RETVAL, IMAGE = CAP.read()
        if not RETVAL:
            break
        # mirror the frame
        IMAGE = cv2.flip(IMAGE, 1)
        NEW_FEATURE_GROUP = extract_img_features(IMAGE)
        if NEW_FEATURE_GROUP is not None:
            FEATURE_GROUP = NEW_FEATURE_GROUP - ORIGIN_FEATURE_GROUP
        HORI_ROTATION, VERT_ROTATION = FEATURE_GROUP
        headh.append(HORI_ROTATION)
        headv.append(VERT_ROTATION)
        # print(HORI_ROTATION, VERT_ROTATION)
        cv2.imshow('Vtuber', draw_image(HORI_ROTATION, VERT_ROTATION))

        if cv2.waitKey(10) & 0xFF == ord('q'):
            print("close")
            break
    CAP.release()
    cv2.destroyAllWindows()
    print(len(headh))
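
The loop produces two continuous signals. Learning objectives 2 and 3 also call for discrete events ("turned left", "nodded down") that can trigger a chart update or a dataset switch. Below is a minimal sketch of that step, not from the original code: the threshold, the smoothing factor, and the sign-to-direction mapping are assumptions that depend on the camera and on the measured baseline.

def make_gesture_detector(threshold=0.15, alpha=0.3):
    """Return a closure that smooths (h, v) and reports a gesture string."""
    state = {"h": 0.0, "v": 0.0}

    def detect(h_rotation, v_rotation):
        # exponential moving average to suppress landmark jitter
        state["h"] = alpha * h_rotation + (1 - alpha) * state["h"]
        state["v"] = alpha * v_rotation + (1 - alpha) * state["v"]
        if state["h"] > threshold:
            return "right"
        if state["h"] < -threshold:
            return "left"
        if state["v"] > threshold:
            return "down"   # which sign means down is an assumption
        if state["v"] < -threshold:
            return "up"
        return None

    return detect

# Usage inside the capture loop above:
# detect = make_gesture_detector()
# gesture = detect(HORI_ROTATION, VERT_ROTATION)
# if gesture: print(gesture)  # e.g. advance to the next dataset on "right"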

3. Front-end code

3.1 virs0.html

<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <title>Virtual Streamer</title>
</head>
<body>
    <h1>Virtual Streamer</h1>
    <form enctype="multipart/form-data" method="post">
        <input type="hidden" name="hmessage" value="start">
        <input type="submit" value="Start">
    </form>
    <p>Press q to stop the capture</p>
</body>
</html>

3.2 virs.html

<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <title>Virtual Streamer</title>
</head>
<body>
    <h2>Rotation line charts</h2>
    <h3>Green is the horizontal rotation, blue is the vertical rotation</h3>
     <div class="container">

        <div class="jumbotron">
			<svg id="visualisation" width="1000" height="500"></svg>
        </div>

    </div>
<!--     <h3>Vertical rotation visualization</h3>-->
     <div class="container">

        <div class="jumbotron">
			<svg id="visualisation2" width="1000" height="500"></svg>
        </div>

    </div>
    <script src="http://d3js.org/d3.v3.min.js" charset="utf-8"></script>
<script>
    // tojson already emits a JavaScript literal; no quoting or eval needed
    var datah = {{ featureh|tojson }};
    console.log(datah)
    var vis = d3.select("#visualisation");
	     WIDTH = 1000;
	     HEIGHT = 300;
	     MARGINS = {
	         top: 20,
	         right: 20,
	         bottom: 20,
	         left: 50
	     };
		xScale = d3.scale.linear().range([MARGINS.left, WIDTH - MARGINS.right]).domain([0,200]);
		yScale = d3.scale.linear().range([HEIGHT - MARGINS.top, MARGINS.bottom]).domain([-0.5,0.5]);
		xAxis = d3.svg.axis()
		    .scale(xScale);
		yAxis = d3.svg.axis()
		    .scale(yScale)
		    .orient("left");
		vis.append("svg:g")
			.attr("transform", "translate(0," + (HEIGHT - MARGINS.bottom) + ")")
			.call(xAxis);
		vis.append("svg:g")
		    .attr("transform", "translate(" + (MARGINS.left) + ",0)")
		    .call(yAxis);
		var lineGen = d3.svg.line()
		  .x(function(d) {
			//console.log(d.x);
		    return xScale(d.x);
		  })
		  .y(function(d) {
			  //console.log(d.y);
		    return yScale(d.y);
		  });
		  vis.append('svg:path')
		    .attr('d', lineGen(datah))
		    .attr('stroke', 'green')
		    .attr('stroke-width', 2)
		    .attr('fill', 'none');

    var datav = {{ featurev|tojson }};
    console.log(datav)
    var vis2 = d3.select("#visualisation2");
	     WIDTH = 1000;
	     HEIGHT = 500;
	     MARGINS = {
	         top: 20,
	         right: 20,
	         bottom: 20,
	         left: 50
	     };
		xScale2 = d3.scale.linear().range([MARGINS.left, WIDTH - MARGINS.right]).domain([0,200]);
		yScale2 = d3.scale.linear().range([HEIGHT - MARGINS.top, MARGINS.bottom]).domain([-0.5,0.5]);
		xAxis2 = d3.svg.axis()
		    .scale(xScale2);
		yAxis2 = d3.svg.axis()
		    .scale(yScale2)
		    .orient("left");
		vis2.append("svg:g")
			.attr("transform", "translate(0," + (HEIGHT - MARGINS.bottom) + ")")
			.call(xAxis2);
		vis2.append("svg:g")
		    .attr("transform", "translate(" + (MARGINS.left) + ",0)")
		    .call(yAxis2);
		var lineGen2 = d3.svg.line()
		  .x(function(d) {
		    return xScale2(d.x);
		  })
		  .y(function(d) {
		    return yScale2(d.y);
		  });
		  vis2.append('svg:path')
		    .attr('d', lineGen2(datav))
		    .attr('stroke', 'blue')
		    .attr('stroke-width', 2)
		    .attr('fill', 'none');
</script>
</body>
</html>

4. Building the Flask app

Import the required packages:

from flask import Flask, render_template, request
import pymysql
import torch
from facenet_pytorch import *
from method.getface import *
from method.vv import *

app = Flask(__name__)

main.py

@app.route('/virtual', methods=['POST', 'GET'])
def virtual():
    if request.method == 'POST' and request.form.get("hmessage") == "start":
        CAP = cv2.VideoCapture(0)
        # baseline feature group, measured while facing the camera straight on
        ORIGIN_FEATURE_GROUP = [-0.00899233, 0.39529446]
        FEATURE_GROUP = [0, 0]
        headh = []
        headv = []
        while True:
            RETVAL, IMAGE = CAP.read()
            if not RETVAL:
                break
            # mirror the frame
            IMAGE = cv2.flip(IMAGE, 1)
            NEW_FEATURE_GROUP = extract_img_features(IMAGE)
            if NEW_FEATURE_GROUP is not None:
                FEATURE_GROUP = NEW_FEATURE_GROUP - ORIGIN_FEATURE_GROUP
            HORI_ROTATION, VERT_ROTATION = FEATURE_GROUP
            headh.append(HORI_ROTATION)
            headv.append(VERT_ROTATION)
            # print(HORI_ROTATION, VERT_ROTATION)
            cv2.imshow('Vtuber', draw_image(HORI_ROTATION, VERT_ROTATION))

            if cv2.waitKey(10) & 0xFF == ord('q'):
                print("close")
                break
        CAP.release()
        cv2.destroyAllWindows()
        return render_template("virs.html", featureh=list2dict(headh), featurev=list2dict(headv))
    return render_template("virs0.html")

if __name__ == '__main__':
    print("yes")  # console output
    app.run(debug=True)  # start the Flask development server
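
The /virtual route only renders the charts after the capture loop ends (when q is pressed). If the goal is to stream the pose to the front end while the camera runs, one option is a background capture thread plus a small JSON endpoint the page can poll. This is a sketch under assumptions, not the original code: the /pose route and the LATEST dict are invented here, it relies on cv2, np, and extract_img_features from the imports above, and the cv2.imshow calls inside extract_img_features would have to be removed before running it off the main thread.

import threading
from flask import jsonify

LATEST = {"h": 0.0, "v": 0.0}

def capture_loop():
    """Keep reading frames and store the latest rotation pair."""
    cap = cv2.VideoCapture(0)
    origin = np.array([-0.00899233, 0.39529446])
    while True:
        ok, frame = cap.read()
        if not ok:
            break
        feature = extract_img_features(cv2.flip(frame, 1))
        if feature is not None:
            h, v = feature - origin
            # cast numpy scalars to plain floats so jsonify can serialize them
            LATEST["h"], LATEST["v"] = float(h), float(v)

@app.route('/pose')
def pose():
    # the front end can poll this with fetch('/pose') and update the chart
    return jsonify(LATEST)

# start the camera thread before app.run(), e.g.:
# threading.Thread(target=capture_loop, daemon=True).start()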

Results:

From the home page, start the pose detection; the user can then turn and nod their head, and the rotation amounts are visualized as D3 line charts.

Summary

Head-pose data is captured from the webcam and passed to the front end, where D3 draws line charts of the horizontal and vertical rotation amounts.
