Writing a High-Performance Face Recognition Server from Scratch (Part 4)
The remaining code is fairly straightforward; if you want to try it, download the source from my GitHub repository and run it.
1 Web Client
The main thing to note about the web client is that for the page to access the camera, it must be served from localhost or over HTTPS. To maximize transfer speed, the front end sends the raw pixel values of the captured camera frame directly and lets the AI microservice parse them (a sketch of how the receiving side can rebuild an image from this array follows the listing). The trickier part is accessing the camera from the browser, and there is relatively little material about it online.
The code is as follows:
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<meta http-equiv="X-UA-Compatible" content="ie=edge">
<title>人脸识别</title>
<script src="./jquery-3.3.1.min.js"></script>
<style>
#capture {
position: absolute;
right: 190px;
bottom: -40px;
}
#video {
position: absolute;
right: 0;
top: 0;
}
#img {
position: absolute;
left: 0;
top: 0;
}
.auto {
position: absolute;
left: 50%;
top: 50%;
height: 320px;
margin-top: -160px;
}
#recognizeFace {
position: absolute;
left: 150px;
bottom: -40px;
}
#uploadFace {
position: absolute;
left: 280px;
bottom: -40px;
}
button {
cursor: pointer;
margin: 0 auto;
border: 1px solid #f0f0f0;
background: #5CACEE;
color: #FFF;
width: 100px;
height: 36px;
line-height: 36px;
border-radius: 8px;
text-align: center;
/* disable text selection */
-webkit-touch-callout: none;
/* iOS Safari */
-webkit-user-select: none;
/* Chrome/Safari/Opera */
-khtml-user-select: none;
/* Konqueror */
-moz-user-select: none;
/* Firefox */
-ms-user-select: none;
/* Internet Explorer/Edge */
user-select: none;
/* standard, non-prefixed version */
}
</style>
</head>
<body>
<div class="auto">
<video id="video" width="480" height="320" autoplay></video>
<canvas id="canvas" width="480" height="320" style="display: none;"></canvas>
<img src="./body_default.png" id="img" width="480" height="320" style="margin-left: 20px;">
<div>
<button id="capture" title="点击进行拍照">拍照</button>
</div>
<div>
<button id="recognizeFace" title="是否用这张图片进行验证" onclick="recognizeFace()">识别</button>
<button id="uploadFace" title="是否用这张图片进行验证" onclick="uploadFace()">上传人脸</button>
</div>
</div>
<script>
var file, videoStream;
var formData;
//cross-browser helper for accessing the user's media devices
function getUserMedia(constraints, success, error) {
if (navigator.mediaDevices.getUserMedia) {
//current standard API
navigator.mediaDevices.getUserMedia(constraints).then(success).catch(error);
} else if (navigator.webkitGetUserMedia) {
//WebKit-based browsers
navigator.webkitGetUserMedia(constraints, success, error)
} else if (navigator.mozGetUserMedia) {
//Firefox
navigator.mozGetUserMedia(constraints, success, error);
} else if (navigator.getUserMedia) {
//legacy API
navigator.getUserMedia(constraints, success, error);
}
}
let videoTag = document.getElementById('video');
let canvas = document.getElementById('canvas');
// get the canvas 2D rendering context
let context = canvas.getContext('2d');
function success(stream) {
//compatibility alias for WebKit-based browsers (unused once srcObject is supported)
let CompatibleURL = window.URL || window.webkitURL;
//use the camera stream as the source of the video element
videoStream = stream;
videoTag.srcObject = stream;
videoTag.play();
}
function error(error) {
console.log(`访问用户媒体设备失败${error.name}, ${error.message}`);
alert(`访问用户媒体设备失败${error.name}, ${error.message}`);
}
if (navigator.mediaDevices.getUserMedia || navigator.getUserMedia || navigator.webkitGetUserMedia || navigator
.mozGetUserMedia) {
//request the user's media devices and open the camera
getUserMedia({
video: {
width: 480,
height: 320
}
}, success, error);
} else {
alert('不支持访问用户媒体');
}
// capture the current frame, collect its pixel data, and preview it
document.getElementById('capture').addEventListener('click', function () {
context.drawImage(videoTag, 0, 0, 480, 320);
var imageData = context.getImageData(0, 0, canvas.width, canvas.height).data;
var imgArr = [];
formData = new FormData();
for (var i = 0; i < imageData.length; i += 4) {
imgArr.push(imageData[i], imageData[i + 1], imageData[i + 2])
}
// console.log(imgArr)
formData.append("data", imgArr);
// get the captured frame as a base64 data URL
var image = canvas.toDataURL('image/png');
// console.log(typeof image);
// console.log(image);
// grab the preview <img> element
var img = document.getElementById("img");
//display the captured frame in the img tag
img.src = image;
formData.append("cols", img.width);
formData.append("rows", img.height);
formData.append("personName", "");
// helper: convert the base64 data URL back into a File object (for uploading the PNG directly)
function dataURLtoFile(dataurl, filename) {
var arr = dataurl.split(','),
mime = arr[0].match(/:(.*?);/)[1],
bstr = atob(arr[1]),
n = bstr.length,
imgData = new Uint8Array(n);
while (n--) {
imgData[n] = bstr.charCodeAt(n);
}
file = new File([imgData], filename, {
type: mime
});
return file;
}
})
function recognizeFace() {
var date1 = new Date(); //request start time
$.ajax({
type: "POST", // HTTP method
url: "http://localhost:8888/checkFace", // target URL
data: formData, //payload
async: true, // send asynchronously
processData: false, //do not let jQuery serialize the FormData; send it as-is
contentType: false,
success: function (data) {
var date2 = new Date(); //response time
var date3 = date2.getTime() - date1.getTime() //elapsed milliseconds
console.log(date3)
console.log(`${data.message}`);
alert(data.message);
},
error: function (e) {
//self.$message.warning(`${e}`);
console.log("不成功" + e);
}
});
videoStream.getTracks()[0].stop(); //stop the camera stream
}
function uploadFace() {
var personName = prompt("请输入你的名字", "");
if (personName) {
//overwrite the empty personName placeholder added at capture time
formData.set("personName", personName);
$.ajax({
type: "POST", // HTTP method
url: "http://localhost:8888/uploadFace", // target URL
data: formData, //payload
async: true, // send asynchronously
processData: false, //do not let jQuery serialize the FormData; send it as-is
contentType: false,
success: function (data) {
console.log(`${data.message}`);
alert(data.message);
},
error: function (e) {
//self.$message.warning(`${e}`);
console.log("不成功" + e);
}
});
videoStream.getTracks()[0].stop(); //stop the camera stream
}
}
</script>
</body>
</html>
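On the server side, the AI microservice has to turn this flat RGB array plus the cols and rows fields back into an image before it can run detection. The snippet below is only a minimal sketch of that step, assuming OpenCV and assuming the HTTP layer has already decoded the "data" field into a contiguous byte buffer; the function name rebuildImage is a placeholder of mine, not the project's API.
#include <opencv2/opencv.hpp>
#include <cstring>
#include <vector>

// Hypothetical helper: rebuild a BGR cv::Mat from the flat RGB byte array the
// web page sends (the alpha channel was stripped client-side, so each pixel is 3 bytes).
cv::Mat rebuildImage(const std::vector<uchar> &rgb, int rows, int cols) {
    cv::Mat img(rows, cols, CV_8UC3);                // rows x cols, 3 channels
    std::memcpy(img.data, rgb.data(), rgb.size());   // copy the raw pixel values
    cv::cvtColor(img, img, cv::COLOR_RGB2BGR);       // canvas data is RGB, OpenCV expects BGR
    return img;
}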
2 Qt Client
The Qt client uses OpenCV to read the computer's camera and runs a local face-detection pass first; only after a face is detected does it upload the image to the server. I won't paste the complete code here; download it from the Git repository. The detection pre-check is sketched below, followed by the core upload code.
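This sketch is a minimal, hypothetical version of that pre-check, assuming OpenCV's bundled Haar cascade; the function name containsFace and the cascade path are my own placeholders, not the repository's code.
#include <opencv2/opencv.hpp>
#include <vector>

// Hypothetical client-side pre-check: only upload when at least one face is visible.
bool containsFace(const cv::Mat &frame) {
    // Assumed path; point this at the cascade file shipped with your OpenCV install.
    static cv::CascadeClassifier detector("haarcascade_frontalface_default.xml");
    cv::Mat gray;
    cv::cvtColor(frame, gray, cv::COLOR_BGR2GRAY);   // the detector works on grayscale
    cv::equalizeHist(gray, gray);                    // normalize lighting a little
    std::vector<cv::Rect> faces;
    detector.detectMultiScale(gray, faces, 1.1, 3, 0, cv::Size(80, 80));
    return !faces.empty();
}
The core upload code from the repository follows.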
google::protobuf::uint32 UploadImageThread::readMessageSize(char *byteCountbuf){
google::protobuf::uint32 size;
google::protobuf::io::ArrayInputStream ais(byteCountbuf, 4);
CodedInputStream coded_input(&ais);
coded_input.ReadVarint32(&size); //decode the length header to get the payload size
return size;
}
//read the response body returned by Netty
void UploadImageThread::readBody(QTcpSocket *clientSocket, google::protobuf::uint32 size, ResultProto &result){
int bytecount;
char *buffer = new char[size + 4];
// read the whole packet (length prefix + body) into the buffer
try{
if ((bytecount = clientSocket->read(buffer, 4 + size)) == -1){
qDebug() << "接受数据失败 " << size << endl;
}
}
catch (...){
qDebug() << "捕获到异常 " << size << endl;
}
//wrap the buffer in a zero-copy input stream for protobuf
google::protobuf::io::ArrayInputStream ais(buffer, size + 4);
CodedInputStream coded_input(&ais);
//read the varint-encoded unsigned length (at most 32 bits)
coded_input.ReadVarint32(&size);
//after reading the message length, limit how many bytes the stream may consume
google::protobuf::io::CodedInputStream::Limit msgLimit = coded_input.PushLimit(size);
//deserialize the message body
result.ParseFromCodedStream(&coded_input);
//once the message has been parsed, call PopLimit() to remove the limit
coded_input.PopLimit(msgLimit);
delete[] buffer;
}
//worker thread that uploads the image
void UploadImageThread::run(){
ResultProto result;
QTcpSocket *clientSocket = new QTcpSocket();
clientSocket->connectToHost(host, port);
if(clientSocket->isValid()){
int size = imageProto.ByteSizeLong() + 4;
char *message = new char[size];
google::protobuf::io::ArrayOutputStream aos(message, size);
google::protobuf::io::CodedOutputStream *coded_output = new google::protobuf::io::CodedOutputStream(&aos);
// first write the varint-encoded length of the serialized message
coded_output->WriteVarint32(imageProto.ByteSizeLong());
// then serialize the message itself into the same buffer
imageProto.SerializeToCodedStream(coded_output);
// destroying the coded stream flushes it back to the array before sending
delete coded_output;
int sendRe = clientSocket->write(message, size);
clientSocket->waitForBytesWritten();
clientSocket->flush();
delete[] message;
if( -1 == sendRe){
result.set_status(400);
result.set_message("客户端发送数据失败!");
}
clientSocket->waitForReadyRead(-1);
char countBuffer[4];
memset(countBuffer, '\0', 4);
int bytecount;
// keep waiting for the server's reply
while (true){
// peek at the reply; countBuffer holds the length prefix
// the first four bytes of a packet carry the varint-encoded length of the protobuf message
bytecount = clientSocket->peek(countBuffer, 4);
if (bytecount == 0){ break; }
if (bytecount == -1){
result.set_status(404);
result.set_message("未知服务器和端口");
qDebug()<<"读取数据出错:"<<result.status()<<result.message().c_str();
break;
}
if (bytecount > 0){
// read and parse the body of the server's reply
readBody(clientSocket,readMessageSize(countBuffer), result);
qDebug()<<"检测到消息:"<<result.status()<<result.message().c_str();
break;
}
}
}
else{
result.set_status(400);
result.set_message("套接字无效!");
}
emit(recvedResultSignal(result));
clientSocket->disconnectFromHost();
clientSocket->close();
delete clientSocket;
}
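For completeness, here is a hedged sketch of how this thread could be driven from the UI. The constructor signature, host, and port here are assumptions (the real class in the repository may receive host, port, and imageProto differently), and ResultProto must be registered as a Qt metatype for the queued, cross-thread signal to be delivered.
// Assumed wiring inside a widget (e.g. the main window's button handler);
// the UploadImageThread constructor used here is a placeholder.
qRegisterMetaType<ResultProto>("ResultProto");   // required for the queued, cross-thread connection

UploadImageThread *worker = new UploadImageThread("127.0.0.1", 8888, imageProto);
connect(worker, &UploadImageThread::recvedResultSignal, this,
        [](const ResultProto &result) {          // runs on the GUI thread
    qDebug() << result.status() << QString::fromStdString(result.message());
});
connect(worker, &QThread::finished, worker, &QObject::deleteLater);
worker->start();                                 // QThread::start() invokes run() in a new thread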