前言:先说明这个不是优化人脸识别算法本身,而是解决人脸识别算法在工程运用中遇到的速度慢的问题。项目中有个需要人脸识别的功能,比较慢。学弟是这样做的(因为我目前还不熟悉人脸识别的算法,以下流程据学弟口述):用户上传的照片传到后台后,学弟调用旷视接口判断照片的可用性,如果可以的话,就再调用xx接口提取特征值,提取完特征值后保存到数据库,最后返回给用户识别的结果,目前这样有点慢。
目录
第一章 原代码(学弟写的)
1.1 controller层
接收用户上传的左、正、右脸的照片并提交给service层处理
/**
 * Receives the left/front/right face photos uploaded by the user and delegates
 * validation and feature extraction to the service layer.
 *
 * @param session   HTTP session; must contain FAMILY_STUDENTID and FAMILY_SCHOOLID
 * @param leftFace  left-profile photo, may be absent
 * @param midFace   frontal photo, may be absent
 * @param rightFace right-profile photo, may be absent
 * @return map from photo direction (1=left, 2=front, 3=right) to a result message
 * @throws IllegalStateException if the required session attributes are missing
 */
@RequestMapping(value="/addFace",method = RequestMethod.POST)
public @ResponseBody Map<Integer,String> addFace(HttpSession session,
@RequestParam(value = "leftFace" ,required = false) MultipartFile leftFace,
@RequestParam(value = "midFace",required = false) MultipartFile midFace,
@RequestParam(value = "rightFace",required = false)MultipartFile rightFace){
    // Fail fast with a clear message instead of an opaque NullPointerException
    // when the session attributes are absent (e.g. the session has expired).
    Object studentAttr = session.getAttribute("FAMILY_STUDENTID");
    Object schoolAttr = session.getAttribute("FAMILY_SCHOOLID");
    if (studentAttr == null || schoolAttr == null) {
        throw new IllegalStateException("FAMILY_STUDENTID/FAMILY_SCHOOLID missing from session");
    }
    String studentId = studentAttr.toString();
    int schoolId = Integer.parseInt(schoolAttr.toString());
    return faceService.uploadFace(studentId, leftFace, midFace, rightFace, schoolId);
}
1.2 service层
学弟在这里使用了FutureTask,将调用旷视接口的返回值放到了HashMap中,并在FutureTask任务结束后调用提取特征值的faceDetect,FutureTask会被提交到线程池中,最后将储存了检测结果的result返回。
// NOTE(review): newCachedThreadPool is unbounded; consider a fixed-size pool if
// upload traffic can spike — confirm expected concurrency before changing.
private ExecutorService executor = Executors.newCachedThreadPool();

/**
 * Original (pre-optimization) implementation: validates each uploaded photo via
 * the Face++ task on the thread pool, then extracts and stores feature values
 * in the task's done() callback.
 *
 * Fixes over the previous version:
 *  - the busy-wait loop (sleep + isDone polling) is replaced with Future.get(),
 *    which blocks without spinning and also establishes the happens-before edge
 *    required to safely read the result map written by the pool threads
 *    (isDone() alone gives no such guarantee);
 *  - the triplicated per-photo logic is factored into a single helper;
 *  - InterruptedException re-interrupts the current thread instead of being swallowed.
 *
 * @return map from photo direction (1=left, 2=front, 3=right) to a result message
 */
@Override
public Map<Integer,String> uploadFace(String studentId,MultipartFile left, MultipartFile mid,MultipartFile right, int schoolId){
    Map<Integer,String> result = new HashMap<Integer,String>();
    FutureTask<?>[] tasks = new FutureTask<?>[]{
        submitFaceTask(left, 1, "left", result, studentId),
        submitFaceTask(mid, 2, "mid", result, studentId),
        submitFaceTask(right, 3, "right", result, studentId)
    };
    // Wait for every submitted task; get() blocks instead of polling isDone().
    for (FutureTask<?> task : tasks) {
        if (task == null) {
            continue; // that photo was not uploaded or failed to read
        }
        try {
            task.get();
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt(); // restore interrupt status for callers
            break;
        } catch (java.util.concurrent.ExecutionException e) {
            e.printStackTrace();
        }
    }
    return result;
}

/**
 * Submits the validation task for one photo.
 *
 * @param file      the uploaded photo; may be null when not provided
 * @param direction 1=left, 2=front, 3=right
 * @param label     label used in the timing log line
 * @return the submitted future, or null when the photo is absent or unreadable
 */
private FutureTask<?> submitFaceTask(MultipartFile file, int direction, String label, Map<Integer,String> result, String studentId){
    if (file == null) {
        return null;
    }
    long start = System.currentTimeMillis();
    try {
        byte[] bytes = file.getBytes();
        String src = file.getOriginalFilename();
        String prefix = src.substring(src.lastIndexOf(".") + 1).toLowerCase();
        // The task checks the photo against the Face++ API and records the outcome in result.
        FaceplusplusTask task = new FaceplusplusTask(bytes, direction, result, prefix);
        FutureTask<?> ft = new FutureTask(task){
            @Override
            protected void done(){
                System.out.println(label + ":" + (System.currentTimeMillis() - start));
                // After validation completes, extract the feature values and persist them.
                faceDetect(bytes, prefix, direction, studentId);
            }
        };
        executor.submit(ft);
        return ft;
    } catch (IOException e) {
        e.printStackTrace();
        return null;
    }
}
第二章 优化思路
大概分析了一下他的代码:首先,FutureTask可以改成CountDownLatch,这样就不用循环查询是否完成了;而且FutureTask主要用于有返回值的需求,这里并不需要返回值。其次,他是提取完特征值后才返回结果,其实判断图片是否符合要求后就可以返回结果了,提取特征值可以异步来做。
所以我在这里,先把判断照片是否可用改成了用CountDownLatch来等待,这样就不用循环轮询,然后把提取特征值通过RocketMQ做成了异步。
其实也可以不用RocketMQ,直接开新线程处理。我思考了一下,如果开新线程的话,一旦线程处理失败就不好继续处理;而用MQ的话有消息重发等机制。同时用RocketMQ也能多学点东西,之前一直没把RocketMQ用到实际的场景过。
第三章 优化后的代码
看了第二章的解析应该大概理解意思了,后面主要是代码了
3.1 service层
/**
 * Optimized implementation: validates the three photos in parallel and returns
 * as soon as validation finishes; feature extraction happens asynchronously
 * via RocketMQ inside FaceJudgeThread.
 *
 * @return map from photo direction (1=left, 2=front, 3=right) to a result message
 */
@Override
public Map<Integer,String> uploadFace(String studentId,MultipartFile left, MultipartFile mid,MultipartFile right, int schoolId){
    // Three pool threads put into this map concurrently; concurrent puts can
    // structurally corrupt a plain HashMap, so use a ConcurrentHashMap.
    Map<Integer,String> result = new java.util.concurrent.ConcurrentHashMap<Integer,String>();
    CountDownLatch start = new CountDownLatch(3);
    try {
        executor.submit(new FaceJudgeThread(left,studentId,1,result,start));
        executor.submit(new FaceJudgeThread(mid,studentId,2,result,start));
        executor.submit(new FaceJudgeThread(right,studentId,3,result,start));
        start.await();
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt(); // preserve interrupt status for callers
        e.printStackTrace();
    }
    return result;
}
3.2 FaceJudgeThread
把检测图片是否合理的逻辑写到了线程中;图片合理时,把提取特征值的消息封装成对象,序列化后发给MQ。
/**
 * Validates one uploaded face photo (file format, face detectability, blur,
 * head pose) and, when the photo passes, publishes a feature-extraction request
 * to RocketMQ. The shared result map collects exactly one message per direction;
 * the latch lets the caller wait until all three photos have been judged.
 */
public class FaceJudgeThread implements Runnable {
    MultipartFile file;
    int direction; // 1 = left profile, 2 = frontal, 3 = right profile
    Map<Integer,String> result;
    CountDownLatch countDownLatch;
    String studentId;

    public FaceJudgeThread(MultipartFile file,String studentId, int direction, Map<Integer, String> result,CountDownLatch countDownLatch) {
        this.file = file;
        this.studentId = studentId;
        this.direction = direction;
        this.result = result;
        this.countDownLatch = countDownLatch;
    }

    @Override
    public void run() {
        // Count down in finally so the caller's await() can never hang, even if
        // validation throws unexpectedly (the original could leave the latch open).
        try {
            judge();
        } finally {
            countDownLatch.countDown();
        }
    }

    /**
     * Runs the validation pipeline. Each failed check records its message and
     * returns immediately; the original fell through after recording an error,
     * which could NPE on a missing file / null detect result and could later
     * overwrite the error message with "成功".
     */
    private void judge() {
        if (file == null) {
            return; // photo not uploaded for this direction
        }
        String src = file.getOriginalFilename();
        String prefix = src.substring(src.lastIndexOf(".") + 1).toLowerCase();
        if (!prefix.equals("png") && !prefix.equals("jpg") && !prefix.equals("jpeg")) {
            result.put(direction, "文件格式错误");
            return;
        }
        byte[] photoBytes;
        try {
            photoBytes = file.getBytes();
        } catch (IOException e) {
            e.printStackTrace();
            return; // nothing to validate without the bytes
        }
        FaceDetect faceDetect = new FaceDetect();
        faceDetect.setBytes(photoBytes);
        faceDetect.setFileType(prefix);
        JSONObject detectResult = faceDetect.faceDetect();
        if (detectResult == null) {
            result.put(direction, "检测不到人脸");
            return;
        }
        JSONObject attributes = detectResult.getJSONObject("attributes");
        JSONObject headpose = attributes.getJSONObject("headpose");
        JSONObject blurness = attributes.getJSONObject("blur").getJSONObject("blurness");
        float value = blurness.getFloatValue("value");
        float threshold = blurness.getFloat("threshold");
        if (value > threshold) {
            result.put(direction, "图片太模糊");
            return;
        }
        // yaw_angle = shaking the head left/right; pitch_angle = nodding up/down.
        float yaw_angle = headpose.getFloatValue("yaw_angle");
        float pitch_angle = headpose.getFloatValue("pitch_angle");
        switch (direction) {
            case 1:
                if (!(-70 <= yaw_angle && yaw_angle <= -30)) {
                    result.put(direction, "左侧脸图片角度不符合");
                    return;
                }
                break;
            case 2:
                if (!((-20 <= yaw_angle && yaw_angle <= 20) && (-20 <= pitch_angle && pitch_angle <= 20))) {
                    result.put(direction, "正脸图片角度不符合");
                    return;
                }
                break;
            case 3:
                if (!(30 <= yaw_angle && yaw_angle <= 70)) {
                    result.put(direction, "右侧脸图片角度不符合");
                    return;
                }
                break;
            default:
                result.put(direction, "参数错误");
                return;
        }
        sendExtractionMessage(photoBytes, prefix);
        result.put(direction, "成功");
    }

    /** Serializes the extraction request and publishes it to the RocketMQ topic. */
    private void sendExtractionMessage(byte[] photoBytes, String prefix) {
        DefaultMQProducer producer = (DefaultMQProducer) SpringContextUtil.getBean("defaultMQProducer");
        FaceDetectDto faceDetectDto = new FaceDetectDto(photoBytes, prefix, direction, studentId);
        // try-with-resources closes the streams; 'sendResult' is renamed so it no
        // longer shadows the result field as the original local did.
        try (ByteArrayOutputStream bout = new ByteArrayOutputStream();
             ObjectOutputStream oout = new ObjectOutputStream(bout)) {
            oout.writeObject(faceDetectDto);
            oout.flush();
            Message mqMessage = new Message("faceRecognitionTopic", "user-tag", bout.toByteArray());
            SendResult sendResult = producer.send(mqMessage);
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}
3.3 消费者
/**
 * Starts the RocketMQ push consumer once this bean is constructed. Each message
 * body is a serialized FaceDetectDto; handleMessage extracts the face feature
 * values and saves them to the database. Returning RECONSUME_LATER on failure
 * makes RocketMQ redeliver the message, so a failed extraction is retried
 * automatically instead of being lost.
 */
@PostConstruct
public void consumer() {
DefaultMQPushConsumer consumer = new DefaultMQPushConsumer(consumerGroup);
consumer.setNamesrvAddr(namesrvAddr);
try {
consumer.subscribe("faceRecognitionTopic", "user-tag");
consumer.registerMessageListener((MessageListenerConcurrently) (list, context) -> {
try {
for (MessageExt messageExt : list) {
// Extract the feature values and persist them to the database.
handleMessage(messageExt);
}
} catch (Exception e) {
e.printStackTrace();
return ConsumeConcurrentlyStatus.RECONSUME_LATER; // retry the batch later
}
return ConsumeConcurrentlyStatus.CONSUME_SUCCESS; // whole batch consumed
});
consumer.start();
} catch (Exception e) {
e.printStackTrace();
}
}
/**
 * Deserializes the FaceDetectDto from the message body, extracts the face
 * feature values, and saves them to the database.
 * (Implementation omitted in the article.)
 *
 * @param messageExt the RocketMQ message carrying the serialized FaceDetectDto
 * @throws IOException            if reading the message body fails
 * @throws ClassNotFoundException if the serialized DTO class cannot be resolved
 */
public void handleMessage(MessageExt messageExt) throws IOException, ClassNotFoundException {
//...
}