Microsoft Cognitive Services APIs官网
参考的官网demo
看了这个demo基本就会了,很简单,获取一张照片,然后检测。实时检测的话就是调用camera preview然后把每一帧当做照片,然后检测。
使用微软的Face API实现人脸的检测和识别。可以实现 性别、年龄、名字(提前把人名注册好)。只使用Face的API。
1. 获取Microsoft Face API key
没有账号就申请一个,有的话直接去这获取API key。
2. 获取Camera preview frame
这个比较简单可以直接在网上找demo。
注意:目前我用的是camera2的api,在preview的时候选择jpeg,有的机器可能不支持,要选择YUV420_888。这里只能用jpeg,用别的不行,还在研究怎么用yuv
3. 检测人脸,性别,年龄
添加库
dependencies {
    compile fileTree(include: ['*.jar'], dir: 'libs')
    androidTestCompile('com.android.support.test.espresso:espresso-core:2.2.2', {
        exclude group: 'com.android.support', module: 'support-annotations'
    })
    compile 'com.android.support:appcompat-v7:25.0.0'
    // Microsoft Cognitive Services Face API client
    compile 'com.microsoft.projectoxford:face:1.2.1'
    testCompile 'junit:junit:4.12'
    compile 'com.android.support:support-v4:25.0.0'
    compile 'com.android.support:design:25.0.0'
    compile files('libs/core-3.2.1.jar')
    compile project(':openCVLibrary2411')
}
private ImageReader.OnImageAvailableListener mImageAvailableListener = new ImageReader.OnImageAvailableListener() { @Override public void onImageAvailable(ImageReader reader) { // TODO Auto-generated method stub //必须加否则图像会卡住不动 Image image = reader.acquireNextImage(); if(detectFlag&&faceFlag){ detectFlag=false; buffer = image.getPlanes()[0].getBuffer(); byte[] bytes = new byte[buffer.remaining()]; buffer.get(bytes); image.close(); new DetectionTask().execute(bytes); }else image.close(); } };
private class DetectionTask extends AsyncTask<byte[], String, Face[]> { ByteArrayInputStream is; ByteArrayOutputStream output; ByteArrayInputStream inputStream; @Override protected Face[] doInBackground(byte[]... bytes) { FaceServiceClient faceServiceClient = SampleApp.getFaceServiceClient(); try { is = new ByteArrayInputStream(bytes[0]); bitmap1=BitmapFactory.decodeStream(is); output = new ByteArrayOutputStream(); bitmap1.compress(Bitmap.CompressFormat.JPEG, 100, output); inputStream = new ByteArrayInputStream(output.toByteArray()); // Start detection. Log.i("t","start detecting "); return faceServiceClient.detect( inputStream, /*Input stream of image to detect*/ true, /* Whether to return face ID*/ true, /*Whether to return face landmarks*/ /* Which face attributes to analyze, currently we support: age,gender,headPose,smile,facialHair*/ new FaceServiceClient.FaceAttributeType[] { FaceServiceClient.FaceAttributeType.Age, FaceServiceClient.FaceAttributeType.Gender, FaceServiceClient.FaceAttributeType.Glasses, FaceServiceClient.FaceAttributeType.Smile, FaceServiceClient.FaceAttributeType.HeadPose }); } catch (Exception e) { Log.i("t","end detect exception "+e.getMessage()); release(); return null; } } @Override protected void onPostExecute(Face[] faces) { super.onPostExecute(faces); String detectionResult; if (faces != null) { detectionResult = faces.length + " face" + (faces.length != 1 ? 
"s" : "") + " detected"; Log.i("t", "face " + detectionResult); if(faces.length>0){ mFaces=faces; faceList=Arrays.asList(faces); List<UUID> faceIds = new ArrayList<>(); for (Face face: faceList) { faceIds.add(face.faceId); } Set<String> personGroupIds = StorageHelper.getAllPersonGroupIds(FaceDetectActivity.this); String groupId=""; for (String personGroupId: personGroupIds) { groupId=personGroupId; break; } new IdentifyTask(groupId).execute(faceIds.toArray(new UUID[faceIds.size()])); }else { mResultImage.setImageBitmap(bitmap1); } // mResultImage.setImageBitmap(ImageHelper.drawFaceRectanglesOnBitmap( // bitmap1, faces, true)); } faceFlag=true; release(); Log.i("t","end detect "); }
每次获取到图像数据就去调用微软的API来检测人脸。由于用的是免费版,有访问限制(一分钟只能访问20次),所以加了flag和计时器。用多个key可以减少限制时间,但这只适用于单纯的人脸检测;如果要做人名识别就不行:人名识别的key必须和注册group时用的key一致,人脸检测也要用同一个key,否则会提示找不到group。所以要同时检测年龄、性别、名字,必须全程使用注册group时的那一个key。要注意的是微软demo中传的是
/*Input stream of image to detect*/
但是我们preview得到的是ImageReader,要转换,直接转成InputStream也不行,调用后返回格式不对,要先转成Bitmap,然后在转成InputStream。注意别oom!!!
4. 检测人名
首先得先添加人,所以要用到微软demo中的groupmanager,可以直接加到我们的项目里面。
然后在检测到人脸后直接再去检测人名。上面代码中有,可以看。
private class IdentifyTask extends AsyncTask<UUID, String, IdentifyResult[]>{ String mPersonGroupId; IdentifyTask(String personGroupId) { this.mPersonGroupId = personGroupId; } @Override protected IdentifyResult[] doInBackground(UUID... uuids) { Log.i("t","start identify "); FaceServiceClient faceServiceClient = SampleApp.getFaceServiceClient(); try{ TrainingStatus trainingStatus = faceServiceClient.getPersonGroupTrainingStatus( this.mPersonGroupId); /* personGroupId */ if (trainingStatus.status != TrainingStatus.Status.Succeeded) { return null; } // Start identification. return faceServiceClient.identity( this.mPersonGroupId, /* personGroupId */ uuids, /* faceIds */ 1); /* maxNumOfCandidatesReturned */ } catch (Exception e) { Log.i("t","end identify "+e.getMessage()); return null; } } @Override protected void onPostExecute(IdentifyResult[] identifyResults) { Log.i("t","end identify "); if(identifyResults!=null){ resultList=Arrays.asList(identifyResults); mResultImage.setImageBitmap(ImageHelper.drawFaceRectanglesOnBitmap( bitmap1, mPersonGroupId,faceList,resultList,FaceDetectActivity.this, true)); }else { mResultImage.setImageBitmap(ImageHelper.drawFaceRectanglesOnBitmap( bitmap1, mFaces, true)); } faceFlag=true; } }
这里对微软demo中的ImageHelper有做一些修改。直接复制原来的,然后改了函数的参数,这样就不对它原来的功能有变动,对里面做了一些处理。
// Draw detected face rectangles in the original image and identify person. public static Bitmap drawFaceRectanglesOnBitmap( Bitmap originalBitmap, String personGroupId, List<Face> faces, List<IdentifyResult> results, Context context, boolean drawLandmarks) { Bitmap bitmap = originalBitmap.copy(Bitmap.Config.ARGB_8888, true); Canvas canvas = new Canvas(bitmap); Paint paint = new Paint(); paint.setAntiAlias(true); paint.setStyle(Paint.Style.STROKE); paint.setColor(Color.GREEN); Paint textPaint=new Paint(); textPaint.setColor(Color.GREEN); textPaint.setTextSize(20); int stokeWidth = Math.max(originalBitmap.getWidth(), originalBitmap.getHeight()) / 100; if (stokeWidth == 0) { stokeWidth = 1; } paint.setStrokeWidth(stokeWidth); if (faces != null) { for (int i=0;i<faces.size();i++) { FaceRectangle faceRectangle = calculateFaceRectangle(bitmap, faces.get(i).faceRectangle, FACE_RECT_SCALE_RATIO); canvas.drawRect( faceRectangle.left, faceRectangle.top, faceRectangle.left + faceRectangle.width, faceRectangle.top + faceRectangle.height, paint); //draw text String name="Unknown Person"; if(results.get(i).candidates.size() > 0){ DecimalFormat formatter = new DecimalFormat("#0.00"); String personId = results.get(i).candidates.get(0).personId.toString(); String personName = StorageHelper.getPersonName( personId, personGroupId, context); String identity = "Person: " + personName + "\n" + "Confidence: " + formatter.format( results.get(i).candidates.get(0).confidence); Log.i("t"," "+identity); name=identity; } DecimalFormat formatter = new DecimalFormat("#0.0"); String text="Age: " + formatter.format(faces.get(i).faceAttributes.age) +" Gender: "+ faces.get(i).faceAttributes.gender; canvas.drawText(text,faceRectangle.left,faceRectangle.top + faceRectangle.height+20,textPaint); canvas.drawText(name,faceRectangle.left,faceRectangle.top + faceRectangle.height+40,textPaint); if (drawLandmarks) { int radius = faces.get(i).faceRectangle.width / 30; if (radius == 0) { radius = 1; } 
paint.setStyle(Paint.Style.FILL); paint.setStrokeWidth(radius); canvas.drawCircle( (float) faces.get(i).faceLandmarks.pupilLeft.x, (float) faces.get(i).faceLandmarks.pupilLeft.y, radius, paint); canvas.drawCircle( (float) faces.get(i).faceLandmarks.pupilRight.x, (float) faces.get(i).faceLandmarks.pupilRight.y, radius, paint); canvas.drawCircle( (float) faces.get(i).faceLandmarks.noseTip.x, (float) faces.get(i).faceLandmarks.noseTip.y, radius, paint); canvas.drawCircle( (float) faces.get(i).faceLandmarks.mouthLeft.x, (float) faces.get(i).faceLandmarks.mouthLeft.y, radius, paint); canvas.drawCircle( (float) faces.get(i).faceLandmarks.mouthRight.x, (float) faces.get(i).faceLandmarks.mouthRight.y, radius, paint); paint.setStyle(Paint.Style.STROKE); paint.setStrokeWidth(stokeWidth); } } } return bitmap; }这里是画人脸的方框以及年龄、性别,后面的人名是我加的。注意循环什么,要不人名和人对不起来。可以参考微软demo怎么做的,然后再做一些修改。