Dynamic face detection on Android with google-play-services-vision:8.1.0

After going through various tutorials online, I decided against OpenCV (my C is honestly a bit weak) and went with Google's play services instead. Adding the dependency
compile 'com.google.android.gms:play-services-vision:8.1.0'
gives access to a set of facial landmarks: eyes, ears, mouth, nose and so on.
A landmark is a point of interest within a face. The Face Detection API does not use landmarks to detect a face; it detects the whole face first and only then looks for landmarks. That is also why landmark detection is an optional feature that you switch on through FaceDetector.Builder.
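
For example, landmark detection is enabled when the detector is built. A minimal sketch of those Builder calls (the same ones used in the overlay code further down; context stands for any available Context):

// Landmark detection is off by default; enable it explicitly on the builder.
FaceDetector detector = new FaceDetector.Builder(context)
        .setLandmarkType(FaceDetector.ALL_LANDMARKS) // also compute eye/ear/nose/cheek/mouth points
        .setMode(FaceDetector.ACCURATE_MODE)         // favour accuracy over speed
        .build();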

You can use these landmarks as an extra source of information, for example where the subject's eyes are, so that your app can react appropriately. Twelve landmarks can be found (the matching Landmark constants appear in the sketch right after this list):

left and right eye
left and right ear
left and right ear tip
nose base
left and right cheek
left and right mouth corner
mouth bottom
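
These map one-to-one onto the constants in com.google.android.gms.vision.face.Landmark. The helper below is only an illustrative sketch (the method name is mine) that turns a landmark type into a readable label:

// Illustrative helper: map the twelve Landmark type constants to readable names.
private static String landmarkName(int type) {
    switch (type) {
        case Landmark.LEFT_EYE:      return "left eye";
        case Landmark.RIGHT_EYE:     return "right eye";
        case Landmark.LEFT_EAR:      return "left ear";
        case Landmark.RIGHT_EAR:     return "right ear";
        case Landmark.LEFT_EAR_TIP:  return "left ear tip";
        case Landmark.RIGHT_EAR_TIP: return "right ear tip";
        case Landmark.NOSE_BASE:     return "nose base";
        case Landmark.LEFT_CHEEK:    return "left cheek";
        case Landmark.RIGHT_CHEEK:   return "right cheek";
        case Landmark.LEFT_MOUTH:    return "left mouth corner";
        case Landmark.RIGHT_MOUTH:   return "right mouth corner";
        case Landmark.BOTTOM_MOUTH:  return "mouth bottom";
        default:                     return "unknown (" + type + ")";
    }
}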
Which landmarks are available depends on the angle of the detected face. For example, in a profile view only one eye is visible, so the other eye cannot be detected. The table below summarizes which landmarks can be detected for a given Euler Y angle (the left/right rotation of the face).
Euler Y angle      Visible landmarks
less than -36°     left eye, left mouth corner, left ear, nose base, left cheek
-36° to -12°       left mouth corner, nose base, mouth bottom, right eye, left eye, left cheek, left ear tip
-12° to 12°        right eye, left eye, nose base, left cheek, right cheek, left mouth corner, right mouth corner, mouth bottom
12° to 36°         right mouth corner, nose base, mouth bottom, left eye, right eye, right cheek, right ear tip
greater than 36°   right eye, right mouth corner, right ear, nose base, right cheek
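
In practice this means code should never assume that a particular landmark is present on a given face. A small lookup like the hypothetical one below returns null when the face angle hides that point (for example Landmark.RIGHT_EYE on a face turned far to the left):

// Hypothetical helper: find one landmark on a detected face, or null if the
// current face angle hides it.
private static PointF findLandmark(Face face, int landmarkType) {
    for (Landmark landmark : face.getLandmarks()) {
        if (landmark.getType() == landmarkType) {
            return landmark.getPosition();
        }
    }
    return null; // not visible at this Euler Y angle
}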

With GMS, face detection runs on a Bitmap; by grabbing a Bitmap for every preview frame, the same detection becomes dynamic face detection on the live preview. MainActivity hosts the camera preview plus an overlay layer that marks the landmarks. Its layout:

<?xml version="1.0" encoding="utf-8"?>
<RelativeLayout xmlns:android="http://schemas.android.com/apk/res/android"
    xmlns:app="http://schemas.android.com/apk/res-auto"
    xmlns:tools="http://schemas.android.com/tools"
    android:layout_width="match_parent"
    android:layout_height="match_parent"
    android:orientation="vertical"
    tools:context="com.admin.facedemo.MainActivity">

    <SurfaceView
        android:id="@+id/surfaceview_camera"
        android:layout_width="match_parent"
        android:layout_height="match_parent"/>

    <com.admin.facedemo.FaceOverlayView
        android:id="@+id/imageview"
        android:layout_width="match_parent"
        android:layout_height="match_parent" />
</RelativeLayout>

MainActivity sets up the preview and grabs every frame from it:


public class MainActivity extends AppCompatActivity implements SurfaceHolder.Callback{
    @ViewInject(R.id.surfaceview_camera)
    private SurfaceView mSurfaceView;
    @ViewInject(R.id.imageview)
    private FaceOverlayView imageView; // overlay that runs detection and draws the results
    private SurfaceHolder mSurfaceHolder;
    private Camera mCamera;
    private Camera.AutoFocusCallback mAutoFocusCallback;
    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.activity_main);
        x.view().inject(this);
        mSurfaceHolder = mSurfaceView.getHolder();
        mSurfaceHolder.setType(SurfaceHolder.SURFACE_TYPE_PUSH_BUFFERS); // deprecated and ignored since API 11; kept for very old devices
        mSurfaceHolder.addCallback(this);
    }
    @Override
    public void surfaceCreated(SurfaceHolder holder) {
        mCamera = Camera.open(1); // open camera id 1 (the front camera on most devices); Camera.open() with no argument opens the rear camera
        getPreViewImage();
        try {
            mCamera.setPreviewDisplay(mSurfaceHolder); // render the preview on the SurfaceView
            // rotate the preview 90 degrees; most devices need 90, some need 180
            mCamera.setDisplayOrientation(90);
            Camera.Parameters parameters = mCamera.getParameters(); // start from the current (default) parameters
            // get the screen resolution to pick a matching preview size
            Display display = this.getWindowManager().getDefaultDisplay();
            Point size = new Point();
            display.getSize(size);
            int screenWidth = size.x;
            int screenHeight = size.y;
            int[] bestResolution = Utils.getBestResolution(parameters, screenHeight, screenWidth);
            parameters.setPreviewSize(bestResolution[0], bestResolution[1]); // preview resolution
            parameters.setPictureSize(bestResolution[0], bestResolution[1]); // picture size
            parameters.setRotation(90); // orientation of captured pictures
            parameters.setFocusMode(Camera.Parameters.FOCUS_MODE_CONTINUOUS_VIDEO); // continuous autofocus
            parameters.setFlashMode(Camera.Parameters.FLASH_MODE_OFF); // flash off
            mCamera.setParameters(parameters); // apply the parameters; without this call none of the settings above take effect
            mCamera.startPreview(); // start the preview
        } catch (IOException e) {
            // on error, release the camera and null it out
            mCamera.release();
            mCamera = null;
            e.printStackTrace();
        }
    }
    @Override
    public void surfaceChanged(SurfaceHolder surfaceHolder, int i, int i1, int i2) {}
    @Override
    public void surfaceDestroyed(SurfaceHolder surfaceHolder) {
        // stop the preview, release the camera, and null it out
        mCamera.stopPreview();
        mCamera.release();
        mCamera = null;
    }

    private void getPreViewImage() {

        mCamera.setPreviewCallback(new Camera.PreviewCallback(){

            @Override
            public void onPreviewFrame(byte[] data, Camera camera) {
                Camera.Size size = camera.getParameters().getPreviewSize();
                try{
                    // wrap the NV21 preview frame, compress it to JPEG, then decode it into a Bitmap
                    YuvImage image = new YuvImage(data, ImageFormat.NV21, size.width, size.height, null);
                    ByteArrayOutputStream stream = new ByteArrayOutputStream();
                    image.compressToJpeg(new Rect(0, 0, size.width, size.height), 80, stream);
                    Bitmap bmp = BitmapFactory.decodeByteArray(stream.toByteArray(), 0, stream.size());
                    // the frame arrives rotated relative to the display, so rotate it upright before detection
                    rotateMyBitmap(bmp);
                    stream.close();
                }catch(Exception ex){
                    Log.e("Sys","Error:"+ex.getMessage());
                }
            }
        });
    }

    public void rotateMyBitmap(Bitmap bmp) {
        // rotate the frame 90 degrees so it matches the device orientation
        Matrix matrix1 = new Matrix();
        matrix1.setRotate(90);
        Bitmap faceBmp0 = Bitmap.createBitmap(bmp, 0, 0, bmp.getWidth(), bmp.getHeight(), matrix1, true);
        // setScale(1, -1) flips the rotated frame vertically to undo the front camera mirroring
        Matrix matrix = new Matrix();
        matrix.setScale(1, -1);
        Bitmap faceBmp = Bitmap.createBitmap(faceBmp0, 0, 0, faceBmp0.getWidth(), faceBmp0.getHeight(), matrix, true);
        // hand the upright frame to the overlay for detection and drawing
        imageView.setBitmap(faceBmp);
    }
}
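
The NV21-to-JPEG-to-Bitmap round trip in onPreviewFrame is the most expensive step of this pipeline. The vision API's Frame can also be built straight from the NV21 preview buffer, which skips the JPEG encode and decode entirely. The sketch below is not part of the code above and assumes a FaceDetector field named mDetector that is built once; the overlay would then have to accept a SparseArray<Face> instead of a Bitmap:

// Sketch: build the Frame directly from the NV21 preview buffer instead of
// compressing it to JPEG and decoding a Bitmap. Frame.ROTATION_90 tells the
// detector the frame is rotated, matching setDisplayOrientation(90) above.
@Override
public void onPreviewFrame(byte[] data, Camera camera) {
    Camera.Size size = camera.getParameters().getPreviewSize();
    Frame frame = new Frame.Builder()
            .setImageData(ByteBuffer.wrap(data), size.width, size.height, ImageFormat.NV21)
            .setRotation(Frame.ROTATION_90)
            .build();
    SparseArray<Face> faces = mDetector.detect(frame); // mDetector: assumed to be created once, not per frame
    // hand the faces (plus the frame dimensions) to the overlay for drawing
}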

FaceOverlayView runs face detection on each frame's Bitmap and draws the result:

public class FaceOverlayView extends View {
    private static final String TAG = "DEBUG-WCL: " + FaceOverlayView.class.getSimpleName();
    private Bitmap mBitmap; // current frame
    private SparseArray<Face> mFaces; // detected faces
    // offsets used to keep the image centred in the view
    private int mHorizonOffset; // horizontal offset
    private int mVerticalOffset; // vertical offset

    public FaceOverlayView(Context context) {
        this(context, null);
    }

    public FaceOverlayView(Context context, AttributeSet attrs) {
        this(context, attrs, 0);
    }

    public FaceOverlayView(Context context, AttributeSet attrs, int defStyleAttr) {
        super(context, attrs, defStyleAttr);
    }

    // set the bitmap to analyse; runs detection and triggers a redraw
    @SuppressWarnings("unused")
    public void setBitmap(Bitmap bitmap) {
        mBitmap = bitmap;
        FaceDetector detector = new FaceDetector.Builder(getContext())
                .setTrackingEnabled(true)
                .setLandmarkType(FaceDetector.ALL_LANDMARKS)
                .setClassificationType(FaceDetector.ALL_CLASSIFICATIONS) // needed for the smile/eye-open probabilities logged below
                .setMode(FaceDetector.ACCURATE_MODE)
                .build();

        if (!detector.isOperational()) {
            Log.e(TAG, "Face detector is not operational (native library not downloaded yet)");
            return;
        } else {
            Frame frame = new Frame.Builder().setBitmap(bitmap).build();
            mFaces = detector.detect(frame);
            detector.release();
        }

        logFaceData(); // log the face data
        invalidate();  // trigger a redraw
    }

    @Override
    protected void onDraw(Canvas canvas) {
        super.onDraw(canvas);

        if ((mBitmap != null) && (mFaces != null)) {
            double scale = drawBitmap(canvas);
            drawFaceBox(canvas, scale);
            drawFaceLandmarks(canvas, scale);
        }
    }

    // compute the scale and offsets that map the bitmap onto the view, returning the scale factor
    private double drawBitmap(Canvas canvas) {
        double viewWidth = canvas.getWidth(); // view width
        double viewHeight = canvas.getHeight(); // view height
        double imageWidth = mBitmap.getWidth(); // image width
        double imageHeight = mBitmap.getHeight(); // image height

        double wScale = viewWidth / imageWidth;
        double hScale = viewHeight / imageHeight;

        double scale;
        Rect destBounds;

        // fit the image into the view, centring it along the longer axis
        if (wScale > hScale) {
            mHorizonOffset = (int) ((viewWidth - imageWidth * hScale) / 2.0f);
            destBounds = new Rect(mHorizonOffset, 0,
                    (int) (imageWidth * hScale) + mHorizonOffset, (int) (imageHeight * hScale));
            scale = hScale;
        } else {
            mVerticalOffset = (int) ((viewHeight - imageHeight * wScale) / 2.0f);
            destBounds = new Rect(0, mVerticalOffset,
                    (int) (imageWidth * wScale), (int) (imageHeight * wScale) + mVerticalOffset);
            scale = wScale;
        }

        //canvas.drawBitmap(mBitmap, null, destBounds, null); // drawing the frame is disabled: the live SurfaceView preview underneath is shown instead

        return scale;
    }

    // draw a box around each detected face
    private void drawFaceBox(Canvas canvas, double scale) {

        // stroke paint for the boxes
        Paint paint = new Paint();
        paint.setColor(Color.GREEN);
        paint.setStyle(Paint.Style.STROKE);
        paint.setStrokeWidth(5);

        float left;
        float top;
        float right;
        float bottom;

        // draw a rectangle for each face
        for (int i = 0; i < mFaces.size(); i++) {
            Face face = mFaces.valueAt(i);

            left = (float) (face.getPosition().x * scale);
            top = (float) (face.getPosition().y * scale);
            right = (float) scale * (face.getPosition().x + face.getWidth());
            bottom = (float) scale * (face.getPosition().y + face.getHeight());

            canvas.drawRect(left + mHorizonOffset, top + mVerticalOffset,
                    right + mHorizonOffset, bottom + mVerticalOffset, paint);
        }
    }

    // draw the facial landmarks
    private void drawFaceLandmarks(Canvas canvas, double scale) {
        Paint paint = new Paint();
        paint.setColor(Color.YELLOW);
        paint.setStyle(Paint.Style.STROKE);
        paint.setStrokeWidth(5);

        for (int i = 0; i < mFaces.size(); i++) {
            Face face = mFaces.valueAt(i);

            for (Landmark landmark : face.getLandmarks()) {
                int cx = (int) (landmark.getPosition().x * scale);
                int cy = (int) (landmark.getPosition().y * scale);
                canvas.drawCircle(cx + mHorizonOffset, cy + mVerticalOffset, 10, paint);
            }

        }
    }

    // log the per-face data
    private void logFaceData() {

        float smilingProbability;
        float leftEyeOpenProbability;
        float rightEyeOpenProbability;
        float eulerY;
        float eulerZ;

        for (int i = 0; i < mFaces.size(); i++) {
            Face face = mFaces.valueAt(i);

            // classification probabilities (they stay at -1 unless ALL_CLASSIFICATIONS is enabled on the builder)
            smilingProbability = face.getIsSmilingProbability();
            leftEyeOpenProbability = face.getIsLeftEyeOpenProbability();
            rightEyeOpenProbability = face.getIsRightEyeOpenProbability();

            eulerY = face.getEulerY(); // yaw: rotation of the face about the vertical axis (left/right)
            eulerZ = face.getEulerZ(); // roll: in-plane tilt of the face

            Log.e(TAG, "Face #" + i);
            Log.e(TAG, "Smiling probability: " + smilingProbability);
            Log.e(TAG, "Left eye open probability: " + leftEyeOpenProbability);
            Log.e(TAG, "Right eye open probability: " + rightEyeOpenProbability);
            Log.e(TAG, "Euler Y (yaw): " + eulerY);
            Log.e(TAG, "Euler Z (roll): " + eulerZ);
            Log.e(TAG, "--------------------");
        }
    }
}

This implementation does not localize faces very precisely, and because detection runs on every frame and the landmarks are drawn on a separate View stacked over the preview SurfaceView, there is noticeable lag. I am now considering an OpenCV-based implementation.
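
Before switching to OpenCV, one lower-cost change is worth trying: FaceOverlayView.setBitmap builds and releases a new FaceDetector for every single frame, and constructing the detector is itself expensive. A sketch of reusing one detector for the lifetime of the view (my own restructuring, not the code above) would look roughly like this:

// Sketch: create the detector once, reuse it per frame, release it when the view goes away.
private FaceDetector mDetector;

public void setBitmap(Bitmap bitmap) {
    mBitmap = bitmap;
    if (mDetector == null) {
        mDetector = new FaceDetector.Builder(getContext())
                .setTrackingEnabled(true)
                .setLandmarkType(FaceDetector.ALL_LANDMARKS)
                .setMode(FaceDetector.FAST_MODE) // trade some accuracy for speed on a live preview
                .build();
    }
    if (!mDetector.isOperational()) {
        Log.e(TAG, "Face detector is not operational yet");
        return;
    }
    mFaces = mDetector.detect(new Frame.Builder().setBitmap(bitmap).build());
    invalidate();
}

@Override
protected void onDetachedFromWindow() {
    super.onDetachedFromWindow();
    if (mDetector != null) {
        mDetector.release(); // release native resources once, not per frame
        mDetector = null;
    }
}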
