#include <jni.h>
#include <string>
#include <vector>
#include "opencv2/opencv.hpp"
#include "android/bitmap.h"
#include "android/log.h"
// Pull in the OpenCV core and DNN namespaces used throughout this file.
using namespace cv;
using namespace dnn;
/**
- 加载人脸识别的分类器文件
*/
CascadeClassifier cascadeClassifier;
extern “C”
JNIEXPORT void JNICALL
Java_com_hxg_ndkface_FaceDetection_loadCascade(JNIEnv *env, jobject instance, jstring file_path) {
const char *filePath = env->GetStringUTFChars(file_path, 0);
cascadeClassifier.load(filePath);
__android_log_print(ANDROID_LOG_INFO, “TTTTT”, “%s”, “分类器文件加载成功”);
env->ReleaseStringUTFChars(file_path, filePath);
}
- 只检测是否有人脸
/**
- 只检测是否有人脸
*/
extern “C”
JNIEXPORT jboolean JNICALL
Java_com_hxg_ndkface_FaceDetection_faceDetection(JNIEnv *env, jobject thiz, jobject bitmap) {
//检测人脸,opencv 有个关键类 是Mat open只会处理Mat android里面是Bitmap
//1.Bitmap转成opencv能操作的C++对象Mat
Mat mat;
bitmap2Mat(env, mat, bitmap);
//处理灰度图,提高效率
Mat gray_mat;
__android_log_print(ANDROID_LOG_INFO, “TTTTT”, “%s”, “处理灰度图”);
cvtColor(mat, gray_mat, COLOR_BGRA2GRAY);
__android_log_print(ANDROID_LOG_INFO, “TTTTT”, “%s”, “再次处理 直方均衡补偿”);
//再次处理 直方均衡补偿
Mat equalize_mat;
equalizeHist(gray_mat, equalize_mat);
//识别人脸,要加载人脸分类器文件
std::vector
cascadeClassifier.detectMultiScale(equalize_mat, faces, 1.1, 3, CV_HAAR_SCALE_IMAGE,
Size(30, 30));
__android_log_print(ANDROID_LOG_INFO, “TTTTT”, “人脸个数:%d”, faces.size());
if (faces.size() == 1) {
return true;
}
return false;
}
- 检测有无人脸,并保存到文件夹
/**
- 检测有无人脸,并保存到文件夹
*/
extern “C”
JNIEXPORT jint JNICALL
Java_com_hxg_ndkface_FaceDetection_faceDetectionSaveInfo(JNIEnv *env, jobject instance,
jstring name,
jobject bitmap) {
const char *filePath = env->GetStringUTFChars(name, 0);
//检测人脸,opencv 有个关键类 是Mat open只会处理Mat android里面是Bitmap
//1.Bitmap转成opencv能操作的C++对象Mat
Mat mat;
bitmap2Mat(env, mat, bitmap);
//处理灰度图,提高效率
Mat gray_mat;
cvtColor(mat, gray_mat, COLOR_BGRA2GRAY);
//再次处理 直方均衡补偿
Mat equalize_mat;
equalizeHist(gray_mat, equalize_mat);
//识别人脸,要加载人脸分类器文件
std::vector
cascadeClassifier.detectMultiScale(equalize_mat, faces, 1.1, 5, 0 | CV_HAAR_SCALE_IMAGE,
Size(160, 160));
__android_log_print(ANDROID_LOG_INFO, “TTTTT”, “人脸个数:%d”, faces.size());
if (faces.size() == 1) {
Rect faceRect = faces[0];
//在人脸部分画个图
rectangle(mat, faceRect, Scalar(255, 0, 0), 3);
__android_log_print(ANDROID_LOG_ERROR, “TTTTT”, “人脸个数:%s”, “在人脸部分画个图”);
//把mat我们又放到bitmap中
mat2Bitmap(env, mat, bitmap);
//保存人脸信息Mat,图片jpg
Mat saveMat = Mat(equalize_mat, faceRect);
//保存face_info_mat
imwrite(filePath, equalize_mat);
return 1;
}
env->ReleaseStringUTFChars(name, filePath);
return 0;
}
- 人脸对比
/**
*人脸对比
*/
extern “C”
JNIEXPORT jdouble JNICALL
Java_com_hxg_ndkface_FaceDetection_histogramMatch(JNIEnv *env, jobject instance, jobject bitmap1,
jobject bitmap2) {
//1.Bitmap转成opencv能操作的C++对象Mat
Mat mat, mat1;
bitmap2Mat(env, mat, bitmap1);
bitmap2Mat(env, mat1, bitmap2);
// 转灰度矩阵
cvtColor(mat, mat, COLOR_BGR2HSV);
cvtColor(mat1, mat1, COLOR_BGR2HSV);
int channels[] = {0, 1};
int histsize[] = {180, 255};
float r1[] = {0, 180};
float r2[] = {0, 255};
const float *ranges[] = {r1, r2};
Mat hist1, hist2;
calcHist(&mat, 3, channels, Mat(), hist1, 2, histsize, ranges, true);
//https://www.cnblogs.com/bjxqmy/p/12292421.html
normalize(hist1, hist1, 1, 0, NORM_L1);
calcHist(&mat1, 3, channels, Mat(), hist2, 2, histsize, ranges, true);
normalize(hist2, hist2, 1, 0, NORM_L1);
double similarity = compareHist(hist1, hist2, HISTCMP_CORREL);
__android_log_print(ANDROID_LOG_ERROR, “TTTTT”, “相识度:%f”, similarity);
return similarity;
}
// Dnn模式的人脸识别,并抠图 — DNN-based face detection with cropping. The two
// helper methods below first copy the TensorFlow model files (.pbtxt / .pb)
// from res/raw into app-private storage so the native code can read them.
private void copyCaseCadeFilePbtxt() {
InputStream is = null;
FileOutputStream os = null;
try {
// load cascade file from application resources
is = getResources().openRawResource(R.raw.opencv_face_detector);
File cascadeDir = getDir(“cascade”, Context.MODE_PRIVATE);
mCascadeFile = new File(cascadeDir, “opencv_face_detector.pbtxt”);
if (mCascadeFile.exists()) return;
os = new FileOutputStream(mCascadeFile);
byte[] buffer = new byte[1024 * 1024];
int bytesRead;
while ((bytesRead = is.read(buffer)) != -1) {
os.write(buffer, 0, bytesRead);
}
is.close();
os.close();
} catch (IOException e) {
e.printStackTrace();
} finally {
try {
if (is != null) {
is.close();
}
if (os != null) {
os.close();
}
} catch (IOException e) {
e.printStackTrace();
}
}
}
private void copyCaseCadeFileUint8() {
InputStream is = null;
FileOutputStream os = null;
try {
// load cascade file from application resources
is = getResources().openRawResource(R.raw.opencv_face_detector_uint8);
File cascadeDir = getDir(“cascade”, Context.MODE_PRIVATE);
mCascadeFile = new File(cascadeDir, “opencv_face_detector_uint8.pb”);
if (mCascadeFile.exists()) return;
os = new FileOutputStream(mCascadeFile);
byte[] buffer = new byte[1024 * 1024];
int bytesRead;
while ((bytesRead = is.read(buffer)) != -1) {
os.write(buffer, 0, bytesRead);
}
is.close();
os.close();
} catch (IOException e) {
e.printStackTrace();
} finally {
try {
if (is != null) {
is.close();
}
if (os != null) {
os.close();
}
} catch (IOException e) {
e.printStackTrace();
}
}
}
/**
*Dnn模式的人脸识别,并抠图
*/
extern “C”
JNIEXPORT jboolean JNICALL
Java_com_hxg_ndkface_FaceDetection_faceDnnDetection(JNIEnv *env, jobject instance,
jstring model_binary,
jstring model_desc,
jstring checkPath,
jstring resultPath) {
const char *model_binary_path = env->GetStringUTFChars(model_binary, 0);
const char *model_desc_path = env->GetStringUTFChars(model_desc, 0);
const char *check_path = env->GetStringUTFChars(checkPath, 0);
const char *result_path = env->GetStringUTFChars(resultPath, 0);
Net net = readNetFromTensorflow(model_binary_path, model_desc_path);
net.setPreferableBackend(DNN_BACKEND_OPENCV);
net.setPreferableTarget(DNN_TARGET_CPU);
if (net.empty()) {
__android_log_print(ANDROID_LOG_ERROR, “TTTTT”, “%s”, “could not load net…”);
return false;
}
Mat frame = imread(check_path); //读入检测文件
__android_log_print(ANDROID_LOG_ERROR, “TTTTT”, “%s”, “输入数据调整”);
// 输入数据调整
Mat inputBlob = blobFromImage(frame, 1.0,
Size(300, 300), Scalar(104.0, 177.0, 123.0), false, false);
net.setInput(inputBlob, “data”);
// 人脸检测
Mat detection = net.forward(“detection_out”);
Mat detectionMat(detection.size[2], detection.size[3], CV_32F, detection.ptr());
Mat face_area;
for (int i = 0; i < detectionMat.rows; i++) {
// 置信度 0~1之间
float confidence = detectionMat.at(i, 2);
if (confidence > 0.7) {
//count++;
int xLeftBottom = static_cast(detectionMat.at(i, 3) * frame.cols);
int yLeftBottom = static_cast(detectionMat.at(i, 4) * frame.rows);
int xRightTop = static_cast(detectionMat.at(i, 5) * frame.cols);
int yRightTop = static_cast(detectionMat.at(i, 6) * frame.rows);
Rect object((int) xLeftBottom, (int) yLeftBottom,
(int) (xRightTop - xLeftBottom),
(int) (yRightTop - yLeftBottom));
face_area = frame(object); //扣出图片
rectangle(frame, object, Scalar(0, 255, 0)); //画框
}
}
imwrite(result_path, face_area); //写出文件
env->ReleaseStringUTFChars(model_binary, model_binary_path);
env->ReleaseStringUTFChars(model_desc, model_desc_path);
env->ReleaseStringUTFChars(checkPath, check_path);
env->ReleaseStringUTFChars(resultPath, result_path);
return true;
}
- Bitmap和Mat互转
/**
-
Bitmap转成opencv能操作的C++对象Mat
-
@param env
-
@param mat
-
@param bitmap
*/
void bitmap2Mat(JNIEnv *env, Mat &mat, jobject bitmap) {
//Mat 里面有个type :CV_8UC4刚好对上我们的Bitmap中的ARGB_8888 , CV_8UC2对应Bitmap中的RGB_555
//获取 bitmap 信息
AndroidBitmapInfo info;
void *pixels;
try {
// AndroidBitmap_getInfo(env, bitmap, &info);
//锁定Bitmap画布
// AndroidBitmap_lockPixels(env, bitmap, &pixels);
CV_Assert(AndroidBitmap_getInfo(env, bitmap, &info) >= 0);
CV_Assert(info.format == ANDROID_BITMAP_FORMAT_RGBA_8888 ||
info.format == ANDROID_BITMAP_FORMAT_RGB_565);
CV_Assert(AndroidBitmap_lockPixels(env, bitmap, &pixels) >= 0);
CV_Assert(pixels);
//指定mat的宽高type BGRA
mat.create(info.height, info.width, CV_8UC4);
if (info.format == ANDROID_BITMAP_FORMAT_RGBA_8888) {
//对应mat应该是CV_8UC4
Mat temp(info.height, info.width, CV_8UC4, pixels);
//把数据temp复制到mat里面
temp.copyTo(mat);
} else if (info.format == ANDROID_BITMAP_FORMAT_RGB_565) {
//对应mat应该是CV_8UC2
Mat temp(info.height, info.width, CV_8UC2, pixels);
//mat 是CV_8UC4 ,CV_8UC2 > CV_8UC4
cvtColor(temp, mat, COLOR_BGR5652BGRA);
}
//解锁Bitmap画布
AndroidBitmap_unlockPixels(env, bitmap);
return;
} catch (Exception &e) {
AndroidBitmap_unlockPixels(env, bitmap);
jclass je = env->FindClass(“java/lang/Exception”);
env->ThrowNew(je, e.what());
return;
} catch (…) {
AndroidBitmap_unlockPixels(env, bitmap);
jclass je = env->FindClass(“java/lang/Exception”);
env->ThrowNew(je, “Unknown exception in JNI code {nBitmapToMat}”);
return;
}
}
/**
 * Writes an OpenCV Mat back into an Android Bitmap of the same dimensions.
 *
 * Converts between Mat channel layouts (CV_8UC1/3/4) and the bitmap format
 * (ARGB_8888 or RGB_565) as needed. Any failure is rethrown as a
 * java.lang.Exception on the JNI side.
 *
 * @param env    JNI environment
 * @param mat    source Mat; must match the bitmap's width/height
 * @param bitmap destination Android Bitmap
 */
void mat2Bitmap(JNIEnv *env, Mat mat, jobject bitmap) {
    AndroidBitmapInfo info;
    void *pixels;
    try {
        // Query format/size and validate that the Mat matches the Bitmap.
        CV_Assert(AndroidBitmap_getInfo(env, bitmap, &info) >= 0);
        CV_Assert(info.format == ANDROID_BITMAP_FORMAT_RGBA_8888 ||
                  info.format == ANDROID_BITMAP_FORMAT_RGB_565);
        CV_Assert(mat.dims == 2 && info.height == (uint32_t) mat.rows &&
                  info.width == (uint32_t) mat.cols);
        CV_Assert(mat.type() == CV_8UC1 || mat.type() == CV_8UC3 || mat.type() == CV_8UC4);
        CV_Assert(AndroidBitmap_lockPixels(env, bitmap, &pixels) >= 0);
        CV_Assert(pixels);
        if (info.format == ANDROID_BITMAP_FORMAT_RGBA_8888) {
            // Destination is 4-channel; convert the source as required.
            Mat temp(info.height, info.width, CV_8UC4, pixels);
            if (mat.type() == CV_8UC4) {
                mat.copyTo(temp);
            } else if (mat.type() == CV_8UC2) {
                // NOTE(review): unreachable — the CV_Assert above excludes CV_8UC2.
                cvtColor(mat, temp, COLOR_BGR5652BGRA);
            } else if (mat.type() == CV_8UC1) { // grayscale Mat
                cvtColor(mat, temp, COLOR_GRAY2BGRA);
            } else if (mat.type() == CV_8UC3) {
                cvtColor(mat, temp, COLOR_RGB2BGRA);
            }
        } else if (info.format == ANDROID_BITMAP_FORMAT_RGB_565) {
            // Destination is 16-bit 565; convert the source as required.
            Mat temp(info.height, info.width, CV_8UC2, pixels);
            if (mat.type() == CV_8UC4) {
                cvtColor(mat, temp, COLOR_BGRA2BGR565);
            } else if (mat.type() == CV_8UC2) {
                // NOTE(review): unreachable — the CV_Assert above excludes CV_8UC2.
                mat.copyTo(temp);
            } else if (mat.type() == CV_8UC1) { // grayscale Mat
                cvtColor(mat, temp, COLOR_GRAY2BGR565);
            } else if (mat.type() == CV_8UC3) {
                cvtColor(mat, temp, COLOR_RGB2BGR565);
            }
        }
        // Unlock the pixel buffer before returning.
        AndroidBitmap_unlockPixels(env, bitmap);
        return;
    } catch (const Exception &e) {
        // cv::Exception — unlock first, then surface the message to Java.
        AndroidBitmap_unlockPixels(env, bitmap);
        jclass je = env->FindClass("java/lang/Exception");
        env->ThrowNew(je, e.what());
        return;
    } catch (...) {
        AndroidBitmap_unlockPixels(env, bitmap);
        jclass je = env->FindClass("java/lang/Exception");
        env->ThrowNew(je, "Unknown exception in JNI code {nMatToBitmap}");
        return;
    }
}
// 人脸检测Activity — the face-detection Activity (Java source begins here).
package com.hxg.ndkface;
import android.Manifest;
import android.annotation.SuppressLint;
import android.content.Context;
import android.graphics.Bitmap;
import android.graphics.BitmapFactory;
import android.hardware.Camera;
import android.os.Bundle;
import androidx.appcompat.app.AppCompatActivity;
import androidx.appcompat.widget.AppCompatImageView;
import androidx.appcompat.widget.AppCompatTextView;
import androidx.arch.core.executor.ArchTaskExecutor;
import com.hxg.ndkface.camera.AutoTexturePreviewView;
import com.hxg.ndkface.manager.CameraPreviewManager;
import com.hxg.ndkface.model.SingleBaseConfig;
import com.hxg.ndkface.utils.CornerUtil;
import com.hxg.ndkface.utils.FileUtils;
import com.tbruyelle.rxpermissions3.RxPermissions;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
public class MainNorFaceActivity extends AppCompatActivity {
private Bitmap mFaceBitmap; // sample face image decoded from res/drawable in onCreate
private FaceDetection mFaceDetection; // JNI bridge to the native OpenCV detector
private File mCascadeFile; // cascade XML copied into app-private storage
private AppCompatTextView mTextView; // bound to R.id.note in onCreate
private RxPermissions rxPermissions; // runtime-permission helper
private AutoTexturePreviewView mAutoCameraPreviewView; // camera preview surface
// The larger the preview, the higher the performance cost; 640x480 or 1280x720 are alternatives.
private static final int PREFER_WIDTH = SingleBaseConfig.getBaseConfig().getRgbAndNirWidth();
private static final int PERFER_HEIGH = SingleBaseConfig.getBaseConfig().getRgbAndNirHeight();
// Lifecycle: the view hierarchy is leaving the window — stop the camera
// preview so the camera resource is released.
@Override
public void onDetachedFromWindow() {
super.onDetachedFromWindow();
CameraPreviewManager.getInstance().stopPreview();
}
// Lifecycle entry point: binds views, copies the cascade file out of
// res/raw, and loads it into the native face detector via JNI.
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_main);
rxPermissions = new RxPermissions(this);
mTextView = findViewById(R.id.note);
mAutoCameraPreviewView = findViewById(R.id.auto_camera_preview_view);
// Decode the bundled sample face image.
mFaceBitmap = BitmapFactory.decodeResource(getResources(), R.drawable.face);
// Copy the cascade to app-private storage, then hand its path to native code.
copyCaseCadeFile();
mFaceDetection = new FaceDetection();
mFaceDetection.loadCascade(mCascadeFile.getAbsolutePath());
}
/**
- 加载人脸识别的分类器文件
*/
private void copyCaseCadeFile() {
try {
// load cascade file from application resources
InputStream is = getResources().openRawResource(R.raw.lbpcascade_frontalface);
File cascadeDir = getDir(“cascade”, Context.MODE_PRIVATE);
mCascadeFile = new File(cascadeDir, “lbpcascade_frontalface.xml”);
if (mCascadeFile.exists()) return;
FileOutputStream os = new FileOutputStream(mCascadeFile);
byte[] buffer = new byte[4096];
int bytesRead;
while ((bytesRead = is.read(buffer)) != -1) {
os.write(buffer, 0, bytesRead);
}
is.close();
os.close();
} catch (IOException e) {
e.printStackTrace();
}
}
// Lifecycle: the view hierarchy joined the window — clip the preview to a
// circle and request storage/camera permissions before starting the preview.
@Override
public void onAttachedToWindow() {
super.onAttachedToWindow();
CornerUtil.clipViewCircle(mAutoCameraPreviewView);
rxPermissions.request(
Manifest.permission.WRITE_EXTERNAL_STORAGE,
Manifest.permission.READ_EXTERNAL_STORAGE,
Manifest.permission.CAMERA)
.subscribe(aBoolean -> {
// NOTE(review): the grant result (aBoolean) is ignored — the preview
// starts even if permissions were denied; confirm this is intended.
startTestOpenDebugRegisterFunction();
});
}
@SuppressLint(“RestrictedApi”)
private void startTestOpenDebugRegisterFunction() {
CameraPreviewManager.getInstance().setCameraFacing(CameraPreviewManager.CAMERA_FACING_FRONT);
CameraPreviewManager.getInstance().startPreview(this, mAutoCameraPreviewView,
PREFER_WIDTH, PERFER_HEIGH, (byte[] data, Camera camera, int width, int height) -> {
//识别人脸,保存人脸特征信息
自我介绍一下,小编13年上海交大毕业,曾经在小公司待过,也去过华为、OPPO等大厂,18年进入阿里一直到现在。
深知大多数初中级Android工程师,想要提升技能,往往是自己摸索成长或者是报班学习,但对于培训机构动则近万的学费,着实压力不小。自己不成体系的自学效果低效又漫长,而且极易碰到天花板技术停滞不前!
因此收集整理了一份《2024年Android移动开发全套学习资料》,初衷也很简单,就是希望能够帮助到想自学提升又不知道该从何学起的朋友,同时减轻大家的负担。
既有适合小白学习的零基础资料,也有适合3年以上经验的小伙伴深入学习提升的进阶课程,基本涵盖了95%以上Android开发知识点,真正体系化!
由于文件比较大,这里只是将部分目录截图出来,每个节点里面都包含大厂面经、学习笔记、源码讲义、实战项目、讲解视频,并且会持续更新!
如果你觉得这些内容对你有帮助,可以扫码获取!!(资料价值较高,非无偿)
文末
好了,今天的分享就到这里,如果你对在面试中遇到的问题,或者刚毕业及工作几年迷茫不知道该如何准备面试并突破现状提升自己,对于自己的未来还不够了解不知道给如何规划,可以来看看同行们都是如何突破现状,怎么学习的,来吸收他们的面试以及工作经验完善自己的之后的面试计划及职业规划。
这里放上一部分我工作以来以及参与过的大大小小的面试收集总结出来的一套进阶学习的视频及面试专题资料包,主要还是希望大家在如今大环境不好的情况下面试能够顺利一点,希望可以帮助到大家
《Android学习笔记总结+移动架构视频+大厂面试真题+项目实战源码》,点击传送门即可获取!
、OPPO等大厂,18年进入阿里一直到现在。**
深知大多数初中级Android工程师,想要提升技能,往往是自己摸索成长或者是报班学习,但对于培训机构动则近万的学费,着实压力不小。自己不成体系的自学效果低效又漫长,而且极易碰到天花板技术停滞不前!
因此收集整理了一份《2024年Android移动开发全套学习资料》,初衷也很简单,就是希望能够帮助到想自学提升又不知道该从何学起的朋友,同时减轻大家的负担。
[外链图片转存中…(img-1HGURRRg-1711538514872)]
[外链图片转存中…(img-3eUC4JUv-1711538514873)]
[外链图片转存中…(img-yi2AjoDm-1711538514873)]
[外链图片转存中…(img-Z4OH6qdY-1711538514873)]
[外链图片转存中…(img-oWwkdxIK-1711538514874)]
既有适合小白学习的零基础资料,也有适合3年以上经验的小伙伴深入学习提升的进阶课程,基本涵盖了95%以上Android开发知识点,真正体系化!
由于文件比较大,这里只是将部分目录截图出来,每个节点里面都包含大厂面经、学习笔记、源码讲义、实战项目、讲解视频,并且会持续更新!
如果你觉得这些内容对你有帮助,可以扫码获取!!(资料价值较高,非无偿)
文末
好了,今天的分享就到这里,如果你对在面试中遇到的问题,或者刚毕业及工作几年迷茫不知道该如何准备面试并突破现状提升自己,对于自己的未来还不够了解不知道给如何规划,可以来看看同行们都是如何突破现状,怎么学习的,来吸收他们的面试以及工作经验完善自己的之后的面试计划及职业规划。
这里放上一部分我工作以来以及参与过的大大小小的面试收集总结出来的一套进阶学习的视频及面试专题资料包,主要还是希望大家在如今大环境不好的情况下面试能够顺利一点,希望可以帮助到大家
[外链图片转存中…(img-FTBrOa4w-1711538514874)]