去OpenCV官网,下载Android的包。
创建Android的C++工程:
解压OpenCV后:
复制libs:找到sdk->native->libs文件夹:
复制头文件:找到sdk->native->jni文件夹:
复制相应的包到工程中的cpp目录下:
配置CMakeLists.txt:
...
...
# Make the OpenCV headers (copied from sdk/native/jni/include) visible to the compiler.
include_directories(include)
# Add the ABI-specific libs directory to the linker search path so -lopencv_java4 resolves.
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -L${CMAKE_SOURCE_DIR}/libs/${CMAKE_ANDROID_ARCH_ABI}")
...
# Link the prebuilt OpenCV shared library plus the NDK android (native window) library.
target_link_libraries(
...
opencv_java4
android
...)
还需要在build.gradle中添加配置:
android {
...
defaultConfig {
...
// Native build settings handed to CMake.
externalNativeBuild {
cmake {
// Build the native library for every ABI that OpenCV .so files were copied for.
abiFilters "armeabi-v7a","arm64-v8a","x86","x86_64"
cppFlags '-std=c++11'
}
}
}
...
// Tell Gradle where the prebuilt OpenCV .so files live so they get packaged
// into the APK; must match the location the libs were copied to under cpp/.
sourceSets {
main {
jniLibs.srcDirs = ['src/main/cpp/libs']
}
}
...
}
配置完毕,编译项目,然后在native-lib.cpp引入头文件,就能看出是否引入成功:
然后就是使用:
首先,相机权限的声明以及 Android 6.0 以上的运行时权限适配属于基础内容,这里不再赘述;
然后添加camerax包:
// NOTE(review): 1.0.0-alpha05 uses the old CameraX API surface
// (CameraX.bindToLifecycle, ImageAnalysisConfig); later releases changed
// this API significantly, so the MainActivity code below matches this
// version only.
def camerax_version = "1.0.0-alpha05"
// CameraX core library using camera2 implementation
implementation "androidx.camera:camera-camera2:$camerax_version"
然后MainActivity.java:
/**
 * Demo activity: receives camera frames via CameraX's ImageAnalysis,
 * passes them to native OpenCV code for face detection, and renders the
 * processed frames onto a SurfaceView through JNI.
 */
public class MainActivity extends AppCompatActivity implements ImageAnalysis.Analyzer {
    static {
        // Native library containing the JNI implementations declared below.
        System.loadLibrary("opencv_study001");
    }

    private ActivityMainBinding binding;
    // Requested analysis resolution; actual frame size may differ.
    int width = 1280;
    int height = 960;
    private HandlerThread handlerThread;
    private CameraX.LensFacing currentFacing = CameraX.LensFacing.FRONT;
    boolean setSurfaceView = false;
    float screenWidth = 0;
    // Cascade file copied out of res/raw so native code can load it by path.
    File mCascadeFile;

    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        binding = ActivityMainBinding.inflate(getLayoutInflater());
        setContentView(binding.getRoot());
        if (!PermissionUtil.checkPermissions(this)) {
            PermissionUtil.requestPermission(this, 0x008);
        }
        screenWidth = ScreenUtil.getScreenWidth(this);
        binding.surfaceView.getHolder().addCallback(new SurfaceHolder.Callback() {
            @Override
            public void surfaceCreated(@NonNull SurfaceHolder holder) {
            }

            @Override
            public void surfaceChanged(@NonNull SurfaceHolder holder, int format, int width, int height) {
                // Hand the surface to native code off the main thread.
                new Thread(() -> setSurface(holder.getSurface())).start();
            }

            @Override
            public void surfaceDestroyed(@NonNull SurfaceHolder holder) {
            }
        });
        copyCascadeFile();
        loadCascade(mCascadeFile.getAbsolutePath());
        startCamera();
    }

    @Override
    protected void onDestroy() {
        // FIX: the analyzer HandlerThread was never stopped; quit it so the
        // thread does not leak after the activity is destroyed.
        if (handlerThread != null) {
            handlerThread.quitSafely();
        }
        super.onDestroy();
    }

    private native void loadCascade(String path);

    private native void setSurface(Surface surface);

    private native void ffmpegRender(byte[] bytes, int width, int height);

    /** Starts CameraX with an ImageAnalysis use case whose callbacks run on a dedicated thread. */
    @SuppressLint("UnsafeExperimentalUsageError")
    private void startCamera() {
        // Analysis callbacks are delivered on this background thread.
        handlerThread = new HandlerThread("Analyze-thread");
        handlerThread.start();
        CameraX.bindToLifecycle(this, getAnalysis());
    }

    /** Builds the ImageAnalysis use case (latest-frame mode, fixed target resolution). */
    private ImageAnalysis getAnalysis() {
        ImageAnalysisConfig imageAnalysisConfig = new ImageAnalysisConfig.Builder()
                .setCallbackHandler(new Handler(handlerThread.getLooper()))
                .setLensFacing(currentFacing)
                .setImageReaderMode(ImageAnalysis.ImageReaderMode.ACQUIRE_LATEST_IMAGE)
                .setTargetResolution(new Size(width, height))
                .build();
        ImageAnalysis imageAnalysis = new ImageAnalysis(imageAnalysisConfig);
        imageAnalysis.setAnalyzer(this);
        return imageAnalysis;
    }

    /**
     * Per-frame callback (runs on the analyzer thread). Resizes the preview
     * once, then forwards the frame bytes to native rendering.
     */
    @Override
    public void analyze(ImageProxy image, int rotationDegrees) {
        if (!setSurfaceView) {
            // Size the SurfaceView once, based on the first frame's dimensions.
            runOnUiThread(() -> setSurfaceView(image.getWidth(), image.getHeight()));
            setSurfaceView = true;
        }
        byte[] bytes = ImageUtils.getBytes(image, rotationDegrees, image.getWidth(), image.getHeight());
        ffmpegRender(bytes, image.getWidth(), image.getHeight());
    }

    /** Sizes the SurfaceView to full screen width, preserving the frame's aspect ratio. */
    private void setSurfaceView(int imageWidth, int imageHeight) {
        float surfaceWidth = screenWidth;
        // Frame arrives rotated 90°, hence width/height swapped in the ratio.
        float surfaceHeight = (float) imageWidth / (float) imageHeight * surfaceWidth;
        ConstraintLayout.LayoutParams layoutParams = new ConstraintLayout.LayoutParams((int) surfaceWidth, (int) surfaceHeight);
        binding.surfaceView.setLayoutParams(layoutParams);
    }

    /**
     * Copies the LBP cascade from res/raw into app-private storage so the
     * native CascadeClassifier can load it from a regular file path.
     * Always sets {@link #mCascadeFile}; skips the copy if it already exists.
     */
    protected void copyCascadeFile() {
        File cascadeDir = getDir("cascade", Context.MODE_PRIVATE);
        mCascadeFile = new File(cascadeDir, "lbpcascade_frontalface2.xml");
        // FIX: check for an existing copy BEFORE opening the raw resource;
        // the original opened the InputStream first and leaked it on the
        // early return, and never closed either stream on failure.
        if (mCascadeFile.exists()) return;
        try (InputStream is = getResources().openRawResource(R.raw.lbpcascade_frontalface);
             FileOutputStream os = new FileOutputStream(mCascadeFile)) {
            byte[] buffer = new byte[4096];
            int bytesRead;
            while ((bytesRead = is.read(buffer)) != -1) {
                os.write(buffer, 0, bytesRead);
            }
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}
native-lib.cpp:
#include <jni.h>
#include <algorithm>
#include <string>
#include "opencv2/opencv.hpp"
#include <android/native_window.h>
#include <android/native_window_jni.h>
using namespace cv;
using namespace std;
// Render target wrapping the Java Surface; replaced in setSurface(), released there and on lock failure.
ANativeWindow *window;
// Face-detection cascade, loaded once via loadCascade(), used by detectorface().
CascadeClassifier cascadeClassifier;
// Face detection: finds faces in the BGRA frame and outlines each one
// directly on the input image.
void detectorface(Mat &src) {
    // Classifier works on single-channel input: convert to grayscale.
    Mat grayFrame;
    cvtColor(src, grayFrame, COLOR_BGRA2GRAY);
    // Histogram equalization improves detection under uneven lighting.
    equalizeHist(grayFrame, grayFrame);
    // Multi-scale detection over the equalized gray image.
    vector<Rect> detected;
    cascadeClassifier.detectMultiScale(grayFrame, detected,
                                       1.1, 2, CASCADE_SCALE_IMAGE,
                                       Size(30, 30));
    // Draw a rectangle on the source frame for every detected face.
    for (size_t i = 0; i < detected.size(); ++i) {
        rectangle(src, detected[i], Scalar(255, 0, 0));
    }
}
extern "C"
JNIEXPORT void JNICALL
// Loads the cascade classifier from the file path handed over by Java.
Java_com_opencv_MainActivity_loadCascade(JNIEnv *env, jobject thiz, jstring path_) {
    const char *filepath = env->GetStringUTFChars(path_, 0);
    // FIX: the original ignored load()'s return value, so a missing or
    // corrupt cascade file failed silently and detection simply produced
    // no faces. Surface the failure to the Java caller instead.
    bool loaded = cascadeClassifier.load(filepath);
    env->ReleaseStringUTFChars(path_, filepath);
    if (!loaded) {
        env->ThrowNew(env->FindClass("java/lang/IllegalArgumentException"),
                      "Failed to load cascade classifier file");
    }
}
extern "C"
JNIEXPORT void JNICALL
// Swaps the native render target: releases the previous window (if any)
// and wraps the newly provided Java Surface.
Java_com_opencv_MainActivity_setSurface(JNIEnv *env, jobject thiz,
                                        jobject surface) {
    // Release whatever window we were holding before.
    ANativeWindow *previous = window;
    window = 0;
    if (previous) {
        ANativeWindow_release(previous);
    }
    // Acquire a native window for the new Surface to render into.
    window = ANativeWindow_fromSurface(env, surface);
}
extern "C"
JNIEXPORT void JNICALL
// Converts one YUV camera frame to BGRA, runs face detection on it, and
// blits the result into the ANativeWindow.
Java_com_opencv_MainActivity_ffmpegRender(JNIEnv *env, jobject thiz,
                                          jbyteArray buffer, jint width,
                                          jint height) {
    jbyte *data = env->GetByteArrayElements(buffer, 0);
    // Planar YUV frame: Y plane (height rows) + chroma planes (height/2 rows),
    // all `width` bytes wide — hence height * 3 / 2 single-channel rows.
    Mat src(height * 3 / 2, width, CV_8UC1, data);
    // YUV -> BGRA. Output size/type differ, so OpenCV allocates a fresh
    // buffer here; `src` no longer aliases `data` after this call.
    cvtColor(src, src, COLOR_YUV2BGRA_YV12);
    // Rotate 90° counter-clockwise, then mirror (front-facing camera).
    rotate(src, src, ROTATE_90_COUNTERCLOCKWISE);
    flip(src, src, 1);
    // Detect and outline faces in place.
    detectorface(src);
    // Render the processed frame to the surface.
    if (window) {
        ANativeWindow_setBuffersGeometry(window, src.cols, src.rows, WINDOW_FORMAT_RGBA_8888);
        ANativeWindow_Buffer window_buffer;
        do {
            // On lock failure, drop the window and bail out.
            if (ANativeWindow_lock(window, &window_buffer, 0)) {
                ANativeWindow_release(window);
                window = 0;
                break;
            }
            uint8_t *dst_data = static_cast<uint8_t *>(window_buffer.bits);
            // Window stride (in pixels) may be padded beyond src.cols for alignment.
            int dst_linesize = window_buffer.stride * 4;
            int src_linesize = src.cols * 4;
            // FIX: the original memcpy'd dst_linesize bytes out of every source
            // row; whenever the window stride exceeds the frame width this read
            // past the end of each row (and past the image buffer entirely).
            // Copy only the source row, and never more rows than the frame has.
            int rows = std::min(window_buffer.height, src.rows);
            int copy_bytes = std::min(dst_linesize, src_linesize);
            for (int i = 0; i < rows; ++i) {
                memcpy(dst_data + i * dst_linesize, src.data + i * src_linesize, copy_bytes);
            }
            // Post the buffer to the screen.
            ANativeWindow_unlockAndPost(window);
        } while (0);
    }
    src.release();
    // JNI_ABORT: the Java byte array was never modified in place (cvtColor
    // reallocated), so skip the needless copy-back of mode 0.
    env->ReleaseByteArrayElements(buffer, data, JNI_ABORT);
}
加更:
其实导包的时候不一定要使用 so 动态库(体积太大),也可以改用 SDK 中 sdk->native->staticlibs 目录下的静态库:
然后使用哪个导入哪个就行了。