TensorFlow Lite Android Native-Layer C/C++ Development


1. Building the native libraries with the Android NDK

Please refer to the article on building TensorFlow from source.

libtensorflowlite_jni.so is the native library, libtensorflowlite.jar contains the Java API implementation, and libtensorflowlite-native-header.jar holds the JNI header files that connect the native and Java layers.

Java implementation:

tensorflow/bazel-tensorflow/tensorflow/lite/java/src/main/java

Native implementation:

tensorflow/bazel-tensorflow/tensorflow/lite/java/src/main/native

2. libtensorflowlite-native-header.jar

The jar contains the following header files:

$ tree libtensorflowlite-native-header
libtensorflowlite-native-header
├── META-INF
│   └── MANIFEST.MF
├── org_tensorflow_lite_NativeInterpreterWrapper.h
├── org_tensorflow_lite_Tensor.h
└── org_tensorflow_lite_TensorFlowLite.h

(1) org_tensorflow_lite_TensorFlowLite.h

APIs for querying the version number and for initializing TensorFlow Lite:

/*
 * Class:     org_tensorflow_lite_TensorFlowLite
 * Method:    version
 * Signature: ()Ljava/lang/String;
 */
JNIEXPORT jstring JNICALL Java_org_tensorflow_lite_TensorFlowLite_version
  (JNIEnv *, jclass);

/*
 * Class:     org_tensorflow_lite_TensorFlowLite
 * Method:    initTensorFlow
 * Signature: ()V
 */
JNIEXPORT void JNICALL Java_org_tensorflow_lite_TensorFlowLite_initTensorFlow
  (JNIEnv *, jclass);

Java_org_tensorflow_lite_TensorFlowLite_version is implemented in native/tensorflow_lite_jni.c and native/tensorflow_lite_jni.h.

Java_org_tensorflow_lite_TensorFlowLite_initTensorFlow is implemented in native/init_tensorflow_jni.cc and native/init_tensorflow_jni.h; it calls InitTensorFlow in the tflite namespace:

JNIEXPORT void JNICALL Java_org_tensorflow_lite_TensorFlowLite_initTensorFlow(
    JNIEnv* env, jclass clazz) {
  ::tflite::InitTensorFlow();
}
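
When these JNI headers are used directly from native code (see section (4) below for where they live in the include directory), the two entry points can be invoked with a JNIEnv* and the resolved org.tensorflow.lite.TensorFlowLite class. A minimal sketch, assuming both are already available; the helper name is hypothetical:

#include <jni.h>
#include <string>
#include "tensorflow_lite_jni.h"     // declares Java_org_tensorflow_lite_TensorFlowLite_version
#include "init_tensorflow_jni.h"     // declares Java_org_tensorflow_lite_TensorFlowLite_initTensorFlow

// Minimal sketch: initialize TF Lite and return its runtime version string.
// `env` and `tflite_class` are assumed to have been obtained elsewhere
// (e.g. in JNI_OnLoad via FindClass).
std::string InitAndGetVersion(JNIEnv* env, jclass tflite_class) {
  Java_org_tensorflow_lite_TensorFlowLite_initTensorFlow(env, tflite_class);

  jstring jversion =
      Java_org_tensorflow_lite_TensorFlowLite_version(env, tflite_class);
  const char* chars = env->GetStringUTFChars(jversion, nullptr);
  std::string version(chars);
  env->ReleaseStringUTFChars(jversion, chars);
  return version;
}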

(2) org_tensorflow_lite_NativeInterpreterWrapper.h

These are mainly the interpreter interfaces:

/*
 * Class:     org_tensorflow_lite_NativeInterpreterWrapper
 * Method:    run
 * Signature: (JJ)Z
 */
JNIEXPORT jboolean JNICALL Java_org_tensorflow_lite_NativeInterpreterWrapper_run
  (JNIEnv *, jclass, jlong, jlong);

/*
 * Class:     org_tensorflow_lite_NativeInterpreterWrapper
 * Method:    resizeInput
 * Signature: (JJI[I)Z
 */
JNIEXPORT jboolean JNICALL Java_org_tensorflow_lite_NativeInterpreterWrapper_resizeInput
  (JNIEnv *, jclass, jlong, jlong, jint, jintArray);

/*
 * Class:     org_tensorflow_lite_NativeInterpreterWrapper
 * Method:    getOutputDataType
 * Signature: (JI)I
 */
JNIEXPORT jint JNICALL Java_org_tensorflow_lite_NativeInterpreterWrapper_getOutputDataType
  (JNIEnv *, jclass, jlong, jint);

/*
 * Class:     org_tensorflow_lite_NativeInterpreterWrapper
 * Method:    getOutputQuantizationZeroPoint
 * Signature: (JI)I
 */
JNIEXPORT jint JNICALL Java_org_tensorflow_lite_NativeInterpreterWrapper_getOutputQuantizationZeroPoint
  (JNIEnv *, jclass, jlong, jint);

/*
 * Class:     org_tensorflow_lite_NativeInterpreterWrapper
 * Method:    getOutputQuantizationScale
 * Signature: (JI)F
 */
JNIEXPORT jfloat JNICALL Java_org_tensorflow_lite_NativeInterpreterWrapper_getOutputQuantizationScale
  (JNIEnv *, jclass, jlong, jint);

/*
 * Class:     org_tensorflow_lite_NativeInterpreterWrapper
 * Method:    allocateTensors
 * Signature: (JJ)J
 */
JNIEXPORT jlong JNICALL Java_org_tensorflow_lite_NativeInterpreterWrapper_allocateTensors
  (JNIEnv *, jclass, jlong, jlong);

/*
 * Class:     org_tensorflow_lite_NativeInterpreterWrapper
 * Method:    getInputTensorIndex
 * Signature: (JI)I
 */
JNIEXPORT jint JNICALL Java_org_tensorflow_lite_NativeInterpreterWrapper_getInputTensorIndex
  (JNIEnv *, jclass, jlong, jint);

/*
 * Class:     org_tensorflow_lite_NativeInterpreterWrapper
 * Method:    getOutputTensorIndex
 * Signature: (JI)I
 */
JNIEXPORT jint JNICALL Java_org_tensorflow_lite_NativeInterpreterWrapper_getOutputTensorIndex
  (JNIEnv *, jclass, jlong, jint);

/*
 * Class:     org_tensorflow_lite_NativeInterpreterWrapper
 * Method:    getInputCount
 * Signature: (J)I
 */
JNIEXPORT jint JNICALL Java_org_tensorflow_lite_NativeInterpreterWrapper_getInputCount
  (JNIEnv *, jclass, jlong);

/*
 * Class:     org_tensorflow_lite_NativeInterpreterWrapper
 * Method:    getOutputCount
 * Signature: (J)I
 */
JNIEXPORT jint JNICALL Java_org_tensorflow_lite_NativeInterpreterWrapper_getOutputCount
  (JNIEnv *, jclass, jlong);

/*
 * Class:     org_tensorflow_lite_NativeInterpreterWrapper
 * Method:    getInputNames
 * Signature: (J)[Ljava/lang/String;
 */
JNIEXPORT jobjectArray JNICALL Java_org_tensorflow_lite_NativeInterpreterWrapper_getInputNames
  (JNIEnv *, jclass, jlong);

/*
 * Class:     org_tensorflow_lite_NativeInterpreterWrapper
 * Method:    getOutputNames
 * Signature: (J)[Ljava/lang/String;
 */
JNIEXPORT jobjectArray JNICALL Java_org_tensorflow_lite_NativeInterpreterWrapper_getOutputNames
  (JNIEnv *, jclass, jlong);

/*
 * Class:     org_tensorflow_lite_NativeInterpreterWrapper
 * Method:    useNNAPI
 * Signature: (JZ)V
 */
JNIEXPORT void JNICALL Java_org_tensorflow_lite_NativeInterpreterWrapper_useNNAPI
  (JNIEnv *, jclass, jlong, jboolean);

/*
 * Class:     org_tensorflow_lite_NativeInterpreterWrapper
 * Method:    numThreads
 * Signature: (JI)V
 */
JNIEXPORT void JNICALL Java_org_tensorflow_lite_NativeInterpreterWrapper_numThreads
  (JNIEnv *, jclass, jlong, jint);

/*
 * Class:     org_tensorflow_lite_NativeInterpreterWrapper
 * Method:    allowFp16PrecisionForFp32
 * Signature: (JZ)V
 */
JNIEXPORT void JNICALL Java_org_tensorflow_lite_NativeInterpreterWrapper_allowFp16PrecisionForFp32
  (JNIEnv *, jclass, jlong, jboolean);

/*
 * Class:     org_tensorflow_lite_NativeInterpreterWrapper
 * Method:    allowBufferHandleOutput
 * Signature: (JZ)V
 */
JNIEXPORT void JNICALL Java_org_tensorflow_lite_NativeInterpreterWrapper_allowBufferHandleOutput
  (JNIEnv *, jclass, jlong, jboolean);

/*
 * Class:     org_tensorflow_lite_NativeInterpreterWrapper
 * Method:    createErrorReporter
 * Signature: (I)J
 */
JNIEXPORT jlong JNICALL Java_org_tensorflow_lite_NativeInterpreterWrapper_createErrorReporter
  (JNIEnv *, jclass, jint);

/*
 * Class:     org_tensorflow_lite_NativeInterpreterWrapper
 * Method:    createModel
 * Signature: (Ljava/lang/String;J)J
 */
JNIEXPORT jlong JNICALL Java_org_tensorflow_lite_NativeInterpreterWrapper_createModel
  (JNIEnv *, jclass, jstring, jlong);

/*
 * Class:     org_tensorflow_lite_NativeInterpreterWrapper
 * Method:    createModelWithBuffer
 * Signature: (Ljava/nio/ByteBuffer;J)J
 */
JNIEXPORT jlong JNICALL Java_org_tensorflow_lite_NativeInterpreterWrapper_createModelWithBuffer
  (JNIEnv *, jclass, jobject, jlong);

/*
 * Class:     org_tensorflow_lite_NativeInterpreterWrapper
 * Method:    createInterpreter
 * Signature: (JJI)J
 */
JNIEXPORT jlong JNICALL Java_org_tensorflow_lite_NativeInterpreterWrapper_createInterpreter
  (JNIEnv *, jclass, jlong, jlong, jint);

/*
 * Class:     org_tensorflow_lite_NativeInterpreterWrapper
 * Method:    applyDelegate
 * Signature: (JJJ)V
 */
JNIEXPORT void JNICALL Java_org_tensorflow_lite_NativeInterpreterWrapper_applyDelegate
  (JNIEnv *, jclass, jlong, jlong, jlong);

/*
 * Class:     org_tensorflow_lite_NativeInterpreterWrapper
 * Method:    delete
 * Signature: (JJJ)V
 */
JNIEXPORT void JNICALL Java_org_tensorflow_lite_NativeInterpreterWrapper_delete
  (JNIEnv *, jclass, jlong, jlong, jlong);

The concrete implementations are in nativeinterpreterwrapper_jni.c and nativeinterpreterwrapper_jni.h.
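
To illustrate how these entry points can be called directly from native code, here is a minimal sketch that inspects and configures an interpreter that has already been created (see the lifecycle wrapper sketch in section (4)). It assumes a valid JNIEnv*, the NativeInterpreterWrapper jclass, and an interpreter handle obtained from createInterpreter; the meaning of the jlong handle is inferred from the Java-layer naming:

#include <jni.h>
#include <string>
#include <vector>
#include "nativeinterpreterwrapper_jni.h"

// Minimal sketch: inspect and configure an existing interpreter handle.
// `env`, `cls` (the NativeInterpreterWrapper class) and `interpreter` are
// assumed to have been obtained elsewhere.
void InspectInterpreter(JNIEnv* env, jclass cls, jlong interpreter) {
  jint inputs = Java_org_tensorflow_lite_NativeInterpreterWrapper_getInputCount(
      env, cls, interpreter);
  jint outputs = Java_org_tensorflow_lite_NativeInterpreterWrapper_getOutputCount(
      env, cls, interpreter);

  // Collect the input tensor names (a jobjectArray of jstring).
  std::vector<std::string> names;
  jobjectArray jnames =
      Java_org_tensorflow_lite_NativeInterpreterWrapper_getInputNames(
          env, cls, interpreter);
  for (jsize i = 0; i < env->GetArrayLength(jnames); ++i) {
    jstring jname = static_cast<jstring>(env->GetObjectArrayElement(jnames, i));
    const char* c = env->GetStringUTFChars(jname, nullptr);
    names.emplace_back(c);
    env->ReleaseStringUTFChars(jname, c);
    env->DeleteLocalRef(jname);
  }

  // Query the data type and quantization parameters of output 0.
  jint dtype = Java_org_tensorflow_lite_NativeInterpreterWrapper_getOutputDataType(
      env, cls, interpreter, 0);
  jfloat scale =
      Java_org_tensorflow_lite_NativeInterpreterWrapper_getOutputQuantizationScale(
          env, cls, interpreter, 0);
  jint zero_point =
      Java_org_tensorflow_lite_NativeInterpreterWrapper_getOutputQuantizationZeroPoint(
          env, cls, interpreter, 0);

  // Optional configuration: enable NNAPI and set the thread count.
  Java_org_tensorflow_lite_NativeInterpreterWrapper_useNNAPI(
      env, cls, interpreter, JNI_TRUE);
  Java_org_tensorflow_lite_NativeInterpreterWrapper_numThreads(
      env, cls, interpreter, 4);

  (void)inputs; (void)outputs; (void)dtype; (void)scale; (void)zero_point;
}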

(3) org_tensorflow_lite_Tensor.h

These are mainly the Tensor-related interfaces:

/*
 * Class:     org_tensorflow_lite_Tensor
 * Method:    create
 * Signature: (JI)J
 */
JNIEXPORT jlong JNICALL Java_org_tensorflow_lite_Tensor_create
  (JNIEnv *, jclass, jlong, jint);

/*
 * Class:     org_tensorflow_lite_Tensor
 * Method:    delete
 * Signature: (J)V
 */
JNIEXPORT void JNICALL Java_org_tensorflow_lite_Tensor_delete
  (JNIEnv *, jclass, jlong);

/*
 * Class:     org_tensorflow_lite_Tensor
 * Method:    buffer
 * Signature: (J)Ljava/nio/ByteBuffer;
 */
JNIEXPORT jobject JNICALL Java_org_tensorflow_lite_Tensor_buffer
  (JNIEnv *, jclass, jlong);

/*
 * Class:     org_tensorflow_lite_Tensor
 * Method:    writeDirectBuffer
 * Signature: (JLjava/nio/ByteBuffer;)V
 */
JNIEXPORT void JNICALL Java_org_tensorflow_lite_Tensor_writeDirectBuffer
  (JNIEnv *, jclass, jlong, jobject);

/*
 * Class:     org_tensorflow_lite_Tensor
 * Method:    dtype
 * Signature: (J)I
 */
JNIEXPORT jint JNICALL Java_org_tensorflow_lite_Tensor_dtype
  (JNIEnv *, jclass, jlong);

/*
 * Class:     org_tensorflow_lite_Tensor
 * Method:    shape
 * Signature: (J)[I
 */
JNIEXPORT jintArray JNICALL Java_org_tensorflow_lite_Tensor_shape
  (JNIEnv *, jclass, jlong);

/*
 * Class:     org_tensorflow_lite_Tensor
 * Method:    numBytes
 * Signature: (J)I
 */
JNIEXPORT jint JNICALL Java_org_tensorflow_lite_Tensor_numBytes
  (JNIEnv *, jclass, jlong);

/*
 * Class:     org_tensorflow_lite_Tensor
 * Method:    hasDelegateBufferHandle
 * Signature: (J)Z
 */
JNIEXPORT jboolean JNICALL Java_org_tensorflow_lite_Tensor_hasDelegateBufferHandle
  (JNIEnv *, jclass, jlong);

/*
 * Class:     org_tensorflow_lite_Tensor
 * Method:    readMultiDimensionalArray
 * Signature: (JLjava/lang/Object;)V
 */
JNIEXPORT void JNICALL Java_org_tensorflow_lite_Tensor_readMultiDimensionalArray
  (JNIEnv *, jclass, jlong, jobject);

/*
 * Class:     org_tensorflow_lite_Tensor
 * Method:    writeMultiDimensionalArray
 * Signature: (JLjava/lang/Object;)V
 */
JNIEXPORT void JNICALL Java_org_tensorflow_lite_Tensor_writeMultiDimensionalArray
  (JNIEnv *, jclass, jlong, jobject);

/*
 * Class:     org_tensorflow_lite_Tensor
 * Method:    index
 * Signature: (J)I
 */
JNIEXPORT jint JNICALL Java_org_tensorflow_lite_Tensor_index
  (JNIEnv *, jclass, jlong);

The implementations are in tensor_jni.cc and tensor_jni.h.
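
Combined with the interpreter entry points from section (2), the Tensor functions can be used to push input data in and pull results out. A minimal sketch, assuming an interpreter handle is available and that input/output 0 are the tensors of interest; the helper name and the meaning of the jlong handles are assumptions based on the Java-layer naming:

#include <jni.h>
#include <cstring>
#include "nativeinterpreterwrapper_jni.h"
#include "tensor_jni.h"

// Minimal sketch: write raw bytes into input tensor 0 and read back the raw
// bytes of output tensor 0. `env`, `wrapper_cls`, `tensor_cls` and
// `interpreter` are assumed to be valid.
void FillInputReadOutput(JNIEnv* env, jclass wrapper_cls, jclass tensor_cls,
                         jlong interpreter, void* input, size_t input_bytes,
                         void* output, size_t output_bytes) {
  // Resolve the global tensor index of input 0 and wrap it in a Tensor handle.
  jint in_index = Java_org_tensorflow_lite_NativeInterpreterWrapper_getInputTensorIndex(
      env, wrapper_cls, interpreter, 0);
  jlong in_tensor = Java_org_tensorflow_lite_Tensor_create(
      env, tensor_cls, interpreter, in_index);

  // Copy the caller's input into the tensor through a direct ByteBuffer.
  jobject in_buf = env->NewDirectByteBuffer(input, static_cast<jlong>(input_bytes));
  Java_org_tensorflow_lite_Tensor_writeDirectBuffer(env, tensor_cls, in_tensor, in_buf);

  // ... run the interpreter here (see section (2)) ...

  // Read the raw output bytes through the tensor's backing buffer.
  jint out_index = Java_org_tensorflow_lite_NativeInterpreterWrapper_getOutputTensorIndex(
      env, wrapper_cls, interpreter, 0);
  jlong out_tensor = Java_org_tensorflow_lite_Tensor_create(
      env, tensor_cls, interpreter, out_index);
  jint nbytes = Java_org_tensorflow_lite_Tensor_numBytes(env, tensor_cls, out_tensor);
  jobject out_buf = Java_org_tensorflow_lite_Tensor_buffer(env, tensor_cls, out_tensor);
  std::memcpy(output, env->GetDirectBufferAddress(out_buf),
              nbytes < static_cast<jint>(output_bytes) ? nbytes : output_bytes);

  Java_org_tensorflow_lite_Tensor_delete(env, tensor_cls, in_tensor);
  Java_org_tensorflow_lite_Tensor_delete(env, tensor_cls, out_tensor);
}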

(4) Header files for libtensorflowlite_jni.so

Place the JNI implementation headers at the top level of the include directory so they can be used directly from native code; you can wrap these JNI interfaces following the same pattern as the Java API (see the sketch after this list):

nativeinterpreterwrapper_jni.h 

tensorflow_lite_jni.h 

init_tensorflow_jni.h 

tensor_jni.h
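
As an illustration of "wrapping the JNI interfaces in the style of the Java API", here is a minimal sketch of a C++ class that reproduces the Java Interpreter lifecycle (create error reporter → load model → create interpreter → allocate tensors → run → delete) using only the entry points listed in section (2). The class name, the meaning of createErrorReporter's int argument, the interpretation of createInterpreter's jint as a thread count, and the argument order of delete are assumptions, not documented facts:

#include <jni.h>
#include "nativeinterpreterwrapper_jni.h"

// Minimal sketch of a wrapper mirroring the Java Interpreter API.
class TfLiteJniInterpreter {
 public:
  TfLiteJniInterpreter(JNIEnv* env, jclass wrapper_class,
                       jstring model_path, jint num_threads)
      : env_(env), cls_(wrapper_class) {
    error_ = Java_org_tensorflow_lite_NativeInterpreterWrapper_createErrorReporter(
        env_, cls_, 512 /* assumed: error buffer size */);
    model_ = Java_org_tensorflow_lite_NativeInterpreterWrapper_createModel(
        env_, cls_, model_path, error_);
    interp_ = Java_org_tensorflow_lite_NativeInterpreterWrapper_createInterpreter(
        env_, cls_, model_, error_, num_threads /* assumed: thread count */);
    Java_org_tensorflow_lite_NativeInterpreterWrapper_allocateTensors(
        env_, cls_, interp_, error_);
  }

  // Run one inference; input/output tensors are handled as in section (3).
  bool Run() {
    return Java_org_tensorflow_lite_NativeInterpreterWrapper_run(
               env_, cls_, interp_, error_) == JNI_TRUE;
  }

  ~TfLiteJniInterpreter() {
    // Assumed argument order: error reporter, model, interpreter handles.
    Java_org_tensorflow_lite_NativeInterpreterWrapper_delete(
        env_, cls_, error_, model_, interp_);
  }

 private:
  JNIEnv* env_;
  jclass cls_;
  jlong error_ = 0, model_ = 0, interp_ = 0;
};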

In addition, the folders under tensorflow/lite that contain headers, together with the headers themselves, must be copied into the include directory, and the headers under tensorflow/lite/tools/make/downloads/flatbuffers must also be added to the include directory, preserving their original path layout.

$ ls build/tensorflow-armv7a/output/include/
flatbuffers	tensorflow
$ ls build/tensorflow-armv7a/output/include/tensorflow/lite/
allocation.h		context_util.h		interpreter.h		nnapi_delegate.h	simple_memory_arena.h	version.h
arena_planner.h		core			kernels			op_resolver.h		stderr_reporter.h
builtin_op_data.h	delegates		memory_planner.h	optional_debug_tools.h	string.h
builtin_ops.h		error_reporter.h	model.h			profiling		string_util.h
c			experimental		models			python			testing
context.h		graph_info.h		mutable_op_resolver.h	schema			util.h

Note: whether the top-level tensorflow source path, or tensorflow/lite/tools/make/downloads, also needs to be added to the build has not yet been determined; this will be filled in if it turns out to be needed.
