The code around flatbuffer_conversions uses some relatively uncommon C++ syntax, which is recorded here:
explicit, using, lambda functions, nested classes, std::unique_ptr, POD, and so on.
flatbuffer_conversions.h
// These functions transform codes and data structures that are defined in the
// flatbuffer serialization format into in-memory values that are used by the
// runtime API and interpreter.
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/core/api/error_reporter.h"
#include "tensorflow/lite/core/api/op_resolver.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {

// Interface class for builtin data allocations.
class BuiltinDataAllocator {
 public:
  virtual void* Allocate(size_t size) = 0;
  virtual void Deallocate(void* data) = 0;

  // Allocate a structure, but make sure it is a POD structure that doesn't
  // require constructors to run. The reason we do this, is that Interpreter's C
  // extension part will take ownership so destructors will not be run during
  // deallocation.
  template <typename T>
  T* AllocatePOD() {
    static_assert(std::is_pod<T>::value, "Builtin data structure must be POD.");
    return static_cast<T*>(this->Allocate(sizeof(T)));
  }

  virtual ~BuiltinDataAllocator() {}
};
// Parse the appropriate data out of the op.
//
// This handles builtin data explicitly as there are flatbuffer schemas.
// If it returns kTfLiteOk, it passes the data out with `builtin_data`. The
// calling function has to pass in an allocator object, and this allocator
// will be called to reserve space for the output data. If the calling
// function's allocator reserves memory on the heap, then it's the calling
// function's responsibility to free it.
// If it returns kTfLiteError, `builtin_data` will be `nullptr`.
TfLiteStatus ParseOpData(const Operator* op, BuiltinOperator op_type,
                         ErrorReporter* error_reporter,
                         BuiltinDataAllocator* allocator, void** builtin_data);
// Converts the tensor data type used in the flat buffer to the representation
// used by the runtime.
TfLiteStatus ConvertTensorType(TensorType tensor_type, TfLiteType* type,
                               ErrorReporter* error_reporter);
} // namespace tflite
What is POD?
Here AllocatePOD returns the data structures that get filled in from the flatbuffer. The point of the comment: the allocator hands back raw memory without running constructors, and since the Interpreter's C extension part takes ownership, destructors will never run at deallocation either; that is only safe when the type is POD. In any case, this is where POD shows up.
https://zhuanlan.zhihu.com/p/29734547
POD essentially means types whose binary layout can be exchanged directly with C libraries, although the category also includes some C++-only types.
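For a concrete sense of what `static_assert(std::is_pod<T>::value, ...)` accepts and rejects, here is a minimal sketch (the struct names are illustrative only, not TFLite types):

#include <type_traits>

// A POD type: trivially constructible/copyable and standard-layout, so
// malloc-style allocation without running constructors is safe.
struct PodParams {
  int stride_w;
  int stride_h;
};

// Not POD: the user-provided constructor makes it non-trivial, so
// AllocatePOD's static_assert would reject it at compile time.
struct NonPodParams {
  NonPodParams() : stride_w(1) {}
  int stride_w;
};

static_assert(std::is_pod<PodParams>::value, "PodParams is POD");
static_assert(!std::is_pod<NonPodParams>::value, "NonPodParams is not POD");

int main() { return 0; }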
flatbuffer_conversions.cc
// Utility class for safely allocating POD data. This is useful for avoiding
// leaks in cases where op params are allocated but fail to propagate to the
// parsed op data (e.g., when model parameters are invalid).
class SafeBuiltinDataAllocator {
 public:
  // Nested class whose job is to release the acquired resource, standing in
  // for a destructor.
  class BuiltinDataDeleter {
   public:
    explicit BuiltinDataDeleter(BuiltinDataAllocator* allocator)
        : allocator_(allocator) {}
    // Overloads operator() (the function-call operator), so the object can be
    // invoked like a function.
    void operator()(void* data) {
      printf("------> call into SafeBuiltinDataAllocator <------\n");
      allocator_->Deallocate(data);
    }

   private:
    BuiltinDataAllocator* allocator_;
  };

  // `using` defines the resource-managing type std::unique_ptr with a custom
  // deleter instead of the default `delete`.
  template <typename T>
  using BuiltinDataPtr = std::unique_ptr<T, BuiltinDataDeleter>;

  // Constructor.
  explicit SafeBuiltinDataAllocator(BuiltinDataAllocator* allocator)
      : allocator_(allocator) {}

  // Member function: calls BuiltinDataAllocator::AllocatePOD and wraps the
  // result in a unique_ptr carrying the custom deleter.
  template <typename T>
  BuiltinDataPtr<T> Allocate() {
    return BuiltinDataPtr<T>(allocator_->AllocatePOD<T>(),
                             BuiltinDataDeleter(allocator_));
  }

 private:
  BuiltinDataAllocator* allocator_;
};
TfLiteStatus ParseOpData(const Operator* op, BuiltinOperator op_type,
                         ErrorReporter* error_reporter,
                         BuiltinDataAllocator* allocator, void** builtin_data) {
  // C++ lambda expression.
  auto parse_activation = [](ActivationFunctionType activation) {
    switch (activation) {
      case ActivationFunctionType_NONE:
        return kTfLiteActNone;
      case ActivationFunctionType_RELU:
        return kTfLiteActRelu;
      case ActivationFunctionType_RELU_N1_TO_1:
        return kTfLiteActRelu1;
      case ActivationFunctionType_RELU6:
        return kTfLiteActRelu6;
      case ActivationFunctionType_TANH:
        return kTfLiteActTanh;
      case ActivationFunctionType_SIGN_BIT:
        return kTfLiteActSignBit;
    }
    return kTfLiteActNone;
  };

  SafeBuiltinDataAllocator safe_allocator(allocator);  // construct safe_allocator
  *builtin_data = nullptr;
  switch (op_type) {
    case BuiltinOperator_CONV_2D: {
      auto params = safe_allocator.Allocate<TfLiteConvParams>();  // call member function Allocate
      if (auto* conv_params = op->builtin_options_as_Conv2DOptions()) {
        params->padding = parse_padding(conv_params->padding());
        params->stride_width = conv_params->stride_w();
        params->stride_height = conv_params->stride_h();
        params->activation =
            parse_activation(conv_params->fused_activation_function());
        params->dilation_width_factor = conv_params->dilation_w_factor();
        params->dilation_height_factor = conv_params->dilation_h_factor();
      }
      // release() detaches the managed resource from the unique_ptr, so the
      // custom deleter will not run on it when the unique_ptr is destroyed.
      *builtin_data = reinterpret_cast<void*>(params.release());
      break;
    }
  }
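Aside from `parse_activation` above, here is a generic standalone sketch of the lambda syntax (illustrative code, not from TFLite):

#include <cstdio>

int main() {
  int offset = 10;  // captured by value via [offset]
  // [capture list](parameters) { body }, stored in an `auto` variable and
  // then invoked like an ordinary function.
  auto add_offset = [offset](int x) { return x + offset; };
  printf("%d\n", add_offset(5));  // prints 15
  return 0;
}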
explicit on a constructor means the constructor must be invoked explicitly; implicit conversions from the argument type are disallowed.
using, like typedef, defines a type alias (and unlike typedef it can be templated).
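A small sketch of both keywords together (illustrative types, not from TFLite):

#include <memory>

struct Widget {
  explicit Widget(int size) : size_(size) {}
  int size_;
};

// `using` alias; can be templated, just like BuiltinDataPtr above.
template <typename T>
using Ptr = std::unique_ptr<T>;

int main() {
  Widget w(3);      // OK: explicit construction
  // Widget w2 = 3; // error: `explicit` forbids this implicit conversion
  Ptr<Widget> p(new Widget(5));
  return 0;
}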
The smart pointer std::unique_ptr
https://www.learncpp.com/cpp-tutorial/15-5-stdunique_ptr/
std::unique_ptr is the C++11 replacement for std::auto_ptr. It should be used to manage
any dynamically allocated object that is not shared by multiple objects.
That is, std::unique_ptr should completely own the object it manages,
not share that ownership with other classes. std::unique_ptr lives in the <memory> header.
#include <iostream>
#include <memory> // for std::unique_ptr

class Resource
{
public:
  Resource() { std::cout << "Resource acquired\n"; }
  ~Resource() { std::cout << "Resource destroyed\n"; }
};

int main()
{
  // allocate a Resource object and have it owned by std::unique_ptr
  std::unique_ptr<Resource> res(new Resource);
  return 0;
} // res goes out of scope here, and the allocated Resource is destroyed
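Note that BuiltinDataPtr above uses the two-parameter form std::unique_ptr<T, Deleter>, where the second template argument replaces the default `delete`. A minimal sketch of that pattern:

#include <cstdio>
#include <cstdlib>
#include <memory>

// Custom deleter: a callable whose operator() frees the resource,
// mirroring BuiltinDataDeleter above.
struct FreeDeleter {
  void operator()(void* p) {
    printf("custom deleter called\n");
    free(p);
  }
};

int main() {
  // malloc'd memory must not be released with `delete`, so we pair the
  // unique_ptr with a matching deleter instead of the default one.
  std::unique_ptr<int, FreeDeleter> p(static_cast<int*>(malloc(sizeof(int))));
  return 0;  // FreeDeleter::operator() runs here
}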
MicroBuiltinDataAllocator, an implementation class of BuiltinDataAllocator:
class MicroBuiltinDataAllocator : public BuiltinDataAllocator {
 public:
  explicit MicroBuiltinDataAllocator(SimpleMemoryAllocator* memory_allocator)
      : memory_allocator_(memory_allocator) {}

  void* Allocate(size_t size) override {
    // Align to an address that is proper for all primitive types, but no more
    // than the size.
    return memory_allocator_->AllocateFromTail(
        size, std::min(size, alignof(max_align_t)));
  }
  void Deallocate(void* data) override {
    // Do not deallocate, builtin data needs to be available for the life time
    // of the model.
  }

 private:
  SimpleMemoryAllocator* memory_allocator_;

  TF_LITE_REMOVE_VIRTUAL_DELETE
};
// In the end, the actual allocation work is done by SimpleMemoryAllocator.
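SimpleMemoryAllocator itself is not shown here; conceptually, AllocateFromTail hands out aligned blocks downward from the end of a fixed arena. A rough sketch of that idea (hypothetical code, not the TFLite implementation):

#include <cstddef>
#include <cstdint>

// Hypothetical tail allocator: carves blocks from the high end of a fixed
// buffer, aligning each returned pointer downward.
class TailArena {
 public:
  TailArena(uint8_t* buffer, size_t size)
      : head_(buffer), tail_(buffer + size) {}

  // `alignment` is assumed to be a power of two, so align-down is a mask.
  void* AllocateFromTail(size_t size, size_t alignment) {
    uintptr_t addr = reinterpret_cast<uintptr_t>(tail_) - size;
    addr &= ~(static_cast<uintptr_t>(alignment) - 1);  // align down
    if (addr < reinterpret_cast<uintptr_t>(head_)) return nullptr;  // arena exhausted
    tail_ = reinterpret_cast<uint8_t*>(addr);
    return tail_;
  }

 private:
  uint8_t* head_;  // low bound of the arena
  uint8_t* tail_;  // moves downward with each allocation
};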
What is the difference between builtin_data and weights?
Weights are the learned values on the connections between nodes, whereas builtin_data holds the operator's own parameters, e.g.:
struct Conv2DOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
  typedef Conv2DOptionsT NativeTableType;
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_PADDING = 4,
    VT_STRIDE_W = 6,
    VT_STRIDE_H = 8,
    VT_FUSED_ACTIVATION_FUNCTION = 10,
    VT_DILATION_W_FACTOR = 12,
    VT_DILATION_H_FACTOR = 14
  };
};
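The accessors used earlier (conv_params->padding() and friends) read fields through these vtable offsets; FlatBuffers-generated code follows roughly this pattern (a sketch of the codegen style, not the exact generated file):

// Sketch of generated member functions inside Conv2DOptions:
Padding padding() const {
  // GetField (from flatbuffers::Table) returns the stored value, or the
  // schema default (0 here) when the field was absent at write time.
  return static_cast<Padding>(GetField<int8_t>(VT_PADDING, 0));
}
int32_t stride_w() const {
  return GetField<int32_t>(VT_STRIDE_W, 0);
}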
How is builtin_data used?
TfLiteStatus MicroAllocator::AllocateNodeAndRegistrations(
    const OpResolver& op_resolver,
    NodeAndRegistration** node_and_registrations) {
  TF_LITE_ENSURE_STATUS(ParseOpData(op, op_type, error_reporter_,
                                    &builtin_data_allocator,
                                    (void**)(&builtin_data)));

  // Disregard const qualifier to workaround with existing API.
  TfLiteIntArray* inputs_array = const_cast<TfLiteIntArray*>(
      reinterpret_cast<const TfLiteIntArray*>(op->inputs()));
  TfLiteIntArray* outputs_array = const_cast<TfLiteIntArray*>(
      reinterpret_cast<const TfLiteIntArray*>(op->outputs()));

  TfLiteNode* node = &(output[i].node);
  node->inputs = inputs_array;
  node->outputs = outputs_array;
  // This is OK for now as the temporaries array is not in use.
  // TODO(wangtz): Support scratch buffers.
  node->temporaries = nullptr;
  node->user_data = nullptr;  // Will be filled in after `init`
  node->builtin_data = reinterpret_cast<void*>(builtin_data);
  node->custom_initial_data = custom_data;
  node->custom_initial_data_size = custom_data_size;
  node->delegate = nullptr;
}
// A structure representing an instance of a node.
// This structure only exhibits the inputs, outputs and user defined data, not
// other features like the type.
typedef struct {
  // Inputs to this node expressed as indices into the simulator's tensors.
  TfLiteIntArray* inputs;

  // Outputs to this node expressed as indices into the simulator's tensors.
  TfLiteIntArray* outputs;

  // Intermediate tensors to this node expressed as indices into the
  // simulator's tensors.
  TfLiteIntArray* intermediates;

  // Temporary tensors used during the computations. This usually contains no
  // tensors, but ops are allowed to change that if they need scratch space of
  // any sort.
  TfLiteIntArray* temporaries;

  // Opaque data provided by the node implementer through `Registration.init`.
  void* user_data;

  // Opaque data provided to the node if the node is a builtin. This is usually
  // a structure defined in builtin_op_data.h
  void* builtin_data;

  // Custom initial data. This is the opaque data provided in the flatbuffer.
  // WARNING: This is an experimental interface that is subject to change.
  const void* custom_initial_data;
  int custom_initial_data_size;

  // The pointer to the delegate. This is non-null only when the node is
  // created by calling `interpreter.ModifyGraphWithDelegate`.
  // WARNING: This is an experimental interface that is subject to change.
  struct TfLiteDelegate* delegate;
} TfLiteNode;