The data structures in flatbuffers are generated from the schema file by the flatbuffers protocol and live in the generated xxx.h file; they are used to access the data stored in a flatbuffers file. The TfLite data structures such as TfLiteTensor, TfLiteNode and TfLiteRegistration are used by the TfLite interpreter: they hold the data processed out of the flatbuffer, so that related data sits together in one structure.
The flatbuffers data structures are as follows:
OpCode is the set of opcodes used in the model file, stored as an array; an operator finds its OpCode in this array through its opcode_index.
A Buffer's content is the weights; both Buffer and OpCode are stored as plain data, and a tensor finds its corresponding Buffer through its buffer index.
Sub-Graph is the main structure in the model file and describes the computation graph, which is built from tensors (the data) and operators (the computations on that data); the sketch below walks these structures.
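As a minimal sketch of these lookups (not from the original post; model_buffer is an assumed pointer to the raw .tflite file contents), walking the flatbuffer model with the API generated from schema.fbs looks roughly like this:
#include "tensorflow/lite/schema/schema_generated.h"

void WalkModel(const void* model_buffer) {
  const tflite::Model* model = tflite::GetModel(model_buffer);
  const tflite::SubGraph* subgraph = (*model->subgraphs())[0]; // the main graph
  for (const tflite::Operator* op : *subgraph->operators()) {
    // operator -> OperatorCode via opcode_index
    const tflite::OperatorCode* opcode =
        (*model->operator_codes())[op->opcode_index()];
    (void)opcode;
  }
  for (const tflite::Tensor* tensor : *subgraph->tensors()) {
    // tensor -> Buffer via its buffer index; weights live in Buffer::data
    const tflite::Buffer* buffer = (*model->buffers())[tensor->buffer()];
    (void)buffer;
  }
}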
TfLite data structures
All TfLite-related data structures start with the TfLite prefix.
TfLiteIntArray
This structure is used not only to describe a Tensor's shape (the number of dimensions and the size of each), but also to describe a TfLiteNode's inputs, outputs and temporaries: how many TfLiteTensors there are and the tensor_index of each. Understanding this is important for reading the code.
// Fixed size list of integers. Used for dimensions and inputs/outputs tensor
// indices
typedef struct {
int size;
int data[];
} TfLiteIntArray;
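A hedged sketch of how this flexible-array layout is allocated (assuming the struct above is in scope, and using plain malloc instead of the allocator used later): one int for size plus one per entry, which is exactly the sizeof(int) * (Length() + 1) pattern in the dims code below.
#include <cstdlib>

// e.g. a 3-D tensor shape {1, 28, 28}
TfLiteIntArray* array =
    static_cast<TfLiteIntArray*>(std::malloc(sizeof(int) * (3 + 1)));
array->size = 3;
array->data[0] = 1;
array->data[1] = 28;
array->data[2] = 28;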
TfLiteTensor: dims
//6. allocate memory for dims and fill it from the tensor shape
result->dims = reinterpret_cast<TfLiteIntArray*>(AllocateMemory(
sizeof(int) * (flatbuffer_tensor.shape()->Length() + 1), sizeof(int)));
result->dims->size = flatbuffer_tensor.shape()->Length();
for (int n = 0; n < flatbuffer_tensor.shape()->Length(); ++n) {
result->dims->data[n] = flatbuffer_tensor.shape()->Get(n);
}
TfLiteNode: inputs/outputs
TfLiteIntArray* inputs_array = const_cast<TfLiteIntArray*>(
reinterpret_cast<const TfLiteIntArray*>(op->inputs()));
TfLiteIntArray* outputs_array = const_cast<TfLiteIntArray*>(
reinterpret_cast<const TfLiteIntArray*>(op->outputs()));
TfLiteNode node;
node.inputs = inputs_array;
node.outputs = outputs_array;
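The reinterpret_cast works because (an observation, not stated in the original post) flatbuffers serializes a vector of int32_t as a 32-bit element count followed by the elements, which matches TfLiteIntArray's {size, data[]} layout byte for byte. A minimal sketch of reading through the cast view, where op is an assumed const tflite::Operator*:
const TfLiteIntArray* inputs =
    reinterpret_cast<const TfLiteIntArray*>(op->inputs());
for (int i = 0; i < inputs->size; ++i) {
  int tensor_index = inputs->data[i]; // index into the subgraph's tensors
  (void)tensor_index;
}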
TfLiteTensor
const flatbuffers::Vector<flatbuffers::Offset<Buffer>>* buffers = model_->buffers();
TfLiteStatus SimpleTensorAllocator::AllocateTensor(
const tflite::Tensor& flatbuffer_tensor, int create_before,
int destroy_after,
const flatbuffers::Vector<flatbuffers::Offset<Buffer>>* buffers,
ErrorReporter* error_reporter, TfLiteTensor* result,
uint8_t* preallocated_buffer) {
//1. flatbuffer type -> TfLiteType
TF_LITE_ENSURE_STATUS(ConvertTensorType(flatbuffer_tensor.type(),
&result->type, error_reporter));
//2. copy is_variable from the flatbuffer
result->is_variable = flatbuffer_tensor.is_variable();
result->data.raw = nullptr;
result->bytes = 0;
//3. get data from buffers
if (auto* buffer = (*buffers)[flatbuffer_tensor.buffer()]) { // analogous to the op_codes lookup
if (auto* array = buffer->data()) {
if (size_t array_size = array->size()) {
result->data.raw = // just points at the flatbuffer data; no memory is allocated
const_cast<char*>(reinterpret_cast<const char*>(array->data()));
size_t type_size;
TF_LITE_ENSURE_STATUS(BytesRequired(flatbuffer_tensor, array_size,
&result->bytes, &type_size,
error_reporter));
}
}
}
//4. allocation_type: kTfLiteMmapRo
//5. no data in the flatbuffer buffer, so allocate memory from the arena
if (result->data.raw) {
result->allocation_type = kTfLiteMmapRo;
} else {
int data_size = 1;
for (int n = 0; n < flatbuffer_tensor.shape()->Length(); ++n) {
data_size *= flatbuffer_tensor.shape()->Get(n);
}
size_t type_size;
TF_LITE_ENSURE_STATUS(BytesRequired(flatbuffer_tensor, data_size,
&result->bytes, &type_size,
error_reporter));
if (preallocated_buffer != nullptr) {
result->data.raw = reinterpret_cast<char*>(preallocated_buffer);
} else {
result->data.raw = // type_size is used for alignment
reinterpret_cast<char*>(AllocateMemory(result->bytes, type_size));
}
//6. allocate memory for dims and fill it from the tensor shape
result->dims = reinterpret_cast<TfLiteIntArray*>(AllocateMemory(
sizeof(int) * (flatbuffer_tensor.shape()->Length() + 1), sizeof(int)));
result->dims->size = flatbuffer_tensor.shape()->Length();
for (int n = 0; n < flatbuffer_tensor.shape()->Length(); ++n) {
result->dims->data[n] = flatbuffer_tensor.shape()->Get(n);
}
//7. quantization
const auto* src_quantization = flatbuffer_tensor.quantization();
if (src_quantization && src_quantization->scale() &&
(src_quantization->scale()->size() > 0) &&
src_quantization->zero_point() &&
(src_quantization->zero_point()->size() > 0)) {
result->params.scale = src_quantization->scale()->Get(0);
result->params.zero_point = src_quantization->zero_point()->Get(0);
}
result->allocation = nullptr; //An opaque pointer to a tflite::MMapAllocation
//8. tensor name
if (flatbuffer_tensor.name()) {
result->name = flatbuffer_tensor.name()->c_str();
} else {
result->name = "<No name>";
}
result->delegate = nullptr;
result->buffer_handle = 0;
result->data_is_stale = false;
return kTfLiteOk;
}
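As a hedged sketch of the size computation behind step 5 (a simplification, not the actual BytesRequired implementation): the element count is the product of all dimensions, and the byte count multiplies that by the element size.
#include <cstddef>

size_t ElementCount(const TfLiteIntArray& dims) {
  size_t count = 1;
  for (int i = 0; i < dims.size; ++i) count *= dims.data[i];
  return count; // e.g. {1, 28, 28} -> 784 elements; bytes = 784 * sizeof(float) for kTfLiteFloat32
}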
TfLiteRegistration
typedef struct _TfLiteRegistration {
void* (*init)(TfLiteContext* context, const char* buffer, size_t length);
void (*free)(TfLiteContext* context, void* buffer);
TfLiteStatus (*prepare)(TfLiteContext* context, TfLiteNode* node);
TfLiteStatus (*invoke)(TfLiteContext* context, TfLiteNode* node);
const char* (*profiling_string)(const TfLiteContext* context, const TfLiteNode* node);
int32_t builtin_code;
const char* custom_name;
int version;
} TfLiteRegistration;
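A hedged sketch (not from the original post) of the order in which the interpreter drives these hooks for one node; context, node and registration are the structures discussed in this post:
void RunNodeOnce(TfLiteContext* context, TfLiteNode* node,
                 const TfLiteRegistration* registration) {
  if (registration->init) { // once, when the node is created
    node->user_data =
        registration->init(context, /*buffer=*/nullptr, /*length=*/0);
  }
  if (registration->prepare) { // whenever tensors are (re)allocated
    registration->prepare(context, node);
  }
  registration->invoke(context, node); // every inference
  if (registration->free) { // once, on teardown
    registration->free(context, node->user_data);
  }
}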
auto opcodes = model_->operator_codes();
printf("opcodes: %d\n", opcodes->size());// all need opcodes
for (int i = 0; i < operators_->size(); ++i) {
const auto* op = operators_->Get(i);
int index = op->opcode_index();
auto opcode = (*opcodes)[index];
const TfLiteRegistration* registration = nullptr;
status = GetRegistrationFromOpCode(opcode, op_resolver_, error_reporter_,
&registration); // does the returned registration contain builtin_code? How are the two kept consistent?
}
TfLiteStatus GetRegistrationFromOpCode( // uses the flatbuffer struct OperatorCode directly
    const OperatorCode* opcode, const OpResolver& op_resolver,
    ErrorReporter* error_reporter, const TfLiteRegistration** registration) {
  TfLiteStatus status = kTfLiteOk;
  *registration = nullptr;
  auto builtin_code = opcode->builtin_code(); // a member-function call, not a member variable
  int version = opcode->version();
  if (builtin_code != BuiltinOperator_CUSTOM) {
    *registration = op_resolver.FindOp(builtin_code, version);
  } else {
    const char* name = opcode->custom_code()->c_str();
    *registration = op_resolver.FindOp(name, version);
  }
  return status;
}
BuiltinOperator op_type = // the code below uses registration's fields directly, so they were filled into the registration, not read from the flatbuffer
static_cast<BuiltinOperator>(registration->builtin_code);
Implementation of TfLiteRegistration
The file that implements a TfLiteRegistration does not itself know which builtin_code it is associated with.
TfLiteRegistration* Register_RESHAPE() {
//static TfLiteRegistration r = {nullptr, nullptr, reshape::Prepare,
static TfLiteRegistration r = {nullptr, nullptr, nullptr,
reshape::Eval};
return &r;
}
// Only in AddBuiltin are the BuiltinOperator and version assigned, associating them with the TfLiteRegistration
AddBuiltin(BuiltinOperator_RESHAPE, Register_RESHAPE());
void MicroMutableOpResolver::AddBuiltin(tflite::BuiltinOperator op,
TfLiteRegistration* registration,
int min_version, int max_version) {
for (int version = min_version; version <= max_version; ++version) {
TfLiteRegistration* new_registration = &registrations_[registrations_len_];
registrations_len_ += 1;
*new_registration = *registration;
new_registration->builtin_code = op;
new_registration->version = version;
}
}
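To answer the consistency question raised earlier, here is a hedged sketch of the lookup side (a simplification of what MicroMutableOpResolver::FindOp would do, not the actual implementation): FindOp matches exactly the builtin_code and version stamped in AddBuiltin above.
const TfLiteRegistration* FindOpSketch(const TfLiteRegistration* registrations,
                                       int registrations_len,
                                       tflite::BuiltinOperator op, int version) {
  for (int i = 0; i < registrations_len; ++i) {
    const TfLiteRegistration& r = registrations[i];
    if (r.builtin_code == op && r.version == version) return &r;
  }
  return nullptr; // not registered
}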
TfLiteNode
// A structure representing an instance of a node.
// This structure only exhibits the inputs, outputs and user defined data, not
// other features like the type.
TfLiteNode holds the data an operator works on, but no information about the operator itself. How are the two associated?
Inference over the model runs in the order given by operators_ = subgraph_->operators();. From each operator we can obtain the TfLiteRegistration; ParseOpData yields builtin_data or custom_data (collectively called init_data here), and registration->init yields user_data,
as well as the input/output tensors and so on that the operator needs.
The data obtained from the operator above is assigned into a TfLiteNode, and the TfLiteNode is passed as the argument to the operator's functions (except init/free); this is how the two are associated.
registration->prepare(&context_, &node);
registration->invoke(&context_, &node)
StackDataAllocator stack_data_allocator;
const char* custom_data = nullptr;
size_t custom_data_size = 0;
unsigned char* builtin_data = nullptr;
if (op->custom_options()) {
custom_data = reinterpret_cast<const char*>(op->custom_options()->data());
custom_data_size = op->custom_options()->size();
} else {
TF_LITE_ENSURE_STATUS(ParseOpData(op, op_type, error_reporter_,
&stack_data_allocator,
(void**)(&builtin_data)));
}
const char* init_data;
size_t init_data_size;
if (registration->builtin_code == BuiltinOperator_CUSTOM) {
init_data = custom_data;
init_data_size = custom_data_size;
} else {
init_data = reinterpret_cast<const char*>(builtin_data);
init_data_size = 0;
}
void* user_data = nullptr;
if (registration->init) {
user_data = registration->init(&context_, init_data, init_data_size);
}
// Disregard const qualifier to workaround with existing API.
TfLiteIntArray* inputs_array = const_cast<TfLiteIntArray*>(
reinterpret_cast<const TfLiteIntArray*>(op->inputs()));
TfLiteIntArray* outputs_array = const_cast<TfLiteIntArray*>(
reinterpret_cast<const TfLiteIntArray*>(op->outputs()));
// use stack memory to create a TfLiteIntArray that stores the temporary tensors
const int kMaxTemporaries = 16;
int temporaries_data[kMaxTemporaries + 1];
TfLiteIntArray* temporaries_array =
reinterpret_cast<TfLiteIntArray*>(temporaries_data);
temporaries_array->size = 0;
TfLiteNode node;
node.inputs = inputs_array;
node.outputs = outputs_array;
node.temporaries = temporaries_array;
node.user_data = user_data;
node.builtin_data = reinterpret_cast<void*>(builtin_data);
node.custom_initial_data = custom_data;
node.custom_initial_data_size = custom_data_size;
node.delegate = nullptr;
TfLiteContext
context_.impl_ = static_cast<void*>(this);
context_.ReportError = ReportOpError;
context_.recommended_num_threads = 1;
tensors_ = subgraph_->tensors(); // all the tensors used from the flatbuffer come from here
context_.tensors_size = tensors_->size();
context_.tensors =
    reinterpret_cast<TfLiteTensor*>(tensor_allocator_.AllocateMemory(
        sizeof(TfLiteTensor) * context_.tensors_size, 4));
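Tying TfLiteNode and TfLiteContext together, a hedged sketch (mirroring, as an assumption, the GetInput-style helpers that kernels use): node->inputs holds tensor indices into context->tensors.
const TfLiteTensor* GetInputSketch(const TfLiteContext* context,
                                   const TfLiteNode* node, int index) {
  return &context->tensors[node->inputs->data[index]];
}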