TFLite: Interpreter::AllocateTensors

 

ResizeInputTensor

  TfLiteStatus Interpreter::ResizeInputTensor(int tensor_index,  // the tensor to resize
                                              const std::vector<int>& dims) {  // the target dimensions
    TfLiteTensor* tensor = &context_.tensors[tensor_index];
    // Short-circuit the state change if the dimensions don't change, avoiding
    // unnecessary (re)allocations.
    if (EqualArrayAndTfLiteIntArray(tensor->dims, dims.size(), dims.data())) {
      return kTfLiteOk;
    }
  
    state_ = kStateUninvokable;
    return ResizeTensorImpl(tensor, ConvertVectorToTfLiteIntArray(dims));
  }
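
ConvertVectorToTfLiteIntArray turns the std::vector<int> into a heap-allocated TfLiteIntArray whose ownership ResizeTensorImpl then takes over. A sketch of what that conversion amounts to (TfLiteIntArrayCreate is the real C helper; the function name below is illustrative):

  // Sketch: convert a std::vector<int> of dims into a TfLiteIntArray.
  // The caller (ResizeTensorImpl) takes ownership of the returned array.
  TfLiteIntArray* ConvertVectorToTfLiteIntArraySketch(const std::vector<int>& dims) {
    TfLiteIntArray* result = TfLiteIntArrayCreate(dims.size());
    for (size_t i = 0; i < dims.size(); ++i) result->data[i] = dims[i];
    return result;
  }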

ResizeTensorImpl

  TfLiteStatus Interpreter::ResizeTensorImpl(TfLiteTensor* tensor,
                                             TfLiteIntArray* new_size) {                                                                                                                                    
    // Note that in theory we could resize kTfLiteArenaRwPersistent tensors too.
    if (tensor->allocation_type == kTfLiteArenaRw ||
        tensor->allocation_type == kTfLiteDynamic ||
        tensor->allocation_type == kTfLiteArenaRwPersistent) {
      tensor_resized_since_op_invoke_ |=
          TfLiteIntArrayEqual(tensor->dims, new_size) == 0;
      if (tensor->type != kTfLiteString) {
        size_t bytesRequired;
        TfLiteStatus status = BytesRequired(tensor->type, new_size->data,
                                            new_size->size, &bytesRequired);
        if (status != kTfLiteOk) {
          TfLiteIntArrayFree(new_size);
          return kTfLiteError;
        }
  
        // Realloc space for kTfLiteDynamic tensors.
        TfLiteTensorRealloc(bytesRequired, tensor);
        tensor->bytes = bytesRequired;
      }
      if (tensor->dims) TfLiteIntArrayFree(tensor->dims);
      tensor->dims = new_size;
  
      if (tensor->allocation_type != kTfLiteDynamic) {
        tensor->data.raw = nullptr;
      }
    } else {
      // kTfLiteMmapRo tensors are stored in the flatbuffer and are therefore
      // of fixed size.
      TfLiteIntArrayFree(new_size);
      ReportError(&context_, "Attempting to resize a fixed-size tensor.");
      return kTfLiteError;
    }
    return kTfLiteOk;
  }
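
BytesRequired computes how many bytes the resized tensor needs from the element type and the new dims. Conceptually it is just element-count times element-size; a simplified sketch of that computation (the real implementation also guards against overflow and covers more types) is:

  // Simplified sketch of what BytesRequired computes for a non-string tensor.
  size_t BytesRequiredSketch(TfLiteType type, const int* dims, int num_dims) {
    size_t count = 1;
    for (int i = 0; i < num_dims; ++i) count *= dims[i];
    switch (type) {
      case kTfLiteFloat32: return count * sizeof(float);
      case kTfLiteInt32:   return count * sizeof(int32_t);
      case kTfLiteUInt8:   return count * sizeof(uint8_t);
      default:             return 0;  // other types omitted from this sketch
    }
  }

Note that for kTfLiteArenaRw tensors the resize only updates `bytes` and `dims` and clears `data.raw`; the actual buffer is assigned later by the memory planner during AllocateTensors.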

After a resize, AllocateTensors has to be called again.

AllocateTensors must have been called before the interpreter can be invoked.
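
For example, a client that resizes an input has to call AllocateTensors again before the next Invoke. A minimal usage sketch ("model.tflite" and the shape {1, 224, 224, 3} are placeholder values):

  #include <memory>

  #include "tensorflow/lite/interpreter.h"
  #include "tensorflow/lite/kernels/register.h"
  #include "tensorflow/lite/model.h"

  void RunWithResizedInput() {
    auto model = tflite::FlatBufferModel::BuildFromFile("model.tflite");
    tflite::ops::builtin::BuiltinOpResolver resolver;
    std::unique_ptr<tflite::Interpreter> interpreter;
    tflite::InterpreterBuilder(*model, resolver)(&interpreter);

    // Resize the first input, then (re)allocate before running.
    interpreter->ResizeInputTensor(interpreter->inputs()[0], {1, 224, 224, 3});
    interpreter->AllocateTensors();

    float* input = interpreter->typed_input_tensor<float>(0);
    // ... fill `input` with 1*224*224*3 floats ...
    interpreter->Invoke();
  }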

  // Update allocations for all tensors. This will redim dependent tensors using
  // the input tensor dimensionality as given. This is relatively expensive.
  // If you know that your sizes are not changing, you need not call this.
  // Returns status of success or failure.
  TfLiteStatus AllocateTensors();

  TfLiteStatus Interpreter::AllocateTensors() {
    // Explicit (re)allocation is necessary if nodes have been changed or tensors
    // have been resized. For inputs marked as dynamic, we can't short-circuit the
    // allocation as the client may have done the resize manually.
    if (state_ != kStateUninvokable && !HasDynamicTensorImpl(context_, inputs_)) {
      return kTfLiteOk;
    }
  
    next_execution_plan_index_to_prepare_ = 0;
    if (memory_planner_) {
      TF_LITE_ENSURE_STATUS(memory_planner_->ResetAllocations());          
    }
  
    TF_LITE_ENSURE_STATUS(PrepareOpsAndTensors());
  
    state_ = kStateInvokable;
  
    // Reset the variable tensors to zero after (re)allocating the tensors.
    // Developers shouldn't rely on the side effect of this function to reset
    // variable tensors. They should call `ResetVariableTensors` directly
    // instead.
    ResetVariableTensors();
  
    return kTfLiteOk;
  }

1. memory_planner_


  std::unique_ptr<MemoryPlanner> memory_planner_;
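
memory_planner_ is held through the abstract MemoryPlanner interface; the concrete planner created below is an ArenaPlanner. Only three of its methods appear in this walkthrough, so a reduced sketch of the interface (the real header has more than this) is:

  // Sketch of the MemoryPlanner interface, reduced to the methods used here.
  class MemoryPlanner {
   public:
    virtual ~MemoryPlanner() {}
    // Record when each tensor is first produced and last consumed.
    virtual TfLiteStatus PlanAllocations() = 0;
    // Assign arena offsets for tensors used by nodes [first_node, last_node]
    // and commit the underlying buffer.
    virtual TfLiteStatus ExecuteAllocations(int first_node, int last_node) = 0;
    // Invalidate existing allocations so planning can start over.
    virtual TfLiteStatus ResetAllocations() = 0;
  };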

2. PrepareOpsAndTensors


  TfLiteStatus Interpreter::PrepareOpsAndTensors() {
    if (!memory_planner_) {
      memory_planner_.reset(new ArenaPlanner(
          &context_, std::unique_ptr<GraphInfo>(new InterpreterInfo(this)),
          /*preserve_inputs=*/true, /*preserve_intermediates*/ false));
      memory_planner_->PlanAllocations();
    } 
      
    int last_exec_plan_index_prepared = 0;
      
    TF_LITE_ENSURE_STATUS(PrepareOpsStartingAt(
        next_execution_plan_index_to_prepare_, &last_exec_plan_index_prepared));                                                   
    TF_LITE_ENSURE_STATUS(memory_planner_->ExecuteAllocations(
        next_execution_plan_index_to_prepare_, last_exec_plan_index_prepared));
      
    next_execution_plan_index_to_prepare_ = last_exec_plan_index_prepared + 1;
    return kTfLiteOk;
  }

2.1 memory_planner_->PlanAllocations() // plans the memory allocation
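
Conceptually, ArenaPlanner::PlanAllocations walks the execution plan once and records, for every tensor, when it is first produced and when it is last read; tensors whose lifetimes do not overlap can later share arena space. A simplified sketch of that bookkeeping (names such as first_use / last_use are illustrative, not the actual implementation):

  // Simplified sketch of the lifetime bookkeeping behind PlanAllocations.
  // `first_use` and `last_use` start out filled with -1.
  for (int i = 0; i < execution_plan.size(); ++i) {
    const TfLiteNode& node = nodes[execution_plan[i]];
    for (int t : OutputTensors(node)) {      // tensor produced here
      if (first_use[t] < 0) first_use[t] = i;
    }
    for (int t : InputTensors(node)) {
      last_use[t] = i;                       // latest reader seen so far
    }
  }
  // Tensors whose [first_use, last_use] intervals do not overlap may be
  // assigned overlapping offsets in the arena.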

 

2.2 PrepareOpsStartingAt


  TfLiteStatus Interpreter::PrepareOpsStartingAt(                                                                                                                                                                  
      int first_execution_plan_index, int* last_execution_plan_index_prepared) {
    for (int execution_plan_index = first_execution_plan_index;
       execution_plan_index < execution_plan_.size(); execution_plan_index++) {
      int node_index = execution_plan_[execution_plan_index];
      TfLiteNode& node = nodes_and_registration_[node_index].first;
      const TfLiteRegistration& registration =
          nodes_and_registration_[node_index].second;
      EnsureTensorsVectorCapacity();
      if (OpPrepare(registration, &node) == kTfLiteError) {
        return ReportOpError(&context_, node, registration, node_index,
                             "failed to prepare");
      }    
  
      *last_execution_plan_index_prepared = execution_plan_index;
  
      // Discontinue if the node has dynamic outputs. Note that we don't
      // stop for dynamic temporary tensors since they won't affect the
      // sizes of other tensors in the graph.
      if (HasDynamicTensor(context_, node.outputs)) {
        break;
      }    
    }
    return kTfLiteOk;
  }

2.2.1 OpPrepare(registration, &node)


  // Prepare the given 'node' for execution.
  TfLiteStatus OpPrepare(const TfLiteRegistration& op_reg, TfLiteNode* node) {                
    if (op_reg.prepare == nullptr) return kTfLiteOk;
    return op_reg.prepare(&context_, node);
  } 
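
op_reg.prepare is the kernel's own Prepare callback; it is where each op validates its inputs and resizes its outputs for the new input shapes, which is how a resized input propagates through the graph. A sketch of a typical single-input, single-output Prepare (not an actual TFLite kernel):

  // Sketch of a typical Prepare callback: copy the (possibly resized) input
  // shape onto the output tensor. ResizeTensor takes ownership of the array.
  TfLiteStatus MyOpPrepare(TfLiteContext* context, TfLiteNode* node) {
    const TfLiteTensor& input = context->tensors[node->inputs->data[0]];
    TfLiteTensor* output = &context->tensors[node->outputs->data[0]];
    TfLiteIntArray* output_size = TfLiteIntArrayCopy(input.dims);
    return context->ResizeTensor(context, output, output_size);
  }

If an output's size cannot be determined at Prepare time, the kernel typically marks it as dynamic instead, which is exactly the case HasDynamicTensor checks for in the loop above.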

2.3 memory_planner_->ExecuteAllocations


  TfLiteStatus ArenaPlanner::ExecuteAllocations(int first_node, int last_node) {
    // Grow the size of `allocs_` if necessary. This allows allocating temporary
    // tensors in op's `prepare` function.
    TF_LITE_ENSURE(context_, graph_info_->num_tensors() >= allocs_.size());
    allocs_.resize(graph_info_->num_tensors());
  
    TF_LITE_ENSURE_STATUS(CalculateAllocations(first_node, last_node));
    TF_LITE_ENSURE_STATUS(Commit());
  
    for (int i = 0; i < graph_info_->num_tensors(); ++i) {
      // TODO(ahentz): we could do this only for the tensors that were modified
      // in CalculateAllocations(), instead of redoing it for tensors that
      // already had proper pointers. However we must be very careful, because
      // SimpleMemoryArena::Commit() could move the base pointer.
      TF_LITE_ENSURE_STATUS(ResolveTensorAllocation(i));                                                                              
    }
  
    return kTfLiteOk;
  }
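
CalculateAllocations assigns each tensor an offset inside the shared arena, Commit() makes sure the arena's buffer is large enough (which may move its base pointer, hence the TODO comment above), and ResolveTensorAllocation then rewrites every tensor's data pointer from its offset. A conceptual sketch (illustrative names, not the real SimpleMemoryArena code):

  // Conceptual sketch of resolving an arena allocation back into a pointer.
  // `arena_base` and `offset` are illustrative names.
  TfLiteStatus ResolveTensorAllocationSketch(TfLiteTensor* tensor,
                                             char* arena_base,
                                             size_t offset) {
    // Only arena-managed tensors are patched; kTfLiteDynamic tensors own
    // their buffer and kTfLiteMmapRo tensors point into the flatbuffer.
    if (tensor->allocation_type == kTfLiteArenaRw ||
        tensor->allocation_type == kTfLiteArenaRwPersistent) {
      tensor->data.raw = arena_base + offset;
    }
    return kTfLiteOk;
  }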

2.4 ResetVariableTensors


After the tensors have been allocated, the variable tensors are reset to zero:

  TfLiteStatus Interpreter::ResetVariableTensors() {                                                                              
    for (auto& tensor : tensors_) {
      if (!tensor.is_variable) {
        continue;
      }
    
      // Variable tensors have to be `kTfLiteArenaRwPersistent`, and must be
      // allocated after the initial `PrepareOpsAndTensors()` is called.
      TF_LITE_ENSURE_EQ(&context_, tensor.allocation_type,
                        kTfLiteArenaRwPersistent);
      TF_LITE_ENSURE(&context_, tensor.data.raw != nullptr);
    
      memset(tensor.data.raw, 0, tensor.bytes);
    }
    return kTfLiteOk;
  } 
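
ResetVariableTensors is also part of the public Interpreter API, so a stateful model (for example one carrying RNN/LSTM state in variable tensors) can clear that state explicitly between unrelated sequences instead of relying on AllocateTensors being called again. A minimal sketch (`sequences` is a placeholder; `interpreter` is an already-allocated tflite::Interpreter):

  // Sketch: explicitly reset persistent state between independent sequences.
  for (const auto& sequence : sequences) {
    interpreter->ResetVariableTensors();  // zero all variable tensors
    for (const auto& step : sequence) {
      // ... copy `step` into the input tensor ...
      interpreter->Invoke();
    }
  }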
 
