ResizeInputTensor
// Resizes the tensor at `tensor_index` to the shape given in `dims`.
// If the new shape equals the current one this is a no-op and the
// interpreter stays invokable; otherwise the interpreter is marked
// uninvokable (AllocateTensors() must be called again before Invoke())
// and the actual resize is delegated to ResizeTensorImpl().
TfLiteStatus Interpreter::ResizeInputTensor(int tensor_index,
                                            const std::vector<int>& dims) {
  // Bug fix: validate the index before dereferencing into the tensors
  // array — a bad index previously caused an out-of-range read.
  TF_LITE_ENSURE(&context_,
                 tensor_index >= 0 &&
                     tensor_index < static_cast<int>(context_.tensors_size));
  TfLiteTensor* tensor = &context_.tensors[tensor_index];
  // Short-circuit the state change if the dimensions don't change, avoiding
  // unnecessary (re)allocations.
  if (EqualArrayAndTfLiteIntArray(tensor->dims, dims.size(), dims.data())) {
    return kTfLiteOk;
  }
  state_ = kStateUninvokable;
  return ResizeTensorImpl(tensor, ConvertVectorToTfLiteIntArray(dims));
}
ResizeTensorImpl
// Performs the actual resize of `tensor` to `new_size`.
// Ownership: this function takes ownership of `new_size`; on every path it
// is either installed as tensor->dims or freed before returning.
TfLiteStatus Interpreter::ResizeTensorImpl(TfLiteTensor* tensor,
TfLiteIntArray* new_size) {
// Note that in theory we could resize kTfLiteArenaRwPersistent tensors too.
if (tensor->allocation_type == kTfLiteArenaRw ||
tensor->allocation_type == kTfLiteDynamic ||
tensor->allocation_type == kTfLiteArenaRwPersistent) {
// Record whether the shape actually changed so later (re)allocation
// logic can tell a real resize from a same-shape call.
tensor_resized_since_op_invoke_ |=
TfLiteIntArrayEqual(tensor->dims, new_size) == 0;
if (tensor->type != kTfLiteString) {
// Byte accounting only applies to fixed-width element types; string
// tensors have variable-size payloads and skip this branch.
size_t bytesRequired;
TfLiteStatus status = BytesRequired(tensor->type, new_size->data,
new_size->size, &bytesRequired);
if (status != kTfLiteOk) {
// The tensor never took ownership of new_size, so free it here.
TfLiteIntArrayFree(new_size);
return kTfLiteError;
}
// Realloc space for kTfLiteDynamic tensors.
// NOTE(review): the result of TfLiteTensorRealloc is not checked —
// confirm it cannot fail (or returns void) in this version.
TfLiteTensorRealloc(bytesRequired, tensor);
tensor->bytes = bytesRequired;
}
// Swap in the new shape; ownership of new_size transfers to the tensor.
if (tensor->dims) TfLiteIntArrayFree(tensor->dims);
tensor->dims = new_size;
// Arena-backed tensors lose their backing storage on resize; the pointer
// is re-established when the memory plan is executed again.
if (tensor->allocation_type != kTfLiteDynamic) {
tensor->data.raw = nullptr;
}
} else {
// kTfLiteMmapRo tensors are stored in the flatbuffer and are therefore
// of fixed size.
TfLiteIntArrayFree(new_size);
ReportError(&context_, "Attempting to resize a fixed-size tensor.");
return kTfLiteError;
}
return kTfLiteOk;
}
After a resize, AllocateTensors() must be called again.
AllocateTensors() must also run before the interpreter can be invoked.
// Update allocations for all tensors. This will redim dependent tensors using
// the input tensor dimensionality as given. This is relatively expensive.
// If you know that your sizes are not changing, you need not call this.
// Must be called again after any input tensor has been resized before the
// interpreter can be invoked.
// Returns status of success or failure.
TfLiteStatus AllocateTensors();
// (Re)allocates storage for all tensors, preparing ops as needed, then
// zero-fills variable tensors. Short-circuits when nothing changed since
// the last successful allocation and no input is dynamic.
TfLiteStatus Interpreter::AllocateTensors() {
  // Explicit (re)allocation is necessary if nodes have been changed or tensors
  // have been resized. For inputs marked as dynamic, we can't short-circuit the
  // allocation as the client may have done the resize manually.
  if (state_ != kStateUninvokable && !HasDynamicTensorImpl(context_, inputs_)) {
    return kTfLiteOk;
  }
  next_execution_plan_index_to_prepare_ = 0;
  if (memory_planner_) {
    TF_LITE_ENSURE_STATUS(memory_planner_->ResetAllocations());
  }
  TF_LITE_ENSURE_STATUS(PrepareOpsAndTensors());
  state_ = kStateInvokable;
  // Reset the variable tensors to zero after (re)allocating the tensors.
  // Developers shouldn't rely on the side effect of this function to reset
  // variable tensors. They should call `ResetVariableTensors` directly
  // instead.
  // Bug fix: the status was previously discarded, silently swallowing a
  // failed reset; propagate it to the caller.
  TF_LITE_ENSURE_STATUS(ResetVariableTensors());
  return kTfLiteOk;
}
1. memory_planner_
// Owns the memory plan for tensor allocations. Created lazily on the first
// PrepareOpsAndTensors() call (an ArenaPlanner in this implementation).
std::unique_ptr<MemoryPlanner> memory_planner_;
2. PrepareOpsAndTensors
// Lazily constructs the memory planner, prepares ops starting at
// next_execution_plan_index_to_prepare_, then executes the memory plan so
// the prepared tensors receive backing storage.
TfLiteStatus Interpreter::PrepareOpsAndTensors() {
  if (!memory_planner_) {
    memory_planner_.reset(new ArenaPlanner(
        &context_, std::unique_ptr<GraphInfo>(new InterpreterInfo(this)),
        /*preserve_inputs=*/true, /*preserve_intermediates*/ false));
    // Bug fix: the planner's status was previously discarded; a failed
    // planning pass would have gone unnoticed until much later.
    TF_LITE_ENSURE_STATUS(memory_planner_->PlanAllocations());
  }
  int last_exec_plan_index_prepared = 0;
  TF_LITE_ENSURE_STATUS(PrepareOpsStartingAt(
      next_execution_plan_index_to_prepare_, &last_exec_plan_index_prepared));
  TF_LITE_ENSURE_STATUS(memory_planner_->ExecuteAllocations(
      next_execution_plan_index_to_prepare_, last_exec_plan_index_prepared));
  // Resume after the last prepared node next time; nodes past a
  // dynamic-output op are prepared later, once real shapes are known.
  next_execution_plan_index_to_prepare_ = last_exec_plan_index_prepared + 1;
  return kTfLiteOk;
}
2.1 memory_planner_->PlanAllocations() // plans the memory allocation
2.2 PrepareOpsStartingAt
// Prepares nodes of the execution plan starting at
// `first_execution_plan_index`, stopping early at the first node with
// dynamically sized outputs (its consumers can only be prepared once real
// shapes are known). On success, *last_execution_plan_index_prepared holds
// the index of the last node prepared.
TfLiteStatus Interpreter::PrepareOpsStartingAt(
    int first_execution_plan_index, int* last_execution_plan_index_prepared) {
  // Cache the size as int to avoid a signed/unsigned comparison in the loop.
  const int plan_size = static_cast<int>(execution_plan_.size());
  for (int execution_plan_index = first_execution_plan_index;
       execution_plan_index < plan_size; execution_plan_index++) {
    int node_index = execution_plan_[execution_plan_index];
    TfLiteNode& node = nodes_and_registration_[node_index].first;
    const TfLiteRegistration& registration =
        nodes_and_registration_[node_index].second;
    EnsureTensorsVectorCapacity();
    // Bug fix: treat any non-Ok status as failure, not just kTfLiteError.
    if (OpPrepare(registration, &node) != kTfLiteOk) {
      return ReportOpError(&context_, node, registration, node_index,
                           "failed to prepare");
    }
    *last_execution_plan_index_prepared = execution_plan_index;
    // Discontinue if the node has dynamic outputs. Note that we don't
    // stop for dynamic temporary tensors since they won't affect the
    // sizes of other tensors in the graph.
    if (HasDynamicTensor(context_, node.outputs)) {
      break;
    }
  }
  return kTfLiteOk;
}
2.2.1 OpPrepare(registration, &node)
// Prepare the given 'node' for execution.
// Prepare the given 'node' for execution. Ops that register no `prepare`
// callback are considered trivially prepared.
TfLiteStatus OpPrepare(const TfLiteRegistration& op_reg, TfLiteNode* node) {
  return op_reg.prepare == nullptr ? kTfLiteOk
                                   : op_reg.prepare(&context_, node);
}
2.3 memory_planner_->ExecuteAllocations
// (Re)computes allocations for nodes [first_node, last_node], commits the
// arena, and refreshes every tensor's data pointer — committing may move
// the arena's base pointer, invalidating all previously resolved pointers.
TfLiteStatus ArenaPlanner::ExecuteAllocations(int first_node, int last_node) {
  // Grow the size of `allocs_` if necessary. This allows allocating temporary
  // tensors in op's `prepare` function.
  TF_LITE_ENSURE(context_, graph_info_->num_tensors() >= allocs_.size());
  allocs_.resize(graph_info_->num_tensors());
  TF_LITE_ENSURE_STATUS(CalculateAllocations(first_node, last_node));
  TF_LITE_ENSURE_STATUS(Commit());
  // Cache the count as int to avoid a signed/unsigned comparison in the loop.
  const int num_tensors = static_cast<int>(graph_info_->num_tensors());
  for (int i = 0; i < num_tensors; ++i) {
    // TODO(ahentz): we could do this only for the tensors that were modified
    // in CalculateAllocations(), instead of redoing it for tensors that
    // already had proper pointers. However we must be very careful, because
    // SimpleMemoryArena::Commit() could move the base pointer.
    TF_LITE_ENSURE_STATUS(ResolveTensorAllocation(i));
  }
  return kTfLiteOk;
}
2.4 ResetVariableTensors
After memory is (re)allocated, the variable tensors are reset to zero.
// Zero-fills every variable tensor in the graph. Variable tensors must
// already be backed by memory, i.e. the initial `PrepareOpsAndTensors()`
// must have run before this is called.
TfLiteStatus Interpreter::ResetVariableTensors() {
  for (auto& tensor : tensors_) {
    if (tensor.is_variable) {
      // Variable tensors have to be `kTfLiteArenaRwPersistent`, and must be
      // allocated after the initial `PrepareOpsAndTensors()` is called.
      TF_LITE_ENSURE_EQ(&context_, tensor.allocation_type,
                        kTfLiteArenaRwPersistent);
      TF_LITE_ENSURE(&context_, tensor.data.raw != nullptr);
      memset(tensor.data.raw, 0, tensor.bytes);
    }
  }
  return kTfLiteOk;
}