Once AllocateTensors() has been called successfully, Invoke() may be called.
// Invoke the interpreter (run the whole graph in dependency order).
//
// NOTE: It is possible that the interpreter is not in a ready state
// to evaluate (i.e. if a ResizeTensor() has been performed without an
// AllocateTensors()).
// Returns status of success or failure.
TfLiteStatus Invoke();
// Runs the whole graph by invoking each node in execution-plan
// (dependency) order.
//
// Returns kTfLiteOk when every node invoked successfully; otherwise the
// status produced by ReportOpError() for the last node whose invoke failed.
// TF_LITE_ENSURE_STATUS / TF_LITE_ENSURE may also return early with an
// error status if on-demand preparation fails.
TfLiteStatus Interpreter::Invoke() {
  TfLiteStatus status = kTfLiteOk;
  // Invocations are always done in node order.
  // Note that calling Invoke repeatedly will cause the original memory plan to
  // be reused, unless either ResizeInputTensor() or AllocateTensors() has been
  // called.
  // TODO(b/71913981): we should force recalculation in the presence of dynamic
  // tensors, because they may have new value which in turn may affect shapes
  // and allocations.
  for (int execution_plan_index = 0;
       execution_plan_index < execution_plan_.size(); execution_plan_index++) {
    // Lazily prepare any ops (and allocate their tensors) that have not been
    // prepared yet; preparation must never fall behind the node being run.
    if (execution_plan_index == next_execution_plan_index_to_prepare_) {
      TF_LITE_ENSURE_STATUS(PrepareOpsAndTensors());
      TF_LITE_ENSURE(&context_, next_execution_plan_index_to_prepare_ >=
                                    execution_plan_index);
    }
    int node_index = execution_plan_[execution_plan_index];
    TfLiteNode& node = nodes_and_registration_[node_index].first;
    const TfLiteRegistration& registration =
        nodes_and_registration_[node_index].second;
    SCOPED_OPERATOR_PROFILE(profiler_, node_index);

    EnsureTensorsVectorCapacity();
    tensor_resized_since_op_invoke_ = false;
    // Record the failure but keep running the remaining nodes; the last
    // error reported wins.
    if (OpInvoke(registration, &node) == kTfLiteError) {
      status = ReportOpError(&context_, node, registration, node_index,
                             "failed to invoke");
    }
  }
  // BUG FIX: the original body fell off the end of a value-returning
  // function (undefined behavior in C++); propagate the accumulated status.
  return status;
}
// OpInvoke: the helper Invoke() uses to dispatch a single node's kernel.
// Dispatches the kernel for 'node' via its registration's invoke hook.
// Returns kTfLiteError when the registration provides no invoke function;
// otherwise forwards the status returned by the kernel itself.
TfLiteStatus OpInvoke(const TfLiteRegistration& op_reg, TfLiteNode* node) {
  return (op_reg.invoke != nullptr) ? op_reg.invoke(&context_, node)
                                    : kTfLiteError;
}