save:
std::ofstream p(filename, std::ios::binary);
auto gieModelStream = mEngine->serialize();
p.write((const char*)gieModelStream->data(), gieModelStream->size());
p.close();
gieModelStream->destroy();
load:
// Deserialize a previously saved TensorRT engine from disk.
// initLibNvInferPlugins() MUST run before deserialization so that plugin
// creators are registered (otherwise: "IPluginCreator not found in Plugin
// Registry" — see the error log below).
struct stat my_stat;
// String literals are const; binding one to `char*` is ill-formed in C++11+.
const char* filename = "./onnx_engine.engine";
if (stat(filename, &my_stat) == 0) {
    // Register all built-in TensorRT plugins with the global registry.
    initLibNvInferPlugins(&gLogger.getTRTLogger(), "");

    std::ifstream engineFile(filename, std::ios::binary);
    if (!engineFile)
    {
        std::cout << "Error opening engine file: " << filename << std::endl;
        return nullptr;
    }

    // Determine file size, then slurp the whole blob into memory.
    engineFile.seekg(0, engineFile.end);
    long int fsize = engineFile.tellg();
    engineFile.seekg(0, engineFile.beg);
    std::vector<char> engineData(fsize);
    engineFile.read(engineData.data(), fsize);
    if (!engineFile)
    {
        std::cout << "Error reading engine file: " << filename << std::endl;
        return nullptr;
    }

    SampleUniquePtr<IRuntime> runtime{ createInferRuntime(gLogger.getTRTLogger()) };
    if (!runtime)
    {
        std::cout << "Failed to create TensorRT runtime" << std::endl;
        return nullptr;
    }

    // TRT 8: the third (IPluginFactory*) argument is deprecated; passing
    // nullptr keeps source compatibility with older headers.
    auto mEngine = runtime->deserializeCudaEngine(engineData.data(), engineData.size(), nullptr);
    if (mEngine == nullptr)
    {
        std::cout << "Engine deserialization failed" << std::endl;
        return nullptr;
    }
    // Binding 0 is assumed to be the network input — TODO confirm for this model.
    nvinfer1::Dims mInputDims = mEngine->getBindingDimensions(0);
}
Personally tested and working on TensorRT 8.
**************************************************************************************************************
// Pull in the plugin-registration API and register all built-in plugin
// creators BEFORE calling deserializeCudaEngine(); engines that contain
// plugin layers cannot be deserialized without this (see error log below).
#include "NvInferPlugin.h"
initLibNvInferPlugins(&gLogger.getTRTLogger(), "");
Omitting this call causes deserialization to fail with the following error:
Error output: [pluginV2Runner.cpp::nvinfer1::rt::load::290] Error Code 1: Serialization (Serialization assertion creator failed.Cannot deserialize plugin since corresponding IPluginCreator not found in Plugin Registry)
[12/14/2021-10:38:14] [E] [TRT] 4: [runtime.cpp::nvinfer1::Runtime::deserializeCudaEngine::50] Error Code 4: Internal Error (Engine deserialization failed.)
Reference: Developer Guide :: NVIDIA Deep Learning TensorRT Documentation