写3种方法,1.含CPU与GPU间的内存copy,异步inference,2. 内存零copy, 异步inference, 3. 内存零copy, 同步inference
- 含CPU与GPU间的内存copy,异步inference
大多代码都用的是这种方式
先声明一个input和output数组,
// Host-side staging buffers: `data` holds the NCHW float input batch,
// `prob` receives the raw network output after inference.
static float data[BATCH_SIZE * 3 * INPUT_H * INPUT_W]; // input
static float prob[BATCH_SIZE * OUTPUT_SIZE]; // output
读入engine文件
// Read the serialized TensorRT engine file into a heap buffer.
// NOTE(review): `size` is declared elsewhere (not visible in this snippet).
// If the file cannot be opened, trtModelStream stays nullptr and the later
// deserializeCudaEngine call will fail — the failure path deserves handling.
char *trtModelStream{nullptr};
string engine_name = "net.engine";
ifstream file(engine_name, std::ios::binary);
if (file.good()) {
// Measure the file size by seeking to the end, then rewind.
file.seekg(0, file.end);
size = file.tellg();
file.seekg(0, file.beg);
trtModelStream = new char[size];
assert(trtModelStream);
file.read(trtModelStream, size);
file.close();
}
准备cudaEngine
// Deserialize the engine blob and create an execution context.
IRuntime* runtime = createInferRuntime(gLogger);
assert(runtime != nullptr);
ICudaEngine* engine = runtime->deserializeCudaEngine(trtModelStream, size);
assert(engine != nullptr);
IExecutionContext* context = engine->createExecutionContext();
assert(context != nullptr);
// The serialized blob is no longer needed once the engine is deserialized.
delete[] trtModelStream;
读入图片
cv::Mat img = cv::imread(string(argv[2]) + "/" + file_names[count]); //读一个文件夹下的图片,文件夹路径由参数传入
把img的数据传入data数组,这个要依具体需求而定,可能要预处理一下,这里略过
然后把input, output的指针传入inference函数
doInference(*context, data, prob, BATCH_SIZE);
doInference的实现细节如下:
// Runs one full inference round-trip with explicit host<->device copies:
// H2D copy of `input`, asynchronous enqueue, D2H copy into `output`, then a
// blocking stream synchronize.
//
// input  : host buffer of batchSize * 3 * INPUT_H * INPUT_W floats
// output : host buffer of batchSize * OUTPUT_SIZE floats
//
// NOTE: device buffers and the stream are created and destroyed on every call;
// for repeated inference, hoist them out of the loop (cudaMalloc is slow).
void doInference(IExecutionContext& context, float* input, float* output, int batchSize)
{
    const ICudaEngine& engine = context.getEngine();
    // Pointers to input and output device buffers to pass to engine.
    // Engine requires exactly IEngine::getNbBindings() number of buffers;
    // this model has one input and one output binding.
    assert(engine.getNbBindings() == 2);
    void* buffers[2];
    // Binding indices are looked up by tensor name and are guaranteed to be
    // less than IEngine::getNbBindings().
    const int inputIndex = engine.getBindingIndex(INPUT_BLOB_NAME);
    const int outputIndex = engine.getBindingIndex(OUTPUT_BLOB_NAME);
    // Create GPU buffers on device.
    CHECK(cudaMalloc(&buffers[inputIndex], batchSize * 3 * INPUT_H * INPUT_W * sizeof(float)));
    CHECK(cudaMalloc(&buffers[outputIndex], batchSize * OUTPUT_SIZE * sizeof(float)));
    // Create the stream that orders the copy -> enqueue -> copy sequence.
    cudaStream_t stream;
    CHECK(cudaStreamCreate(&stream));
    // Copy the input from CPU to GPU (asynchronous, ordered on `stream`).
    CHECK(cudaMemcpyAsync(buffers[inputIndex], input, batchSize * 3 * INPUT_H * INPUT_W * sizeof(float), cudaMemcpyHostToDevice, stream));
    // Asynchronous inference. enqueue() returns false on failure — the
    // original code ignored this and would silently produce garbage output.
    const bool enq_ok = context.enqueue(batchSize, buffers, stream, nullptr);
    assert(enq_ok && "TensorRT enqueue failed");
    (void)enq_ok; // silence unused-variable warning in NDEBUG builds
    // Copy the result from GPU back to CPU (this D2H copy dominates the time).
    CHECK(cudaMemcpyAsync(output, buffers[outputIndex], batchSize * OUTPUT_SIZE * sizeof(float), cudaMemcpyDeviceToHost, stream));
    // Block until copy + inference + copy have all finished. Checked: an
    // async error from the kernel surfaces here, not at the enqueue call.
    CHECK(cudaStreamSynchronize(stream));
    // Release stream and buffers.
    CHECK(cudaStreamDestroy(stream));
    CHECK(cudaFree(buffers[inputIndex]));
    CHECK(cudaFree(buffers[outputIndex]));
}
可以看到这里要为buffer保留内存,然后把input的数据从CPU copy到buffer中,推理完之后再把buffer中的结果copy到output
output这个指针传入的是prob,那么可以直接利用prob得到网络输出的结果,做一个softmax, 即得到分割结果
下面再用阈值分割出前景和背景,并把结果保存出来
// Binarize the 2-class segmentation output of batch item 0: plane 0 holds the
// foreground logits, plane 1 the background logits, each INPUT_H x INPUT_W.
cv::Mat mask_mat = cv::Mat(INPUT_H, INPUT_W, CV_8UC1);
for (int i = 0; i < INPUT_H; i++) {
    // Fix: row pointer must be uchar* — assigning cv::Mat::ptr<uchar>() to a
    // char* (as the original did) is an ill-formed pointer conversion.
    uchar* ptmp = mask_mat.ptr<uchar>(i);
    for (int j = 0; j < INPUT_W; j++) {
        float fg = prob[i * INPUT_W + j];                         // class-0 logit
        float bg = prob[i * INPUT_W + j + INPUT_W * INPUT_H];     // class-1 logit
        // Two-class softmax reduced to a sigmoid of the logit difference:
        // exp(fg)/(exp(fg)+exp(bg)) == 1/(1+exp(bg-fg)). Unlike the naive
        // form, this cannot overflow to inf/inf = NaN for large logits, and
        // the old +0.0001 guard is unnecessary (denominator is always >= 1).
        float pixel = 1.0f / (1.0f + expf(bg - fg));
        ptmp[j] = (pixel > CONF_THRESH) ? 255 : 0;
    }
}
cv::imwrite(filename, mask_mat);
- 内存零copy,异步inference
由于nx板子的CPU和GPU内存是共享的,不需要在它们之间copy数据
主要流程和上面一样,区别在于input和output数组,buffer的声明
input和output仅声明为指针,并保留内存
// Allocate mapped (zero-copy) pinned host memory: on boards where CPU and GPU
// share physical memory, the GPU can access these buffers directly through
// the device aliases returned by cudaHostGetDevicePointer — no memcpy needed.
float* data;
float* prob;
CHECK(cudaHostAlloc((void **)&data, BATCH_SIZE * 3 * INPUT_H * INPUT_W * sizeof(float), cudaHostAllocMapped));
CHECK(cudaHostAlloc((void **)&prob, BATCH_SIZE * OUTPUT_SIZE * sizeof(float), cudaHostAllocMapped));
参数传入还是一样的
doInference_zeroCopy(*context, data, prob, BATCH_SIZE);
doInference_zeroCopy的实现如下
// Zero-copy asynchronous inference for unified-memory platforms (e.g. Jetson):
// `input` and `output` must have been allocated with
// cudaHostAlloc(..., cudaHostAllocMapped). The engine reads/writes them
// through their device aliases, so no explicit H2D/D2H copy is performed.
void doInference_zeroCopy(IExecutionContext& context, float* input, float* output, int batchSize)
{
    // NOTE(review): cudaSetDeviceFlags must run before the CUDA context is
    // created, i.e. before ANY other CUDA call in the process. By this point
    // the engine already exists, so this call fails with
    // cudaErrorSetOnActiveProcess and is a no-op — move it to the top of
    // main(). Kept (unchecked) so existing behavior is preserved.
    cudaSetDeviceFlags(cudaDeviceMapHost);
    const ICudaEngine& engine = context.getEngine();
    // Engine requires exactly IEngine::getNbBindings() number of buffers;
    // this model has one input and one output binding.
    assert(engine.getNbBindings() == 2);
    void* buffers[2];
    const int inputIndex = engine.getBindingIndex(INPUT_BLOB_NAME);
    const int outputIndex = engine.getBindingIndex(OUTPUT_BLOB_NAME);
    // Translate the mapped host pointers into their device aliases; the
    // original ignored the return codes, which hides a non-mapped allocation.
    CHECK(cudaHostGetDevicePointer(&buffers[inputIndex], (void*)input, 0));
    CHECK(cudaHostGetDevicePointer(&buffers[outputIndex], (void*)output, 0));
    // Create stream.
    cudaStream_t stream;
    CHECK(cudaStreamCreate(&stream));
    // Asynchronous inference directly on the mapped buffers.
    const bool enq_ok = context.enqueue(batchSize, buffers, stream, nullptr);
    assert(enq_ok && "TensorRT enqueue failed");
    (void)enq_ok; // silence unused-variable warning in NDEBUG builds
    // Block until inference finishes; also surfaces async execution errors.
    CHECK(cudaStreamSynchronize(stream));
    // Release stream (buffers are caller-owned mapped host memory).
    CHECK(cudaStreamDestroy(stream));
}
- 内存零copy,同步inference
与异步inference的区别仅在于把doInference_zeroCopy函数中
context.enqueue(batchSize, buffers, stream, nullptr);
替换成
context.execute(batchSize, buffers);
由于不再需要stream,可以把stream的部分注释掉
经过测试,发现影响时间效率的主要因素在于图片的尺寸大小。
另外,在图片尺寸为 640x640 时,同步推理比异步推理的时间效率略高一些。