DeepStream 6.1.1 — notes on the gst-nvdspreprocess plugin.
Config file (excerpt):
custom-lib-path=/opt/nvidia/deepstream/deepstream/lib/gst-plugins/libcustom2d_preprocess.so
custom-tensor-preparation-function=CustomTensorPreparation
[group-0]
src-ids=0;1
custom-input-transformation-function=CustomAsyncTransformation
process-on-roi=1
roi-params-src-0=0;540;900;500;960;0;900;500
roi-params-src-1=0;540;900;500;960;0;900;500
// Key data structures
/** Per-frame ROI info: the set of ROIs configured for one source's frames
 *  (from roi-params-src-N in the config file). */
typedef struct
{
/** list of ROIs to process for each frame of this source */
std::vector<NvDsRoiMeta> roi_vector;
} GstNvDsPreProcessFrame;
/** Per-[group-N] state: maps each src-id listed in the group to the
 *  per-frame ROI metadata configured for that source. */
typedef struct
{
/** Map src_id -> Preprocess Frame meta (source-to-ROI relation) */
std::unordered_map<gint, GstNvDsPreProcessFrame> framemeta_map;
}GstNvDsPreProcessGroup;
/** Plugin instance structure (excerpt) — members used by the tensor
 *  preparation and batched-transformation paths shown in these notes.
 *  Fix: the original snippet was missing the terminating ';' after the
 *  closing brace. */
struct _GstNvDsPreProcess
{
/** pointer to buffer provided to custom library for tensor preparation */
NvDsPreProcessCustomBuf *tensor_buf;
/** Temporary NvBufSurface for input to batched transformations. */
NvBufSurface batch_insurf;
/** Temporary NvBufSurface for output from batched transformations. */
NvBufSurface batch_outsurf;
};
Plugin startup:
/* Plugin start-up (pseudo-code notes): load the custom library and its entry
 * points, create the two buffer pools, the process queue, and the output
 * thread. */
gst_nvdspreprocess_start{
nvdspreprocess->custom_lib_ctx //from the config file's custom-lib-path
nvdspreprocess->custom_tensor_function //from the config file's custom-tensor-preparation-function (CustomTensorPreparation)
nvdspreprocess->nvdspreprocess_groups[gcnt]->custom_transform //each group's custom-input-transformation-function from the config
nvdspreprocess->scaling_pool = gst_buffer_pool_new(); //GstBufferPool used for scale/pad conversions of frames and ROIs
nvdspreprocess->tensor_pool = gst_buffer_pool_new(); //GstBufferPool used to hold the prepared tensor buffers
//Create process queue (consumed by the output thread started below)
nvdspreprocess->preprocess_queue = g_queue_new ();
//Start a thread running gst_nvdspreprocess_output_loop
nvdspreprocess->output_thread = g_thread_new ("nvdspreprocess-process-thread", gst_nvdspreprocess_output_loop, nvdspreprocess);
}
How the plugin processes buffers arriving from upstream:
/* Chain path (pseudo-code notes): each input buffer is processed by
 * gst_nvdspreprocess_on_frame, and a batch is queued for the output thread. */
gst_nvdspreprocess_submit_input_buffer{
gst_nvdspreprocess_on_frame
g_queue_push_tail (nvdspreprocess->preprocess_queue, buf_push_batch); //NOTE(review): the notes also show on_frame itself queueing the batch — presumably this one is the "push original buffer" marker batch; verify against the real source
}
/* Per-buffer processing (pseudo-code notes): for each configured group, crop
 * every configured ROI (or the full frame) out of every matching source frame,
 * scale/pad it into a scaling_pool buffer, collect the units into a batch,
 * run the (possibly async) group transformation, then queue the batch for the
 * output thread. */
gst_nvdspreprocess_on_frame{
std::unique_ptr < NvDsPreProcessBatch > batch = nullptr;
if (batch == nullptr) {
/* acquire a destination buffer for the scaled/padded data */
flow_ret = gst_buffer_pool_acquire_buffer (nvdspreprocess->scaling_pool, &conv_gst_buf, nullptr);
memory = gst_nvdspreprocess_buffer_get_memory (conv_gst_buf);
}
for (guint gcnt = 0; gcnt < num_groups; gcnt ++) {
for (l_frame = batch_meta->frame_meta_list; l_frame != NULL;
l_frame = l_frame->next) {
//Skip sources not listed in this group's src-ids (src_ids[0] == -1 means "all sources").
if (std::find(src_ids.begin(), src_ids.end(), source_id) == src_ids.end() && src_ids[0] != -1) {
GST_DEBUG_OBJECT (nvdspreprocess, "Group %d : No Source %d => skipping\n", gcnt, source_id);
continue;
}
auto get_preprocess_frame_meta = preprocess_group->framemeta_map.find(source_id); //source_id -> ROI-list mapping
preprocess_frame = get_preprocess_frame_meta->second;
roi_vector = preprocess_frame.roi_vector;
for (guint n = 0; n < preprocess_frame.roi_vector.size(); n++) {
if (preprocess_group->process_on_roi) {
roi_meta = roi_vector[n];
/** Process on ROIs provided from config file */
rect_params = roi_meta.roi;
} else {
/** Process on full frames: rect_params is set to the whole frame size */
rect_params.left = 0;
rect_params.top = 0;
rect_params.width = in_surf->surfaceList[batch_index].width;
rect_params.height = in_surf->surfaceList[batch_index].height;
}
scale_and_fill_data(......) //scale and pad the ROI to the size the network needs
NvDsPreProcessUnit unit;
...... //Adding a Unit (ROI/Crop/Full Frame) to the current batch.
batch->units.push_back (unit);
}
}
group_transformation(......) //calls group->custom_transform or batch_transformation to scale/pad; output lands in batch->converted_buf.
//e.g. batch_transformation calls NvBufSurfTransformSetSessionParams then NvBufSurfTransformAsync.
}
/** wait for every group's async transformation to complete */
for (guint gcnt = 0; gcnt < num_groups; gcnt ++) {
NvBufSurfTransformSyncObjWait(x->sync_obj, -1);
NvBufSurfTransformSyncObjDestroy(&x->sync_obj);
}
g_queue_push_tail (nvdspreprocess->preprocess_queue, batch.get()); //enqueue for the output thread. NOTE(review): pushing batch.get() from a std::unique_ptr would dangle once batch is destroyed — the real code presumably uses batch.release(); verify.
}
//Output thread: normalize the data and pack the tensor.
/* Output-thread loop (pseudo-code notes): pop a batch from the queue,
 * optionally push the original buffer downstream, collect the per-unit ROI
 * metas, call the custom tensor-preparation function (normalization +
 * packing), then attach the resulting tensor as batch-level user meta. */
gst_nvdspreprocess_output_loop{
batch.reset ((NvDsPreProcessBatch *) g_queue_pop_head (nvdspreprocess->preprocess_queue));
if (batch->push_buffer) {
gst_pad_push();
}
for (guint i = 0; i < batch->units.size(); i++) {
custom_tensor_params.seq_params.roi_vector.push_back(batch->units[i].roi_meta);
}
/* tensor_buf is created inside the call; nvdspreprocess->acquire_impl is the
   NvDsPreProcessAcquirerImpl pointer (wraps a GstBuffer from the tensor pool).
   custom_tensor_function is CustomTensorPreparation. */
status = nvdspreprocess->custom_tensor_function(nvdspreprocess->custom_lib_ctx, batch.get(),
nvdspreprocess->tensor_buf, custom_tensor_params,
nvdspreprocess->acquire_impl.get());
/** attach user meta at batch level: the tensor is attached as user meta */
attach_user_meta_at_batch_level (nvdspreprocess, batch.get(), custom_tensor_params, status);
}
/**
 * Holds the pointers for the memory allocated for one pool buffer
 * (a batch of frames).
 */
typedef struct
{
/** surface corresponding to memory allocated */
NvBufSurface *surf;
/** Vector of cuda resources created by registering the above egl images in CUDA. */
std::vector<CUgraphicsResource> cuda_resources;
/** Vector of CUDA eglFrames created by mapping the above cuda resources. */
std::vector<CUeglFrame> egl_frames;
/** Pointer to the memory allocated for the batch of frames (DGPU). */
void *dev_memory_ptr; //allocated in gst_nvdspreprocess_allocator_alloc via cudaMalloc (the original note repeated "cudaMalloc" twice — presumably cudaMalloc vs. cudaMallocHost; verify against the allocator source)
/** Vector of pointers to the individual frame memories in the batch memory */
std::vector<void *> frame_memory_ptrs; //each entry points at surf->surfaceList[i].dataPtr
} GstNvDsPreProcessMemory;
/* Pseudo-code notes: normalize/convert each scaled unit of the batch into the
 * output device buffer. The second parameter, devBuf, is the output tensor
 * memory; each unit occupies channels*width*height floats. */
NvDsPreProcessTensorImpl::prepare_tensor(NvDsPreProcessBatch* batch, void*& devBuf){ //second parameter is the output
for (unsigned int i = 0; i < batch_size; i++)
{
float* outPtr =
(float*)devBuf + i * m_NetworkSize.channels*m_NetworkSize.width*m_NetworkSize.height; //per-unit offset into the packed batch tensor
convertFcn(outPtr, (unsigned char*)batch->units[i].converted_frame_ptr...... /*first arg is the output, second arg is the input (the scale/pad result); its source chain: unit.converted_frame_ptr = memory->frame_memory_ptrs[idx]; tmem->frame_memory_ptrs[i] = (char *) tmem->surf->surfaceList[i].dataPtr; */
}
}
/**
 * Custom tensor-preparation entry point (config key:
 * custom-tensor-preparation-function). Acquires a buffer from the tensor
 * pool and fills it with the normalized, batched tensor built from the
 * scaled/padded unit data in @batch.
 *
 * @param ctx         custom library context (holds tensor_impl)
 * @param batch       batch of scaled units to convert
 * @param buf         [out] acquired tensor buffer (an NvDsPreProcessCustomBufImpl,
 *                    wrapping a GstBuffer and GstNvDsPreProcessMemory)
 * @param tensorParam tensor parameters for this batch
 * @param acquirer    pool wrapper used to acquire/release tensor buffers
 * @return NVDSPREPROCESS_SUCCESS on success, an error status otherwise.
 *
 * Fixes vs. the notes' version: a failed acquire() is no longer dereferenced,
 * and a buffer is released back to the pool when tensor preparation fails
 * (otherwise the pool would leak one buffer per failure).
 */
NvDsPreProcessStatus
CustomTensorPreparation(CustomCtx *ctx, NvDsPreProcessBatch *batch, NvDsPreProcessCustomBuf *&buf,
    CustomTensorParams &tensorParam, NvDsPreProcessAcquirer *acquirer)
{
  NvDsPreProcessStatus status = NVDSPREPROCESS_TENSOR_NOT_READY;
  /** acquire a buffer from tensor pool */
  buf = acquirer->acquire();
  if (buf == nullptr) {
    /* pool exhausted or allocation failure */
    return status;
  }
  /** Prepare Tensor: normalize and pack into the acquired buffer */
  status = ctx->tensor_impl->prepare_tensor(batch, buf->memory_ptr);
  if (status != NVDSPREPROCESS_SUCCESS) {
    /* hand the buffer back so the pool does not leak */
    acquirer->release(buf);
    buf = nullptr;
  }
  return status;
}
/* Pseudo-code notes: wrap the prepared tensor in a GstNvDsPreProcessBatchMeta
 * and attach it to the batch as user meta for downstream plugins to consume. */
attach_user_meta_at_batch_level{
GstNvDsPreProcessBatchMeta *preprocess_batchmeta = new GstNvDsPreProcessBatchMeta;
preprocess_batchmeta->tensor_meta = new NvDsPreProcessTensorMeta; //GstNvDsPreProcessBatchMeta contains an NvDsPreProcessTensorMeta
preprocess_batchmeta->tensor_meta->private_data = new std::pair(nvdspreprocess, nvdspreprocess->tensor_buf);
preprocess_batchmeta->tensor_meta->raw_tensor_buffer =
((NvDsPreProcessCustomBufImpl *)nvdspreprocess->tensor_buf)->memory->dev_memory_ptr; //the raw device tensor later consumed downstream
preprocess_batchmeta->private_data = batch->converted_buf; //pointer to the buffer from the scaling pool
user_meta = nvds_acquire_user_meta_from_pool(batch_meta);
/* Set user meta below */
user_meta->user_meta_data = preprocess_batchmeta;
user_meta->base_meta.meta_type = (NvDsMetaType) NVDS_PREPROCESS_BATCH_META;
user_meta->base_meta.release_func = release_user_meta_at_batch_level;
user_meta->base_meta.batch_meta = batch_meta;
}