GstBufferPool and nvdspreprocess

GstBufferPool is a buffer pool. Its default virtual-method implementations are installed in gst_buffer_pool_class_init:

static void
gst_buffer_pool_class_init (GstBufferPoolClass * klass)
{
  GObjectClass *gobject_class = (GObjectClass *) klass;

  gobject_class->dispose = gst_buffer_pool_dispose;
  gobject_class->finalize = gst_buffer_pool_finalize;

  klass->start = default_start;
  klass->stop = default_stop;
  klass->set_config = default_set_config;
  klass->acquire_buffer = default_acquire_buffer;
  klass->reset_buffer = default_reset_buffer;
  klass->alloc_buffer = default_alloc_buffer;
  klass->release_buffer = default_release_buffer;
  klass->free_buffer = default_free_buffer;
  ......
}

default_acquire_buffer

Calling gst_buffer_pool_acquire_buffer ends up in this function.

1. It first tries to take a buffer from the pool's queue; if one is available, it is returned.

2. If the queue is empty, a new buffer is allocated (a simplified sketch of this path follows the default_release_buffer snippet below). The call chain is:

do_alloc_buffer --> pclass->alloc_buffer --> default_alloc_buffer --> gst_buffer_new_allocate (priv->allocator, ...) --> gst_allocator_alloc (allocator)

If the allocator is not NULL, e.g. DeepStream's GstNvDsPreProcessAllocator, the function bound to allocator_class->alloc is used, namely gst_nvdspreprocess_allocator_alloc.

If the allocator is NULL, the GstAllocator *_default_allocator from gstallocator.c is used; _priv_gst_allocator_initialize shows that this is _sysmem_allocator:

  _sysmem_allocator = g_object_new (gst_allocator_sysmem_get_type (), NULL);
  _default_allocator = gst_object_ref (_sysmem_allocator);
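
To make the branch above concrete: the dispatch inside gst_allocator_alloc boils down to roughly the following (a simplified sketch paraphrasing gstallocator.c, not the verbatim source):

GstMemory *
gst_allocator_alloc (GstAllocator * allocator, gsize size, GstAllocationParams * params)
{
  if (allocator == NULL)
    allocator = _default_allocator;   /* i.e. _sysmem_allocator */

  /* allocator_class->alloc: gst_nvdspreprocess_allocator_alloc for the DeepStream
   * allocator, default_alloc for the sysmem allocator */
  return GST_ALLOCATOR_GET_CLASS (allocator)->alloc (allocator, size, params);
}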

gst_allocator_sysmem_get_type registers the GstAllocatorSysmem type, whose allocation function is default_alloc:

static void
gst_allocator_sysmem_class_init (GstAllocatorSysmemClass * klass)
{
  GObjectClass *gobject_class;
  GstAllocatorClass *allocator_class;

  gobject_class = (GObjectClass *) klass;
  allocator_class = (GstAllocatorClass *) klass;

  gobject_class->finalize = gst_allocator_sysmem_finalize;

  allocator_class->alloc = default_alloc;
  allocator_class->free = default_free;
}
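
For completeness, the sysmem default_alloc bound above amounts to little more than reserving size plus prefix and padding bytes of plain heap memory and wrapping it in a GstMemory. A rough sketch paraphrasing gstallocator.c (where _sysmem_new_block is the internal helper that g_malloc's the block and fills in the GstMemory header):

static GstMemory *
default_alloc (GstAllocator * allocator, gsize size, GstAllocationParams * params)
{
  gsize maxsize = size + params->prefix + params->padding;

  return (GstMemory *) _sysmem_new_block (params->flags, maxsize, params->align,
      params->prefix, size);
}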

default_release_buffer

The most important job of this function is to recycle the buffer back into the queue:

static void
default_release_buffer (GstBufferPool * pool, GstBuffer * buffer)
{
  ...
  gst_atomic_queue_push (pool->priv->queue, buffer);
}
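
Put together, the acquire path described earlier reduces to roughly the following (a simplified sketch, not the verbatim gstbufferpool.c code, which additionally handles flushing, limits and outstanding-buffer counting):

static GstFlowReturn
default_acquire_buffer (GstBufferPool * pool, GstBuffer ** buffer,
    GstBufferPoolAcquireParams * params)
{
  /* 1. try to pop a recycled buffer from the queue */
  *buffer = gst_atomic_queue_pop (pool->priv->queue);
  if (*buffer != NULL)
    return GST_FLOW_OK;

  /* 2. queue empty: allocate a new buffer via pclass->alloc_buffer */
  return do_alloc_buffer (pool, buffer, params);
}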

gst_buffer_pool_config_set_allocator

Sets an external allocator on the pool configuration.

Development flow (a minimal sketch follows the list):

gst_buffer_pool_new
-- gst_buffer_pool_get_config
-- gst_buffer_pool_config_set_params      // Once a pool is created, it needs to be configured.
-- gst_buffer_pool_config_set_allocator   // set an external allocator
-- gst_buffer_pool_set_config             // updates the configuration in the pool
-- gst_buffer_pool_set_active             // after the pool has been configured, it can be activated
------ gst_buffer_pool_acquire_buffer     // when the pool is active, retrieves a buffer from the pool
------ gst_buffer_pool_release_buffer     // when the buffer is no longer needed
------ gst_buffer_pool_set_active (FALSE) // the buffer pool can be deactivated again
-------- gst_object_unref                 // release the buffer pool
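
Before turning to nvdspreprocess, here is a minimal self-contained sketch of this flow using the default (sysmem) allocator; the buffer size and counts are arbitrary illustration values:

#include <gst/gst.h>

int
main (int argc, char *argv[])
{
  gst_init (&argc, &argv);

  GstBufferPool *pool = gst_buffer_pool_new ();

  /* configure: no caps, 4096-byte buffers, min = max = 4 buffers */
  GstStructure *config = gst_buffer_pool_get_config (pool);
  gst_buffer_pool_config_set_params (config, NULL, 4096, 4, 4);
  /* NULL allocator => _default_allocator (sysmem); a custom GstAllocator would go here */
  gst_buffer_pool_config_set_allocator (config, NULL, NULL);
  gst_buffer_pool_set_config (pool, config);

  /* activate: default_start preallocates the minimum number of buffers */
  gst_buffer_pool_set_active (pool, TRUE);

  GstBuffer *buf = NULL;
  if (gst_buffer_pool_acquire_buffer (pool, &buf, NULL) == GST_FLOW_OK) {
    /* ... use the buffer ... */
    gst_buffer_unref (buf);   /* the last unref routes into release_buffer and recycles it */
  }

  gst_buffer_pool_set_active (pool, FALSE);
  gst_object_unref (pool);
  return 0;
}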

Taking DeepStream's nvdspreprocess as an example, you can see that it follows exactly the flow above.

/** Structure allocated internally by the allocator. */
typedef struct
{
  /** Should be the first member of a structure extending GstMemory. */
  GstMemory mem;

  /** Custom Gst memory for preprocess plugin */
  GstNvDsPreProcessMemory mem_preprocess;
} GstNvDsPreProcessMem;

GstNvDsPreProcessMemory *
gst_nvdspreprocess_buffer_get_memory (GstBuffer * buffer)
{
  GstMemory *mem;

  mem = gst_buffer_peek_memory (buffer, 0);  /* note: always peeks the memory at index 0 */

  if (!mem || !gst_memory_is_type (mem, GST_NVDSPREPROCESS_MEMORY_TYPE))
    return nullptr;

  /* The cast is valid because GstMemory is the first member of GstNvDsPreProcessMem,
   * so the two pointers share the same address. */
  return &(((GstNvDsPreProcessMem *) mem)->mem_preprocess);
}

scaling_pool

  nvdspreprocess->scaling_pool = gst_buffer_pool_new ();
  scaling_pool_config = gst_buffer_pool_get_config (nvdspreprocess->scaling_pool);
  gst_buffer_pool_config_set_params (scaling_pool_config, nullptr,
      sizeof (GstNvDsPreProcessMemory), nvdspreprocess->scaling_buf_pool_size,
      nvdspreprocess->scaling_buf_pool_size);

  /* For scaling, raw_buf_size is 1; width, height and other properties come from allocator_info. */
  scaling_pool_allocator = gst_nvdspreprocess_allocator_new (&allocator_info, 1,
      nvdspreprocess->gpu_id, FALSE);

  gst_buffer_pool_config_set_allocator (scaling_pool_config, scaling_pool_allocator,
      &allocation_params);

  if (!gst_buffer_pool_set_config (nvdspreprocess->scaling_pool, scaling_pool_config)) {
    GST_ELEMENT_ERROR (nvdspreprocess, RESOURCE, FAILED,
        ("Failed to set config on buffer scaling_pool"), (nullptr));
    goto error;
  }

  /* Start the buffer pool and allocate all internal buffers. */
  if (!gst_buffer_pool_set_active (nvdspreprocess->scaling_pool, TRUE)) {
    GST_ELEMENT_ERROR (nvdspreprocess, RESOURCE, FAILED,
        ("Failed to set buffer pool to active"), (nullptr));
    goto error;
  }

Where buffers are acquired:

    flow_ret = gst_buffer_pool_acquire_buffer (nvdspreprocess->scaling_pool, &conv_gst_buf,
        nullptr);
    if (flow_ret != GST_FLOW_OK) {
      return flow_ret;
    }

    memory = gst_nvdspreprocess_buffer_get_memory (conv_gst_buf);
    if (!memory) {
      return GST_FLOW_ERROR;
    }
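
Not shown in this excerpt: once the scaled surface has been consumed, dropping the last reference on the pooled buffer is what recycles it; the unref routes through default_release_buffer and pushes the buffer back onto scaling_pool's queue, roughly:

    gst_buffer_unref (conv_gst_buf);   /* hands the buffer back to scaling_pool */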

Where the memory is actually allocated:

static GstMemory *
gst_nvdspreprocess_allocator_alloc (GstAllocator * allocator, gsize size,
    GstAllocationParams * params)
{
  GstNvDsPreProcessAllocator *preprocess_allocator = GST_NVDSPREPROCESS_ALLOCATOR (allocator);
  GstNvDsPreProcessMem *nvmem = new GstNvDsPreProcessMem;
  GstNvDsPreProcessMemory *tmem = &nvmem->mem_preprocess;
  NvBufSurfaceCreateParams create_params = { 0 };
  cudaError_t cudaReturn = cudaSuccess;

  if (preprocess_allocator->info == NULL) {
      ...
  }

  create_params.gpuId = preprocess_allocator->gpu_id;
  create_params.width = preprocess_allocator->info->width;
  create_params.height = preprocess_allocator->info->height;
  create_params.size = 0;
  create_params.isContiguous = 1;
  create_params.colorFormat = preprocess_allocator->info->color_format;
  create_params.layout = NVBUF_LAYOUT_PITCH;
  create_params.memType = preprocess_allocator->info->memory_type;

  if (NvBufSurfaceCreate (&tmem->surf, preprocess_allocator->info->batch_size,
          &create_params) != 0) {
    GST_ERROR ("Error: Could not allocate internal buffer pool for nvdspreprocess");
    return nullptr;
  }

  if (tmem->surf->memType == NVBUF_MEM_SURFACE_ARRAY) {
    if (NvBufSurfaceMapEglImage (tmem->surf, -1) != 0) {
      GST_ERROR ("Error: Could not map EglImage from NvBufSurface for nvdspreprocess");
      return nullptr;
    }
    tmem->egl_frames.resize (preprocess_allocator->info->batch_size);
    tmem->cuda_resources.resize (preprocess_allocator->info->batch_size);
  }

  tmem->frame_memory_ptrs.assign (preprocess_allocator->info->batch_size, nullptr);

  for (guint i = 0; i < preprocess_allocator->info->batch_size; i++) {
    if (tmem->surf->memType == NVBUF_MEM_SURFACE_ARRAY) {
      if (cuGraphicsEGLRegisterImage (&tmem->cuda_resources[i],
              tmem->surf->surfaceList[i].mappedAddr.eglImage,
              CU_GRAPHICS_MAP_RESOURCE_FLAGS_NONE) != CUDA_SUCCESS) {
        g_printerr ("Failed to register EGLImage in cuda\n");
        return nullptr;
      }

      if (cuGraphicsResourceGetMappedEglFrame (&tmem->egl_frames[i],
              tmem->cuda_resources[i], 0, 0) != CUDA_SUCCESS) {
        g_printerr ("Failed to get mapped EGL Frame\n");
        return nullptr;
      }

      tmem->frame_memory_ptrs[i] = (char *) tmem->egl_frames[i].frame.pPitch[0];
    } else {
      /* Calculate pointers to individual frame memories in the batch memory and
       * insert in the vector. */
      tmem->frame_memory_ptrs[i] = (char *) tmem->surf->surfaceList[i].dataPtr;
    }
  }

  /* Initialize the GStreamer memory structure. */
  gst_memory_init ((GstMemory *) nvmem, (GstMemoryFlags) 0, allocator, nullptr,
      size, params->align, 0, size);

  return (GstMemory *) nvmem;
}

tensor_pool

  nvdspreprocess->tensor_pool = gst_buffer_pool_new ();
  tensor_pool_config = gst_buffer_pool_get_config (nvdspreprocess->tensor_pool);
  gst_buffer_pool_config_set_params (tensor_pool_config, nullptr,
      sizeof (GstNvDsPreProcessMemory), nvdspreprocess->tensor_buf_pool_size,
      nvdspreprocess->tensor_buf_pool_size);

  /* For tensors, raw_buf_size is not 1 but the full tensor size, e.g.
   * 7 * 3 * 272 * 480 * 4 bytes (presumably batch x channels x height x width x
   * sizeof(float32)) ≈ 10.97 MB. */
  tensor_pool_allocator = gst_nvdspreprocess_allocator_new (NULL,
      nvdspreprocess->tensor_params.buffer_size, nvdspreprocess->gpu_id, FALSE);

  gst_buffer_pool_config_set_allocator (tensor_pool_config, tensor_pool_allocator,
      &tensor_pool_allocation_params);

  if (!gst_buffer_pool_set_config (nvdspreprocess->tensor_pool, tensor_pool_config)) {
    GST_ELEMENT_ERROR (nvdspreprocess, RESOURCE, FAILED,
        ("Failed to set config on tensor buffer pool"), (nullptr));
    goto error;
  }

  /* Start the buffer pool and allocate all internal buffers. */
  if (!gst_buffer_pool_set_active (nvdspreprocess->tensor_pool, TRUE)) {
    GST_ELEMENT_ERROR (nvdspreprocess, RESOURCE, FAILED,
        ("Failed to set tensor buffer pool to active"), (nullptr));
    goto error;
  }

Where buffers are acquired:

  /** class for acquiring/releasing buffers from the tensor pool */
  nvdspreprocess->acquire_impl = std::make_unique <NvDsPreProcessAcquirerImpl> (nvdspreprocess->tensor_pool);

  status = nvdspreprocess->custom_tensor_function (nvdspreprocess->custom_lib_ctx, batch.get (),
      nvdspreprocess->tensor_buf, custom_tensor_params,
      nvdspreprocess->acquire_impl.get ());

NvDsPreProcessCustomBuf *
NvDsPreProcessAcquirerImpl::acquire ()
{
  GstBuffer *gstbuf;
  GstNvDsPreProcessMemory *memory;
  GstFlowReturn flow_ret;

  flow_ret = gst_buffer_pool_acquire_buffer (m_gstpool, &gstbuf, nullptr);
  if (flow_ret != GST_FLOW_OK) {
    GST_ERROR ("error while acquiring buffer from tensor pool\n");
    return nullptr;
  }

  memory = gst_nvdspreprocess_buffer_get_memory (gstbuf);
  if (!memory) {
    GST_ERROR ("error while getting memory from tensor pool\n");
    return nullptr;
  }

  return new NvDsPreProcessCustomBufImpl {{memory->dev_memory_ptr}, gstbuf, memory};
}
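
The matching release side is not quoted here; in essence it just drops the GstBuffer reference, which goes through default_release_buffer and pushes the buffer back onto tensor_pool's queue. A hedged sketch, not the verbatim DeepStream source:

gboolean NvDsPreProcessAcquirerImpl::release (NvDsPreProcessCustomBuf * buf)
{
  NvDsPreProcessCustomBufImpl *implBuf = static_cast<NvDsPreProcessCustomBufImpl *> (buf);
  gst_buffer_unref (implBuf->gstbuf);   /* recycles the GstBuffer into tensor_pool */
  delete implBuf;
  return TRUE;
}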

Where the memory is actually allocated (the tensor path of the same allocator):

/* Function called by GStreamer buffer pool to allocate memory using this
 * allocator. */
static GstMemory *
gst_nvdspreprocess_allocator_alloc (GstAllocator * allocator, gsize size,
    GstAllocationParams * params)
{
  GstNvDsPreProcessAllocator *preprocess_allocator = GST_NVDSPREPROCESS_ALLOCATOR (allocator);
  GstNvDsPreProcessMem *nvmem = new GstNvDsPreProcessMem;
  GstNvDsPreProcessMemory *tmem = &nvmem->mem_preprocess;
  NvBufSurfaceCreateParams create_params = { 0 };
  cudaError_t cudaReturn = cudaSuccess;

  if (preprocess_allocator->info == NULL) {
    if (preprocess_allocator->debug_tensor) {
      cudaReturn = cudaMallocHost (&tmem->dev_memory_ptr, preprocess_allocator->raw_buf_size);
    } else {
      cudaReturn = cudaMalloc (&tmem->dev_memory_ptr, preprocess_allocator->raw_buf_size);
    }
    if (cudaReturn != cudaSuccess) {
      GST_ERROR ("failed to allocate cuda malloc for tensor with error %s",
          cudaGetErrorName (cudaReturn));
      return nullptr;
    }

    /* Initialize the GStreamer memory structure. */
    gst_memory_init ((GstMemory *) nvmem, (GstMemoryFlags) 0, allocator, nullptr,
        size, params->align, 0, size);

    return (GstMemory *) nvmem;
  }

  ......
}
