deepstream批量入人脸底库

用mtcnn作为主检测器,进行人脸检测,再送入facenet进行特征入库。
使用的插件是multifilesrc,支持批量输入图片,但是要求格式是按数字顺序排列。
插件参考官网:https://gstreamer.freedesktop.org/documentation/multifile/multifilesrc.html?gi-language=c

设置location参数为"IMG%02d.jpeg",
插件会顺序读取并处理图片:IMG00.jpeg~IMG99.jpeg;
设置location参数为"IMG%04d.jpeg",
插件会顺序读取并处理图片:IMG0000.jpeg~IMG9999.jpeg;
部分代码:

/* Pattern handed to multifilesrc's "location" property: %02d makes the plugin
 * read WechatIMG00.jpeg .. WechatIMG99.jpeg in ascending numeric order. */
#define IMAGE_PATH "/opt/nvidia/deepstream/deepstream-6.0/sources/apps/sample_apps/deepstream-infer-tensor-meta-test/diku_img/WechatIMG%02d.jpeg"
/* Global output stream for the dumped face-feature vectors (opened in main
 * as ./feature.txt).  NOTE(review): the name shadows POSIX write(2). */
std::fstream write;
/* Probe attached to the tiler sink pad.  Walks the batch -> frame -> object ->
 * user-meta hierarchy, finds the SGIE (facenet) tensor output attached to each
 * detected face, and dumps the 128-d feature vector both to stdout and to the
 * global `write` stream (feature.txt), preceded by a running image id.
 *
 * `use_device_mem` mirrors the DeepStream sample's convention: it toggles each
 * call, and when set the tensor output still resides in device memory and must
 * be copied to the host buffer before it is read. */
static GstPadProbeReturn
sgie_pad_buffer_probe (GstPad * pad, GstPadProbeInfo * info, gpointer u_data)
{
  static guint use_device_mem = 0;

  NvDsBatchMeta *batch_meta =
      gst_buffer_get_nvds_batch_meta (GST_BUFFER (info->data));

  /* Sequential id written before each feature block so features can be
   * paired with their source images afterwards. */
  static int img_id = 0;
  img_id = img_id + 1;
  write << "\n" << img_id << std::endl;

  /* Iterate each frame metadata in batch */
  for (NvDsMetaList * l_frame = batch_meta->frame_meta_list; l_frame != NULL;
      l_frame = l_frame->next) {
    NvDsFrameMeta *frame_meta = (NvDsFrameMeta *) l_frame->data;

    /* Iterate object metadata in frame */
    for (NvDsMetaList * l_obj = frame_meta->obj_meta_list; l_obj != NULL;
        l_obj = l_obj->next) {
      NvDsObjectMeta *obj_meta = (NvDsObjectMeta *) l_obj->data;

      /* Iterate user metadata in object to search SGIE's tensor data */
      for (NvDsMetaList * l_user = obj_meta->obj_user_meta_list; l_user != NULL;
          l_user = l_user->next) {
        NvDsUserMeta *user_meta = (NvDsUserMeta *) l_user->data;
        if (user_meta->base_meta.meta_type != NVDSINFER_TENSOR_OUTPUT_META)
          continue;

        /* Convert to tensor metadata. */
        NvDsInferTensorMeta *meta =
            (NvDsInferTensorMeta *) user_meta->user_meta_data;

        for (unsigned int i = 0; i < meta->num_output_layers; i++) {
          /* Renamed from `info`: the original shadowed the GstPadProbeInfo
           * parameter of the same name. */
          NvDsInferLayerInfo *layer = &meta->output_layers_info[i];

          /* BUG FIX: the device->host copy must happen BEFORE the buffer is
           * read.  The original copied after printing, so on every call where
           * use_device_mem was set, stale host data was dumped. */
          if (use_device_mem && meta->out_buf_ptrs_dev[i]) {
            cudaMemcpy (meta->out_buf_ptrs_host[i], meta->out_buf_ptrs_dev[i],
                layer->inferDims.numElements * sizeof (float),
                cudaMemcpyDeviceToHost);
          }

          layer->buffer = meta->out_buf_ptrs_host[i];
          /* Plain float pointer instead of the original float(*)[130] cast;
           * the element count comes from inferDims, not a magic constant. */
          float *feature = (float *) layer->buffer;

          std::cout << "Shape " << layer->inferDims.numElements << std::endl;
          std::cout << "128d Tensor" << std::endl;
          /* unsigned loop index: numElements is unsigned (avoids the original
           * signed/unsigned comparison). */
          for (unsigned int m = 0; m < layer->inferDims.numElements; m++) {
            std::cout << " " << feature[m];
            write << feature[m] << " ";
          }
        }
      }
    }
  }

  use_device_mem = 1 - use_device_mem;
  return GST_PAD_PROBE_OK;
}
/* Entry point.  Builds and runs the batch face-enrollment pipeline:
 *
 *   multifilesrc -> jpegparse -> nvv4l2decoder -> tee -> nvstreammux ->
 *   pgie (mtcnn) -> sgie1 (facenet) -> nvmultistreamtiler ->
 *   nvvideoconvert -> nvdsosd -> sink
 *
 * Feature vectors are extracted by sgie_pad_buffer_probe on the tiler sink
 * pad and appended to ./feature.txt.  Returns 0 on normal shutdown, -1 on
 * any setup failure. */
int
main (int argc, char *argv[])
{
  gst_init (&argc, &argv);
  loop = g_main_loop_new (NULL, FALSE);

  /* Create Pipeline element that will be a container of other elements. */
  pipeline = gst_pipeline_new ("dstensor-pipeline");

  /* multifilesrc reads IMG%02d-style numbered images in sequence. */
  source = gst_element_factory_make ("multifilesrc", "file-source");
  parser = gst_element_factory_make ("jpegparse", "jpeg-parser");
  decoder = gst_element_factory_make ("nvv4l2decoder", "nvv4l2-decoder");

  /* Create nvstreammux instance to form batches from one or more sources. */
  streammux = gst_element_factory_make ("nvstreammux", "stream-muxer");

  if (!pipeline || !streammux) {
    g_printerr ("One element could not be created. Exiting.\n");
    return -1;
  }
  gst_bin_add (GST_BIN (pipeline), streammux);

  pgie = gst_element_factory_make (infer_plugin, "primary-nvinference-engine");

  /* Queue elements decouple every pair of processing elements. */
  queue1 = gst_element_factory_make ("queue", "queue1");
  queue2 = gst_element_factory_make ("queue", "queue2");
  queue3 = gst_element_factory_make ("queue", "queue3");
  queue4 = gst_element_factory_make ("queue", "queue4");
  queue5 = gst_element_factory_make ("queue", "queue5");
  queue6 = gst_element_factory_make ("queue", "queue6");

  /* Secondary inference engine (facenet feature extractor). */
  sgie1 = gst_element_factory_make (infer_plugin, "secondary1-nvinference-engine");

  /* nvtiler composites the batched frames into a 2D tiled array based
   * on the source of the frames. */
  tiler = gst_element_factory_make ("nvmultistreamtiler", "nvtiler");

  /* Convert from NV12 to RGBA as required by nvosd. */
  nvvidconv = gst_element_factory_make ("nvvideoconvert", "nvvideo-converter");

  /* Create OSD to draw on the converted RGBA buffer. */
  nvosd = gst_element_factory_make ("nvdsosd", "nv-onscreendisplay");

  tee = gst_element_factory_make ("tee", "tee");

  /* Finally render the osd output. */
#ifdef PLATFORM_TEGRA
  transform = gst_element_factory_make ("queue", "queue");
#endif
  sink = gst_element_factory_make ("nvoverlaysink", "nvvideo-renderer");

  /* sgie1 added to the check; the original only verified the others. */
  if (!source || !parser || !decoder || !tee || !pgie || !sgie1 || !tiler
      || !nvvidconv || !nvosd || !sink) {
    g_printerr ("One element could not be created. Exiting.\n");
    return -1;
  }

  g_object_set (G_OBJECT (source), "location", IMAGE_PATH, NULL);

#ifdef PLATFORM_TEGRA
  if (!transform) {
    g_printerr ("One tegra element could not be created. Exiting.\n");
    return -1;
  }
#endif

  g_object_set (G_OBJECT (sink), "sync", FALSE, NULL);

  g_object_set (G_OBJECT (streammux), "width", MUXER_OUTPUT_WIDTH, "height",
      MUXER_OUTPUT_HEIGHT, "batch-size", 1,
      "batched-push-timeout", MUXER_BATCH_TIMEOUT_USEC, NULL);

  /* Read back the batch-size configured on the PGIE (from its config file). */
  g_object_get (G_OBJECT (pgie), "batch-size", &pgie_batch_size, NULL);

  /* Single source: a 1x1 tile layout. */
  tiler_rows = (guint) sqrt (1);
  tiler_columns = (guint) ceil (1.0 * 1 / tiler_rows);
  g_object_set (G_OBJECT (tiler), "rows", tiler_rows, "columns", tiler_columns,
      "width", TILED_OUTPUT_WIDTH, "height", TILED_OUTPUT_HEIGHT, NULL);

  g_object_set (G_OBJECT (nvosd), "process-mode", OSD_PROCESS_MODE,
      "display-text", OSD_DISPLAY_TEXT, NULL);

  g_object_set (G_OBJECT (sink), "qos", 0, NULL);

  /* BUG FIX: the original applied BOTH the nvinfer and nvinferserver config
   * sets unconditionally (the if-block was never closed), so the second pair
   * always overwrote the first.  Select exactly one backend here. */
  if (!is_nvinfer_server) {
    /* nvinfer: output tensor meta must be enabled so the SGIE probe can read
     * the raw feature tensor ("output-tensor-meta" here or in the config). */
    g_object_set (G_OBJECT (pgie), "config-file-path", INFER_PGIE_CONFIG_FILE,
        "output-tensor-meta", TRUE, "batch-size", 1, NULL);
    g_object_set (G_OBJECT (sgie1), "config-file-path", INFER_SGIE1_CONFIG_FILE,
        "output-tensor-meta", TRUE, "process-mode", 2, NULL);
  } else {
    g_object_set (G_OBJECT (pgie), "config-file-path", INFERSERVER_PGIE_CONFIG_FILE,
        "batch-size", 1, NULL);
    g_object_set (G_OBJECT (sgie1), "config-file-path", INFERSERVER_SGIE1_CONFIG_FILE,
        "process-mode", 2, NULL);
  }

  /* Add a message handler for bus errors / EOS. */
  bus = gst_pipeline_get_bus (GST_PIPELINE (pipeline));
  bus_watch_id = gst_bus_add_watch (bus, bus_call, loop);
  gst_object_unref (bus);

  /* Add all elements into the pipeline and link the inference chain:
   * nvstreammux -> pgie -> sgie1 -> tiler -> nvvidconv -> nvosd -> sink. */
#ifdef PLATFORM_TEGRA
  gst_bin_add_many (GST_BIN (pipeline), source, parser, decoder, tee, queue1,
      pgie, queue2, sgie1, queue3, tiler, queue4,
      nvvidconv, queue5, nvosd, queue6, transform, sink, NULL);
  if (!gst_element_link_many (streammux, queue1, pgie, queue2, sgie1, queue3,
          tiler, queue4, nvvidconv, queue5, nvosd, queue6, transform, sink,
          NULL)) {
    g_printerr ("Elements could not be linked. Exiting.\n");
    return -1;
  }
#else
  gst_bin_add_many (GST_BIN (pipeline), source, parser, decoder, tee, queue1,
      pgie, queue2, sgie1, queue3, tiler, queue4,
      nvvidconv, queue5, nvosd, queue6, sink, NULL);
  if (!gst_element_link_many (streammux, queue1, pgie, queue2, sgie1, queue3,
          tiler, queue4, nvvidconv, queue5, nvosd, queue6, sink, NULL)) {
    g_printerr ("Elements could not be linked. Exiting.\n");
    return -1;
  }
#endif

  /* Request a sink pad on the muxer and a src pad on the tee, then bridge
   * the decode branch into the muxer. */
  GstPad *sinkpad, *srcpad;
  gchar pad_name_sink[16] = {};
  gchar pad_name_src[16] = {};

  g_snprintf (pad_name_sink, 15, "sink_0");
  g_snprintf (pad_name_src, 15, "src_0");
  sinkpad = gst_element_get_request_pad (streammux, pad_name_sink);
  if (!sinkpad) {
    g_printerr ("Streammux request sink pad failed. Exiting.\n");
    return -1;
  }

  srcpad = gst_element_get_request_pad (tee, pad_name_src);
  if (!srcpad) {
    g_printerr ("tee request src pad failed. Exiting.\n");
    return -1;
  }

  if (gst_pad_link (srcpad, sinkpad) != GST_PAD_LINK_OK) {
    g_printerr ("Failed to link tee to stream muxer. Exiting.\n");
    return -1;
  }

  gst_object_unref (sinkpad);
  gst_object_unref (srcpad);

  if (!gst_element_link_many (source, parser, decoder, tee, NULL)) {
    g_printerr ("Elements could not be linked: 1. Exiting.\n");
    return -1;
  }

  /* Probe on the osd sink pad: by then the buffer carries all metadata. */
  osd_sink_pad = gst_element_get_static_pad (nvosd, "sink");
  if (!osd_sink_pad)
    g_print ("Unable to get sink pad\n");
  else
    gst_pad_add_probe (osd_sink_pad, GST_PAD_PROBE_TYPE_BUFFER,
        osd_sink_pad_buffer_probe, NULL, NULL);
  gst_object_unref (osd_sink_pad);

  /* Open the feature dump file before the pipeline starts producing data. */
  write.open ("./feature.txt");
  write << "WechatIMG%02d.jpeg" << std::endl;

  /* Probe on the tiler sink pad: dumps SGIE feature tensors (see above). */
  tiler_sink_pad = gst_element_get_static_pad (tiler, "sink");
  gst_pad_add_probe (tiler_sink_pad, GST_PAD_PROBE_TYPE_BUFFER,
      sgie_pad_buffer_probe, NULL, NULL);

  /* Set the pipeline to "playing" state and run the main loop. */
  g_print ("Now playing...\n");
  gst_element_set_state (pipeline, GST_STATE_PLAYING);

  g_print ("Running...\n");
  g_main_loop_run (loop);

  /* Out of the main loop: clean up. */
  g_print ("Returned, stopping playback\n");
  gst_element_set_state (pipeline, GST_STATE_NULL);
  g_print ("Deleting pipeline\n");
  gst_object_unref (GST_OBJECT (pipeline));
  g_source_remove (bus_watch_id);
  g_main_loop_unref (loop);
  write.close ();
  return 0;
}
  • 0
    点赞
  • 1
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值