DeepStream: Saving Images with the appsink Element

Contents

1. Introduction to the appsink element

2. Saving images with appsink and cv::Mat

2.1 Development environment

2.2 Code and explanation


1. Introduction to the appsink element

As a GStreamer sink element, appsink exchanges the streaming media data in a pipeline with other application code, for example passing frames between GStreamer and CUDA, or between GStreamer and OpenCV.

Its main role is to ride on the pipeline's media streaming: through appsink we collect the stream data into a buffer and hand it to the rest of the application for processing.
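As a minimal, self-contained sketch of this pull model (it uses videotestsrc so it runs anywhere GStreamer is installed; the DeepStream pipeline built later in this article hands frames to appsink in exactly the same way):

// build: g++ appsink_demo.cpp $(pkg-config --cflags --libs gstreamer-app-1.0)
#include <gst/gst.h>
#include <gst/app/gstappsink.h>

int main (int argc, char *argv[])
{
    gst_init (&argc, &argv);

    GError *err = NULL;
    GstElement *pipeline = gst_parse_launch (
        "videotestsrc num-buffers=1 ! videoconvert ! "
        "video/x-raw,format=RGBA ! appsink name=mysink", &err);
    if (!pipeline) {
        g_printerr ("parse error: %s\n", err->message);
        return -1;
    }

    GstElement *sink = gst_bin_get_by_name (GST_BIN (pipeline), "mysink");
    gst_element_set_state (pipeline, GST_STATE_PLAYING);

    // Block until one sample is available, then hand its buffer to the application.
    GstSample *sample = gst_app_sink_pull_sample (GST_APP_SINK (sink));
    if (sample) {
        GstBuffer *buffer = gst_sample_get_buffer (sample);
        g_print ("pulled a buffer of %" G_GSIZE_FORMAT " bytes\n",
                 gst_buffer_get_size (buffer));
        gst_sample_unref (sample);
    }

    gst_element_set_state (pipeline, GST_STATE_NULL);
    gst_object_unref (sink);
    gst_object_unref (pipeline);
    return 0;
}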

2. Saving images with appsink and cv::Mat

2.1 Development environment

Hardware: NVIDIA Jetson AGX Xavier

Software: Qt 5.9, OpenCV 4.1.1, DeepStream 5.0, GStreamer 1.0

2.2 Code and explanation

Creation of the queue and nvvideoconvert elements is omitted; only the core of the image-saving path is explained.

Configuring OpenCV for Qt is not covered here.

  • Create the appsink element and set its caps; the caps must match the src output format of the upstream nvvideoconvert element. Note that plain video/x-raw (system memory, not memory:NVMM) is used here so the buffer can later be mapped and read by the CPU:
    char caps[1024] = "video/x-raw,format=RGBA";

    GstCaps *video_caps;
    video_caps = gst_caps_from_string (caps);
    if(!video_caps){
        qCritical()<< "gst_caps_from_string fail";
        return false;
    }
    g_object_set (m_sinkBin.sub_bins[index].sink, "caps", video_caps, NULL);
    gst_caps_unref (video_caps);    // the sink keeps its own reference; release ours to avoid a leak
  • Enable the appsink signals and connect the new-sample handler (the signature the handler must have is noted after these steps):
    // enable signal emission and connect the new-sample handler
    g_object_set(m_sinkBin.sub_bins[index].sink, "emit-signals", TRUE, "async", FALSE, NULL);
    g_signal_connect(m_sinkBin.sub_bins[index].sink, "new-sample", G_CALLBACK(SinkElement::newSample), NULL);
  • In the new-sample handler, save the current video frame as an image (a caveat about row strides also follows these steps):
    GstSample *sample = gst_app_sink_pull_sample(appsink);  // must be released with gst_sample_unref()
    caps = gst_sample_get_caps (sample);
    if (!caps) {
        g_print ("gst_sample_get_caps fail\n");
        gst_sample_unref (sample);
        return GST_FLOW_ERROR;
    }
    s = gst_caps_get_structure (caps, 0);
    gboolean res;
    res = gst_structure_get_int (s, "width", &width);      // frame width
    res &= gst_structure_get_int (s, "height", &height);   // frame height (&= so that both reads must succeed)
    if (!res) {
        g_print ("gst_structure_get_int fail\n");
        gst_sample_unref (sample);
        return GST_FLOW_ERROR;
    }
    GstBuffer *buffer = gst_sample_get_buffer(sample);
    char fileNameString[FILE_NAME_SIZE];
    snprintf (fileNameString, FILE_NAME_SIZE, "%d_%d_%d_%s_%dx%d.png",
              frame_number, frame_meta->source_id, num_rects,
              obj_meta->obj_label, width, height);
    qDebug()<< "fileNameString: "<< fileNameString;

    // save as PNG
    GstMapInfo mapInfo{};
    if(gst_buffer_map(buffer, &mapInfo, GST_MAP_READ))
    {
        std::string strFile(fileNameString);

        cv::Mat frame(cv::Size(width, height), CV_8UC4, (uchar*)mapInfo.data, cv::Mat::AUTO_STEP);
        cv::Mat bgra;
        cv::cvtColor(frame, bgra, cv::COLOR_RGBA2BGRA); // imwrite expects BGRA channel order; the caps deliver RGBA
        if(cv::imwrite(strFile, bgra))
        {
            qInfo()<< "save " << strFile.c_str() << " succeeded";
        }
        else
            qWarning()<<"save " << strFile.c_str() << " failed";
        gst_buffer_unmap (buffer, &mapInfo);    // release the mapping
    }
    gst_sample_unref (sample);  // release the sample
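Two notes before the full source. First, for the g_signal_connect call above to work, newSample must be a static member function (no this pointer) matching the signature the new-sample signal expects:

    // Prototype expected by the "new-sample" signal (hence a static member in this article):
    static GstFlowReturn newSample (GstAppSink *appsink, gpointer user_data);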
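Second, cv::Mat::AUTO_STEP assumes tightly packed rows. If the converter attaches a GstVideoMeta with padded row strides, wrapping the raw map pointer can shear the image. A hedged sketch of the safer mapping via the standard gst_video_frame_map API (requires #include <gst/video/video.h>):

    // Honour per-plane strides (GstVideoMeta) instead of assuming packed rows.
    GstVideoInfo info;
    gst_video_info_from_caps (&info, caps);

    GstVideoFrame vframe;
    if (gst_video_frame_map (&vframe, &info, buffer, GST_MAP_READ)) {
        cv::Mat frame (cv::Size (width, height), CV_8UC4,
                       GST_VIDEO_FRAME_PLANE_DATA (&vframe, 0),
                       GST_VIDEO_FRAME_PLANE_STRIDE (&vframe, 0));
        // ... convert and cv::imwrite as above ...
        gst_video_frame_unmap (&vframe);
    }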

The full source code follows:


bool SinkElement::createEventPhotoFileSinkBin(uint index)
{
    gchar elem_name[50];
    static int uid = 0;

    g_snprintf (elem_name, sizeof (elem_name), "appsink_sub_bin%d", uid);
    m_sinkBin.sub_bins[index].bin = gst_bin_new (elem_name);
    if (!m_sinkBin.sub_bins[index].bin) {
        qCritical()<< "Failed to create "<< elem_name;
        return false;
    }

    //sink
    g_snprintf (elem_name, sizeof (elem_name), "appsink_sub_bin_%d", uid);
    m_sinkBin.sub_bins[index].sink = gst_element_factory_make ("appsink", elem_name);
    if (!m_sinkBin.sub_bins[index].sink) {
        qCritical()<< "Failed to create " << elem_name;
        return false;
    }

    char caps[1024] = "video/x-raw,format=RGBA";

    GstCaps *video_caps;
    video_caps = gst_caps_from_string (caps);
    if(!video_caps){
        qCritical()<< "gst_caps_from_string fail";
        return false;
    }
    g_object_set (m_sinkBin.sub_bins[index].sink, "caps", video_caps, NULL);
    gst_caps_unref (video_caps);    // the sink keeps its own reference; release ours

    // enable signal emission and connect the new-sample handler
    g_object_set(m_sinkBin.sub_bins[index].sink, "emit-signals", TRUE, "async", FALSE, NULL);
    g_signal_connect(m_sinkBin.sub_bins[index].sink, "new-sample", G_CALLBACK(SinkElement::newSample), NULL);

    //transform
    g_snprintf (elem_name, sizeof (elem_name), "appsink_sub_bin_transform_%d", uid);
    m_sinkBin.sub_bins[index].transform =
            gst_element_factory_make ("nvvideoconvert", elem_name);
    if (!m_sinkBin.sub_bins[index].transform) {
        qCritical()<< "Failed to create " << elem_name;
        return false;
    }

    //queue
    g_snprintf (elem_name, sizeof (elem_name), "appsink_sub_bin_queue%d", uid);
    m_sinkBin.sub_bins[index].queue = gst_element_factory_make("queue", elem_name);
    if (! m_sinkBin.sub_bins[index].queue) {
        qCritical()<< "Failed to create " << elem_name;
        return false;
    }

    uid++;

    gst_bin_add_many(GST_BIN(m_sinkBin.sub_bins[index].bin),
                     m_sinkBin.sub_bins[index].queue,
                     m_sinkBin.sub_bins[index].transform,
                     m_sinkBin.sub_bins[index].sink, NULL);
    if(!gst_element_link_many(m_sinkBin.sub_bins[index].queue,
                              m_sinkBin.sub_bins[index].transform,
                              m_sinkBin.sub_bins[index].sink, NULL))
    {
        QString criticalOutput = QString::asprintf("Failed to link SinkBin.sub_bins[%d].queue, SinkBin.sub_bins[%d].transform, SinkBin.sub_bins[%d].sink!",
                                                   index, index, index);
        qCritical()<< criticalOutput;
        return false;
    }
    QString linkInfo = QString::asprintf("Linking SinkBin.sub_bins[%d].queue, SinkBin.sub_bins[%d].transform, SinkBin.sub_bins[%d].sink successfully",
                                         index, index, index);
    qInfo()<< linkInfo;

    NVGSTDS_BIN_ADD_GHOST_PAD (m_sinkBin.sub_bins[index].bin, m_sinkBin.sub_bins[index].queue, "sink");

    return true;

done:   // error target used by the NVGSTDS_BIN_ADD_GHOST_PAD macro above
    return false;
}
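The bin built above exposes a single ghost "sink" pad, so it can be attached to the main pipeline like any ordinary sink. A minimal sketch of one way to hook it up downstream of a tee (the pipeline and tee variables are assumptions for illustration; only m_sinkBin comes from this article):

    // Assumed usage: add the sub-bin to the pipeline and link it off a tee branch.
    gst_bin_add (GST_BIN (pipeline), m_sinkBin.sub_bins[index].bin);

    GstPad *tee_src = gst_element_get_request_pad (tee, "src_%u");
    GstPad *bin_sink = gst_element_get_static_pad (m_sinkBin.sub_bins[index].bin, "sink");
    if (gst_pad_link (tee_src, bin_sink) != GST_PAD_LINK_OK)
        qCritical() << "Failed to link tee to the appsink bin";
    gst_object_unref (bin_sink);
    // (release tee_src with gst_element_release_request_pad when tearing down)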

GstFlowReturn SinkElement::newSample(GstAppSink *appsink, gpointer data)
{
    Q_UNUSED(data);
    static int frame_number = 1;
    GstCaps *caps;
    GstStructure *s;
    gint width, height;

    GstSample *sample = gst_app_sink_pull_sample(appsink);  // must be released with gst_sample_unref()

    if(!sample)
    {
        qCritical()<< "Get sample error!";
        return GST_FLOW_ERROR;
    }
    /*************************** read the frame width and height from the caps ***************************/
    caps = gst_sample_get_caps (sample);
    if (!caps) {
        g_print ("gst_sample_get_caps fail\n");
        gst_sample_unref (sample);
        return GST_FLOW_ERROR;
    }
    s = gst_caps_get_structure (caps, 0);
    gboolean res;
    res = gst_structure_get_int (s, "width", &width);      // frame width
    res &= gst_structure_get_int (s, "height", &height);   // frame height (&= so that both reads must succeed)
    if (!res) {
        g_print ("gst_structure_get_int fail\n");
        gst_sample_unref (sample);
        return GST_FLOW_ERROR;
    }
    /*********************************************************************/

    /**************************** walk the DeepStream metadata ****************************/
    GstBuffer *buffer = gst_sample_get_buffer(sample);
    if(!buffer)
    {
        qCritical()<< "get sample buffer error!";
        gst_sample_unref (sample);  // do not leak the sample on the error path
        return GST_FLOW_ERROR;
    }
    NvDsBatchMeta* batch_meta = gst_buffer_get_nvds_batch_meta(buffer);

    NvDsMetaList* l_frame = NULL;
    NvDsMetaList* l_obj = NULL;
    NvDsObjectMeta* obj_meta = NULL;
    guint person_count = 0;
    guint num_rects = 0;
    for(l_frame = batch_meta->frame_meta_list; l_frame != NULL; l_frame = l_frame->next)
    {
        NvDsFrameMeta *frame_meta = (NvDsFrameMeta*)(l_frame->data);
        for(l_obj = frame_meta->obj_meta_list; l_obj != NULL; l_obj = l_obj->next)
        {
            obj_meta = (NvDsObjectMeta*)(l_obj->data);
            if(obj_meta->class_id == PGIE_CLASS_ID_PERSON)
            {
                person_count++;
                num_rects++;
            }
        }
        /************************** if person_count > 0, save the frame as an image *********************************/
        if(person_count > 0)
        {
            char fileNameString[FILE_NAME_SIZE];
            snprintf (fileNameString, FILE_NAME_SIZE, "%d_%d_%d_%s_%dx%d.png",
                      frame_number, frame_meta->source_id, num_rects,
                      obj_meta->obj_label, width, height);
            qDebug()<< "fileNameString: "<< fileNameString;

            //save as PNG
            GstMapInfo mapInfo{};
            if(gst_buffer_map(buffer, &mapInfo, GST_MAP_READ))
            {
                std::string strFile(fileNameString);

                cv::Mat frame(cv::Size(width, height), CV_8UC4, (uchar*)mapInfo.data, cv::Mat::AUTO_STEP);
                cv::Mat bgra;
                cv::cvtColor(frame, bgra, cv::COLOR_RGBA2BGRA); // imwrite expects BGRA channel order; the caps deliver RGBA
                if(cv::imwrite(strFile, bgra))
                {
                    qInfo()<< "save " << strFile.c_str() << " succeeded";
                }
                else
                    qWarning()<<"save " << strFile.c_str() << " failed";
                gst_buffer_unmap (buffer, &mapInfo);	// release the mapping
            }
        }

        frame_number++;
    }
    gst_sample_unref (sample);

    return GST_FLOW_OK;
}
