jetpack视频硬解码

参考链接: https://docs.nvidia.com/jetson/l4t-multimedia/index.html

处理步骤

The following diagram shows the flow through this sample.
在这里插入图片描述
The Output Plane receives input in bitstream format and delivers it to the Decoder for decoding.
The Capture Plane transfers decoded frames to the application in YUV format.
For the Output Plane the application supports MMAP and USRPTR memory types. For the Capture Plane it supports MMAP and DMABUF memory types.
The application can also dump files from the Capture Plane.

初始化

一:设置解码器参数
二:创建新的V4L2视频解码器对象
三:创建RGB DMA FD
四:映射到MMAP存储器,并开启流
伪代码示例如下:

/**
 * InitDecoder - create and fully initialise a V4L2 hardware video decoder.
 *
 * Opens the NVDEC device node, verifies the multi-planar M2M capability,
 * subscribes to resolution-change events, allocates the RGBA destination
 * DMA buffer, configures the output plane for H.264 bitstream input,
 * exports and maps its MMAP buffers, starts streaming, and spawns the
 * capture-plane thread.
 *
 * @param width: display width of the decoded output
 * @param height: display height of the decoded output
 * @param cb: callback invoked for each decoded frame
 * @param user_data: opaque pointer handed back to the callback
 * @return context_t* on success, NULL on failure (all resources acquired
 *         up to the failure point are released before returning)
 */
context_t* InitDecoder(int width, int height, OnDecodeRecv cb, void* user_data)
{
    context_t *ctx = new context_t();
    if (ctx == NULL) {
        // NOTE(review): plain `new` throws std::bad_alloc instead of
        // returning NULL, so this check is defensive only.
        printf("[E]: ctx create err\n");
        return NULL;
    }
    // Zero the whole context, then fill in the fields we use.
    memset(ctx, 0, sizeof (context_t));
    ctx->out_pixfmt = 1; // 1 = NV12, 0 = YUV420; NV12 is the more stable choice here
    ctx->decode_pixfmt = V4L2_PIX_FMT_H264;
    ctx->op_mem_type = V4L2_MEMORY_MMAP;
    ctx->cp_mem_type = V4L2_MEMORY_DMABUF;
    ctx->op_buf_type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
    ctx->cp_buf_type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
    ctx->fd = -1;
    ctx->dst_dma_fd = -1;
    ctx->num_queued_op_buffers = 0;
    ctx->op_buffers = NULL;
    ctx->cp_buffers = NULL;
    ctx->display_height = height;
    ctx->display_width = width;

    pthread_mutex_init(&ctx->queue_lock, NULL);
    pthread_cond_init(&ctx->queue_cond, NULL);
    ctx->cb = cb;
    ctx->user_data = user_data;

    /* All locals are declared (and initialised) before the first `goto fail`
     * below, so no initialisation is ever jumped over. */
    int ret = 0;
    int flags = 0;
    struct v4l2_capability caps;        // decoder capability query result
    struct v4l2_buffer op_v4l2_buf;
    struct v4l2_plane op_planes[MAX_PLANES];
    struct v4l2_exportbuffer op_expbuf; // output plane export-buffer descriptor
    NvBufSurfaceAllocateParams rgbParams = {{0}};
    NvBufSurface *rgb_nvbuf_surf = NULL;

    /* Create a new V4L2 Video Decoder object on the device node
     * "/dev/nvhost-nvdec". Additional flags can be OR-ed in; by default
     * the device is opened in blocking mode. */
    ctx->fd = v4l2_open(DECODER_DEV, flags | O_RDWR);
    if (ctx->fd == -1)
    {
        cerr << "Could not open device " << DECODER_DEV << endl;
        goto fail;
    }

    // VIDIOC_QUERYCAP: query the device capability set.
    ret = v4l2_ioctl(ctx->fd, VIDIOC_QUERYCAP, &caps);
    if (ret)
    {
        cerr << "Failed to query video capabilities" << endl;
        goto fail;
    }
    /* V4L2_CAP_VIDEO_M2M_MPLANE: the device supports the multi-planar API
     * through the Video Memory-To-Memory interface. */
    if (!(caps.capabilities & V4L2_CAP_VIDEO_M2M_MPLANE))
    {
        cerr << "Device does not support V4L2_CAP_VIDEO_M2M_MPLANE" << endl;
        goto fail;
    }

    /* Subscribe to V4L2_EVENT_RESOLUTION_CHANGE: fired when the source
     * resolution/format is first detected or changes. */
    ret = subscribe_event(ctx->fd, V4L2_EVENT_RESOLUTION_CHANGE, 0, 0);
    if (ret)
    {
        cerr << "Failed to subscribe for resolution change" << endl;
        goto fail;
    }

    /* V4L2_CID_MPEG_VIDEO_DISABLE_COMPLETE_FRAME_INPUT: allow feeding
     * arbitrary bitstream chunks rather than only complete frames. */
    ret = set_ext_controls(ctx->fd, V4L2_CID_MPEG_VIDEO_DISABLE_COMPLETE_FRAME_INPUT, 1);
    if (ret)
    {
        cerr << "Failed to set control disable complete frame" << endl;
        goto fail;
    }

    // Allocate the RGBA destination DMA buffer.
    rgbParams.params.width  = ctx->display_width;
    rgbParams.params.height = ctx->display_height;
    rgbParams.params.memType = NVBUF_MEM_SURFACE_ARRAY;
    rgbParams.params.layout = NVBUF_LAYOUT_PITCH;
    rgbParams.params.colorFormat = NVBUF_COLOR_FORMAT_RGBA;
    rgbParams.memtag = NvBufSurfaceTag_VIDEO_CONVERT;
    fill_bytes_per_pixel(rgbParams.params.colorFormat, bytes_per_pixel_destfmt);
    ret = NvBufSurfaceAllocate(&rgb_nvbuf_surf, 1, &rgbParams);
    if (ret)
    {
        cerr << "Creation of dmabuf failed" << endl;
        rgb_nvbuf_surf = NULL; // do not destroy a surface that was never allocated
        goto fail;
    }
    rgb_nvbuf_surf->numFilled = 1;
    ctx->rgb_dma_fd = rgb_nvbuf_surf->surfaceList[0].bufferDesc;

    /* The output plane receives the encoded input; configure it for the
     * H.264 bitstream format. */
    ret = set_output_plane_format(ctx, ctx->decode_pixfmt, CHUNK_SIZE);
    if (ret)
    {
        cerr << "Error in setting output plane format" << endl;
        goto fail;
    }

    // Request buffers on the output plane (sets ctx->op_num_buffers).
    ret = req_buffers_on_output_plane(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE, ctx->op_mem_type, 10);
    if (ret)
    {
        cerr << "Error in requesting buffers on output plane" << endl;
        goto fail;
    }

    /* Query the status of the requested buffers. For each one, export it
     * and map it for MMAP memory. */
    for (uint32_t i = 0; i < ctx->op_num_buffers; ++i)
    {
        memset(&op_v4l2_buf, 0, sizeof (struct v4l2_buffer));
        memset(op_planes, 0, sizeof (op_planes));
        op_v4l2_buf.index = i;
        op_v4l2_buf.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
        op_v4l2_buf.memory = ctx->op_mem_type;
        op_v4l2_buf.m.planes = op_planes;
        op_v4l2_buf.length = ctx->op_num_planes;

        // VIDIOC_QUERYBUF: query the state of this v4l2_buffer.
        ret = v4l2_ioctl(ctx->fd, VIDIOC_QUERYBUF, &op_v4l2_buf);
        if (ret)
        {
            cerr << "Error in querying buffers" << endl;
            goto fail;
        }

        for (uint32_t j = 0; j < ctx->op_num_planes; ++j)
        {
            ctx->op_buffers[i]->planes[j].length     = op_v4l2_buf.m.planes[j].length;
            ctx->op_buffers[i]->planes[j].mem_offset = op_v4l2_buf.m.planes[j].m.mem_offset;
        }

        memset(&op_expbuf, 0, sizeof (struct v4l2_exportbuffer));
        op_expbuf.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
        op_expbuf.index = i;

        for (uint32_t j = 0; j < ctx->op_num_planes; ++j)
        {
            op_expbuf.plane = j;
            ret = v4l2_ioctl(ctx->fd, VIDIOC_EXPBUF, &op_expbuf);
            if (ret)
            {
                cerr << "Error in exporting buffer at index" << i << endl;
                goto fail;
            }
            ctx->op_buffers[i]->planes[j].fd = op_expbuf.fd;
        }

        // Memory-map this output plane buffer.
        if (ctx->op_buffers[i]->map())
        {
            cerr << "Buffer mapping error on output plane" << endl;
            goto fail;
        }
    }

    /* Start stream processing on the output plane by setting the
     * streaming status ON. */
    ret = v4l2_ioctl(ctx->fd, VIDIOC_STREAMON, &ctx->op_buf_type);
    if (ret != 0)
    {
        cerr << "Streaming error on output plane" << endl;
        goto fail;
    }
    ctx->op_streamon = 1;

    // Spawn the capture-plane loop thread; check the result (was unchecked).
    ret = pthread_create(&ctx->dec_capture_thread, NULL, capture_thread, ctx);
    if (ret != 0)
    {
        cerr << "Error creating capture thread" << endl;
        goto fail;
    }
    return ctx;

fail:
    /* Release everything acquired so far. Mapped op_buffers are torn down
     * with the decoder fd. NOTE(review): if op_buffers/cp_buffers own
     * separate heap allocations, they must also be freed in the decoder's
     * destroy path — confirm against req_buffers_on_output_plane. */
    if (rgb_nvbuf_surf != NULL)
        NvBufSurfaceDestroy(rgb_nvbuf_surf);
    if (ctx->fd != -1)
        v4l2_close(ctx->fd);
    pthread_mutex_destroy(&ctx->queue_lock);
    pthread_cond_destroy(&ctx->queue_cond);
    delete ctx;
    return NULL;
}

解码过程

一:将输入的视频帧,转换成 NvBuffer pointer,放到队列中。
二:通过条件变量通知阻塞线程处理输入 NvBuffer pointer
三:解码后的yuv格式会从回调函数中输出。
伪代码示例如下:

/* Feed one encoded chunk through the output plane: dequeue an empty
 * buffer, fill it with the bitstream data, and queue it back to the
 * decoder. One pass is enough because the caller hands in exactly one
 * frame per call, not a continuous stream.
 * Returns 0 on success, -1 on failure (ctx->in_error is set). */
static int decode_process(context_t* ctx, const char *data, unsigned int len)
{
    if (ctx->in_error)
        return 0;

    struct v4l2_buffer dq_buf;
    struct v4l2_plane dq_planes[MAX_PLANES];
    Buffer *nvbuf = NULL;
    int rc;

    memset(&dq_buf, 0, sizeof (dq_buf));
    memset(dq_planes, 0, sizeof (dq_planes));
    dq_buf.m.planes = dq_planes;

    /* All output plane buffers are queued up front, so an empty buffer
     * must be reclaimed before new data can be loaded and re-queued. */
    rc = dq_buffer(ctx, dq_buf, &nvbuf, ctx->op_buf_type, ctx->op_mem_type, -1);
    if (rc)
    {
        cerr << "Error DQing buffer at output plane" << endl;
        ctx->in_error = 1;
        return -1;
    }

    if (ctx->decode_pixfmt != V4L2_PIX_FMT_H264)
    {
        cout << "Currently only H264 supported" << endl;
        ctx->in_error = 1;
        return -1;
    }
    // Copy the encoded input chunk into the reclaimed NvBuffer.
    read_decoder_input_chunk(data, len, nvbuf);

    rc = q_buffer(ctx, dq_buf, nvbuf, ctx->op_buf_type, ctx->op_mem_type, ctx->op_num_planes);
    if (rc)
    {
        cerr << "Error Qing buffer at output plane" << endl;
        ctx->in_error = 1;
        return -1;
    }

    return 0;
}
/**
 * DoDecodeProcess - feed one encoded video frame to the decoder.
 *
 * @param ctx: context returned by InitDecoder
 * @param data: encoded (H.264) frame data
 * @param len: length of data in bytes
 * @return 0 on success, -1 on error (ctx->in_error is set)
 */
int DoDecodeProcess(context_t* ctx, const char *data, unsigned int len)
{
    /* BUG FIX: the original re-declared `ctx` from an undefined `handle`
     * variable, which shadowed the parameter and did not compile. */

    // Read the encoded data and enqueue it on the output plane.
    if (!ctx->in_error) {
        struct v4l2_buffer queue_v4l2_buf_op;
        struct v4l2_plane queue_op_planes[MAX_PLANES];
        Buffer *buffer;

        memset(&queue_v4l2_buf_op, 0, sizeof (queue_v4l2_buf_op));
        memset(queue_op_planes, 0, sizeof (queue_op_planes));

        /* NOTE(review): buffer index 0 is always used here while the queued
         * v4l2 buffer index is ctx->frame_id — confirm these are meant to
         * differ. Also note decode_process() below queues the same data
         * again; verify the sample intends this double enqueue. */
        buffer = ctx->op_buffers[0];
        if (ctx->decode_pixfmt == V4L2_PIX_FMT_H264)
        {
            // Copy the encoded input chunk into the NvBuffer.
            read_decoder_input_chunk(data, len, buffer);
        }
        else
        {
            cerr << "Currently only H264 supported" << endl;
            ctx->in_error = 1;
            return -1;
        }

        queue_v4l2_buf_op.index = ctx->frame_id;
        queue_v4l2_buf_op.m.planes = queue_op_planes;

        /* Enqueue the buffer on the output plane. (Queueing an empty
         * buffer is how EOS is signalled to the decoder.) */
        int ret = q_buffer(ctx, queue_v4l2_buf_op, buffer, ctx->op_buf_type, ctx->op_mem_type, ctx->op_num_planes);
        if (ret)
        {
            cerr << "Error Qing buffer at output plane" << endl;
            ctx->in_error = 1;
            return -1;
        }
    }

    // Dequeue-and-queue pass on the output plane; propagate its failure.
    if (decode_process(ctx, data, len) != 0)
        return -1;

    /* For blocking mode, after getting EOS on the output plane, dequeue
     * all the queued buffers there. After that the capture plane loop
     * should be signalled to stop. */
    while (ctx->num_queued_op_buffers > 0 && !ctx->in_error && !ctx->got_eos)
    {
        struct v4l2_buffer v4l2_buf;
        struct v4l2_plane planes[MAX_PLANES];

        memset(&v4l2_buf, 0, sizeof (v4l2_buf));
        memset(planes, 0, sizeof (planes));

        v4l2_buf.m.planes = planes;
        int ret = dq_buffer(ctx, v4l2_buf, NULL, ctx->op_buf_type, ctx->op_mem_type, -1);
        if (ret)
        {
            cerr << "Error DQing buffer at output plane" << endl;
            ctx->in_error = 1;
            break;
        }
    }
    // A stop flag for the capture-plane thread could be set here if needed.
    return 0;
}
  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值