简介
这个例子实现的是从本地文件读取yuv的数据,然后使用硬件进行编码。展示了:
- 如何使用libv4l2 API 采集数据
- 如何对视频进行H.264/H.265进行编码
- 在编码之前使用NVIDIA CUDA API在输入的YUV上画了一个黑色的框
编译和运行
编译之前需要先把环境搭建好。编译很简单,直接到video_cuda_enc的目录直接make即可。
输入以下命令:
$ cd $HOME/tegra_multimedia_api/samples/03_video_cuda_enc
$ make
运行命令的格式如下:
$ ./video_cuda_enc <in-file> <in-width> <in-height> <encoder-type> <out-file> [OPTIONS]
例如:
$ ./video_cuda_enc ../../data/Video/sample_outdoor_car_1080p_10fps.yuv 1920 1080 H264 test.h264
如果没有YUV数据,可以使用上一个例子(02_video_dec_cuda)解码生成:
$ cd $HOME/tegra_multimedia_api/samples/02_video_dec_cuda/
$ ./video_dec_cuda ../../data/Video/sample_outdoor_car_1080p_10fps.h264 H264 --disable-rendering \
-o ../../data/Video/sample_outdoor_car_1080p_10fps.yuv -f 2
程序流程图
下面的图显示了该例程的流程图:
NvEGLImageFromFd 从tegra分配的缓冲区文件描述符返回一个EGLImage指针,CUDA随后使用该EGLImage缓冲区渲染矩形;之后该缓冲区被送入视频编码器,编码为H.264或H.265视频流。
代码分析
这个程序使用一个名为context_t的结构体保存全局上下文,作为全局变量在程序各处都可以访问。
编码程序这里最主要的一个类就是NvVideoEncoder类,它包含了所有视频编码的方法和元素。还有一个NvVideoConverter类,它包含所有视频转换相关的元素和方法。该例程中使用了以下几个关键的成员:
output_plane | 指定输出的plane |
capture_plane | 指定采集的plane |
createVideoEncoder | 它是静态函数,创建视频编码实例 |
setExtControls | 给V4l2设备设置外部控制 |
setOutputPlaneFormat | 设置output plane的格式 |
setCapturePlaneFormat | 设置capture plane的格式 |
NvVideoEncoder包含两个关键的元素:output_plane 和 capture_plane.这两个元素都是继承自NvV4l2ElementPlane.
NvV4l2ElementPlane | Description |
setupPlane | Sets up the plane of V4l2 element. |
deinitPlane | Destroys the plane of V4l2 element. |
setStreamStatus | Starts/stops the stream. |
setDQThreadCallback | Sets the callback function of dqueue buffer thread. |
startDQThread | Starts the thread of dqueue buffer. |
stopDQThread | Stops the thread of dqueue buffer. |
qBuffer | Queues the V4l2 buffer. |
dqBuffer | Dqueues V4l2 buffer. |
getNumBuffers | Gets the number of V4l2 buffers. |
getNthBuffer | Gets the nth V4l2 buffer. |
下面来看一下整个代码的整体流程:
/*
* Copyright (c) 2016-2019, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "NvVideoEncoder.h"
#include "NvUtils.h"
#include <fstream>
#include <iostream>
#include <linux/videodev2.h>
#include <malloc.h>
#include <string.h>
#include "nvbuf_utils.h"
#include "NvCudaProc.h"
#include "video_cuda_enc.h"
// On `cond`, log `str` to cerr, set the caller's local `error` flag and
// jump to `label` (the caller's cleanup label).
// Wrapped in do { } while (0) so the macro expands to exactly one statement
// and is safe inside unbraced if/else bodies (the original bare
// `if (cond) { ... }` form had a dangling-else hazard). The goto still
// transfers control out of the loop as before.
#define TEST_ERROR(cond, str, label) do { \
    if (cond) { \
        cerr << str << endl; \
        error = 1; \
        goto label; \
    } } while (0)
using namespace std;
static void
abort(context_t *ctx)
{
ctx->got_error = true;
ctx->enc->abort();
}
/**
 * Writes one encoded frame (plane 0 of @buffer) to the output bitstream file.
 *
 * @param stream Open output file stream for the encoded H.264/H.265 stream.
 * @param buffer Encoded NvBuffer dequeued from the encoder capture plane.
 * @return 0 on success, -1 if the stream entered a failed state (e.g. disk
 *         full). The original code ignored write errors and always returned
 *         0, silently losing data.
 */
static int
write_encoder_output_frame(ofstream * stream, NvBuffer * buffer)
{
    stream->write((char *) buffer->planes[0].data, buffer->planes[0].bytesused);
    return stream->good() ? 0 : -1;
}
/**
 * Callback invoked from the capture-plane DQ thread each time an encoded
 * buffer is successfully dequeued.
 *
 * Parameters:
 *   v4l2_buf      Pointer to the v4l2_buffer structure used for dequeueing.
 *   buffer        Pointer to the NvBuffer at the index contained in v4l2_buf.
 *   shared_buffer Pointer to the shared NvBuffer if the plane shares a
 *                 buffer with other elements, else NULL (unused here).
 *   arg           Application data registered with startDQThread; here it
 *                 is the context_t* for this run.
 *
 * Returns:
 *   false to stop the DQ thread; true to keep it running.
 */
static bool
encoder_capture_plane_dq_callback(struct v4l2_buffer *v4l2_buf, NvBuffer * buffer,
                                  NvBuffer * shared_buffer, void *arg)
{
    context_t *ctx = (context_t *) arg;
    NvVideoEncoder *enc = ctx->enc;
    // A NULL v4l2_buf means the dequeue itself failed: abort and stop.
    if (!v4l2_buf)
    {
        cerr << "Failed to dequeue buffer from encoder capture plane" << endl;
        abort(ctx);
        return false;
    }
    // Write the encoded bitstream held in `buffer` to the output file.
    write_encoder_output_frame(ctx->out_file, buffer);
    // Return the buffer to the capture plane so the encoder can refill it.
    if (enc->capture_plane.qBuffer(*v4l2_buf, NULL) < 0)
    {
        cerr << "Error while Qing buffer at capture plane" << endl;
        abort(ctx);
        return false;
    }
    // bytesused == 0 signals EOS from the encoder: stop the DQ thread.
    if (buffer->planes[0].bytesused == 0)
    {
        return false;
    }
    return true;
}
/**
 * Zeroes the whole context, then applies the sample's default encoder
 * settings (overridable later by command-line arguments).
 *
 * @param ctx The application context to initialize.
 */
static void
set_defaults(context_t * ctx)
{
    memset(ctx, 0, sizeof(context_t));
    // Default frame rate: 30/1 fps.
    ctx->fps_n = 30;
    ctx->fps_d = 1;
    // Default target bitrate: 4 Mbit/s.
    ctx->bitrate = 4 * 1024 * 1024;
}
/**
 * Entry point. Reads raw YUV420M frames from a file, post-processes each
 * frame with CUDA through an EGLImage (per the sample description, drawing
 * a black box on the frame), and hardware-encodes the result to an
 * H.264/H.265 elementary stream written to the output file.
 *
 * Usage:
 *   video_cuda_enc <in-file> <in-width> <in-height> <encoder-type>
 *                  <out-file> [OPTIONS]
 *
 * Returns 0 on success, a negative value on failure.
 */
int
main(int argc, char *argv[])
{
    context_t ctx;
    int ret = 0;
    int error = 0;
    bool eos = false;
    set_defaults(&ctx);
    // Acquire the default EGL display: CUDA processes the encoder input
    // buffers through EGLImages, which need an initialized EGL display.
    ctx.eglDisplay = eglGetDisplay(EGL_DEFAULT_DISPLAY);
    if (ctx.eglDisplay == EGL_NO_DISPLAY)
    {
        cout<<"Could not get EGL display connection"<<endl;
        return -1;
    }
    // Initialize egl
    if (!eglInitialize(ctx.eglDisplay, NULL, NULL))
    {
        cout<<"init EGL display failed"<<endl;
        return -1;
    }
    ret = parse_csv_args(&ctx, argc, argv);
    TEST_ERROR(ret < 0, "Error parsing commandline arguments", cleanup);
    ctx.in_file = new ifstream(ctx.in_file_path);
    TEST_ERROR(!ctx.in_file->is_open(), "Could not open input file", cleanup);
    ctx.out_file = new ofstream(ctx.out_file_path);
    TEST_ERROR(!ctx.out_file->is_open(), "Could not open output file", cleanup);
    // Create the hardware video encoder instance.
    ctx.enc = NvVideoEncoder::createVideoEncoder("enc0");
    TEST_ERROR(!ctx.enc, "Could not create encoder", cleanup);
    // It is necessary that the capture plane (encoded bitstream) format be
    // set before the output plane (raw YUV input) format, and width/height
    // must be set on the capture plane as well. The encoder behaves like a
    // pipe: we only need to configure its input and output ends.
    ret =
        ctx.enc->setCapturePlaneFormat(ctx.encoder_pixfmt, ctx.width,
                                       ctx.height, 2 * 1024 * 1024);
    // NOTE(review): this message says "output plane" but the call above
    // configures the capture plane — the string looks copy-pasted.
    TEST_ERROR(ret < 0, "Could not set output plane format", cleanup);
    ret =
        ctx.enc->setOutputPlaneFormat(V4L2_PIX_FMT_YUV420M, ctx.width,
                                      ctx.height);
    TEST_ERROR(ret < 0, "Could not set output plane format", cleanup);
    // Bitrate of the encoded stream, in bits per second.
    ret = ctx.enc->setBitrate(ctx.bitrate);
    TEST_ERROR(ret < 0, "Could not set bitrate", cleanup);
    // Select the encoding profile for the chosen codec.
    if (ctx.encoder_pixfmt == V4L2_PIX_FMT_H264)
    {
        ret = ctx.enc->setProfile(V4L2_MPEG_VIDEO_H264_PROFILE_HIGH);
    }
    else
    {
        ret = ctx.enc->setProfile(V4L2_MPEG_VIDEO_H265_PROFILE_MAIN);
    }
    TEST_ERROR(ret < 0, "Could not set encoder profile", cleanup);
    // H.264 additionally requires an encoding level.
    if (ctx.encoder_pixfmt == V4L2_PIX_FMT_H264)
    {
        ret = ctx.enc->setLevel(V4L2_MPEG_VIDEO_H264_LEVEL_5_0);
        TEST_ERROR(ret < 0, "Could not set encoder level", cleanup);
    }
    // Frame rate as numerator/denominator (defaults to 30/1).
    ret = ctx.enc->setFrameRate(ctx.fps_n, ctx.fps_d);
    TEST_ERROR(ret < 0, "Could not set framerate", cleanup);
    // Query, Export and Map the output plane buffers so that we can read
    // raw data into the buffers
    ret = ctx.enc->output_plane.setupPlane(V4L2_MEMORY_MMAP, 10, true, false);
    TEST_ERROR(ret < 0, "Could not setup output plane", cleanup);
    // Query, Export and Map the capture plane buffers so that we can write
    // encoded data from the buffers
    ret = ctx.enc->capture_plane.setupPlane(V4L2_MEMORY_MMAP, 6, true, false);
    TEST_ERROR(ret < 0, "Could not setup capture plane", cleanup);
    // output plane STREAMON
    ret = ctx.enc->output_plane.setStreamStatus(true);
    TEST_ERROR(ret < 0, "Error in output plane streamon", cleanup);
    // capture plane STREAMON
    ret = ctx.enc->capture_plane.setStreamStatus(true);
    TEST_ERROR(ret < 0, "Error in capture plane streamon", cleanup);
    // Register the capture-plane callback: it writes each encoded buffer to
    // the output file and re-queues the buffer.
    ctx.enc->capture_plane.
        setDQThreadCallback(encoder_capture_plane_dq_callback);
    // startDQThread starts a thread internally which calls the
    // encoder_capture_plane_dq_callback whenever a buffer is dequeued
    // on the plane
    ctx.enc->capture_plane.startDQThread(&ctx);
    // Enqueue all the empty capture plane buffers
    for (uint32_t i = 0; i < ctx.enc->capture_plane.getNumBuffers(); i++)
    {
        struct v4l2_buffer v4l2_buf;
        struct v4l2_plane planes[MAX_PLANES];
        memset(&v4l2_buf, 0, sizeof(v4l2_buf));
        memset(planes, 0, MAX_PLANES * sizeof(struct v4l2_plane));
        v4l2_buf.index = i;
        v4l2_buf.m.planes = planes;
        ret = ctx.enc->capture_plane.qBuffer(v4l2_buf, NULL);
        if (ret < 0)
        {
            cerr << "Error while queueing buffer at capture plane" << endl;
            abort(&ctx);
            goto cleanup;
        }
    }
    // Read video frame and queue all the output plane buffers
    for (uint32_t i = 0; i < ctx.enc->output_plane.getNumBuffers() &&
         !ctx.got_error; i++)
    {
        struct v4l2_buffer v4l2_buf;
        struct v4l2_plane planes[MAX_PLANES];
        NvBuffer *buffer = ctx.enc->output_plane.getNthBuffer(i);
        int fd;
        void **dat;
        memset(&v4l2_buf, 0, sizeof(v4l2_buf));
        memset(planes, 0, MAX_PLANES * sizeof(struct v4l2_plane));
        v4l2_buf.index = i;
        v4l2_buf.m.planes = planes;
        if (read_video_frame(ctx.in_file, *buffer) < 0)
        {
            cerr << "Could not read complete frame from input file" << endl;
            // bytesused == 0 marks end-of-stream for the encoder.
            v4l2_buf.m.planes[0].bytesused = 0;
        }
        fd = buffer->planes[0].fd;
        // Sync the CPU-written frame data so the device sees it.
        for (uint32_t j = 0 ; j < buffer->n_planes ; j++)
        {
            dat = (void **)&buffer->planes[j].data;
            ret = NvBufferMemSyncForDevice (fd, j, dat);
            if (ret < 0)
            {
                cerr << "Error while NvBufferMemSyncForDevice at output plane" << endl;
                abort(&ctx);
                goto cleanup;
            }
        }
        // Wrap the buffer's dmabuf fd in an EGLImage so CUDA can process
        // the YUV frame in place, then release the EGLImage.
        ctx.eglimg = NvEGLImageFromFd(ctx.eglDisplay, buffer->planes[0].fd);
        HandleEGLImage(&ctx.eglimg);
        NvDestroyEGLImage(ctx.eglDisplay, ctx.eglimg);
        // Queue the processed YUV frame on the encoder's output plane.
        ret = ctx.enc->output_plane.qBuffer(v4l2_buf, NULL);
        if (ret < 0)
        {
            cerr << "Error while queueing buffer at output plane" << endl;
            abort(&ctx);
            goto cleanup;
        }
        if (v4l2_buf.m.planes[0].bytesused == 0)
        {
            cerr << "File read complete." << endl;
            eos = true;
            break;
        }
    }
    // Keep reading input till EOS is reached
    while (!ctx.got_error && !ctx.enc->isInError() && !eos)
    {
        struct v4l2_buffer v4l2_buf;
        struct v4l2_plane planes[MAX_PLANES];
        NvBuffer *buffer;
        int fd;
        void **dat;
        memset(&v4l2_buf, 0, sizeof(v4l2_buf));
        memset(planes, 0, sizeof(planes));
        v4l2_buf.m.planes = planes;
        // Dequeue an empty buffer from the output plane. Blocking call:
        // returns 0 and fills `buffer` with the NvBuffer at the index
        // returned by the VIDIOC_DQBUF ioctl, or -1 on failure. If the
        // plane shared buffers with other elements and shared_buffer were
        // non-NULL, it would receive the shared NvBuffer.
        if (ctx.enc->output_plane.dqBuffer(v4l2_buf, &buffer, NULL, 10) < 0)
        {
            cerr << "ERROR while DQing buffer at output plane" << endl;
            abort(&ctx);
            goto cleanup;
        }
        if (read_video_frame(ctx.in_file, *buffer) < 0)
        {
            cerr << "Could not read complete frame from input file" << endl;
            v4l2_buf.m.planes[0].bytesused = 0;
        }
        fd = buffer->planes[0].fd;
        for (uint32_t j = 0 ; j < buffer->n_planes ; j++)
        {
            dat = (void **)&buffer->planes[j].data;
            // Sync the CPU cache for device (hardware) access.
            ret = NvBufferMemSyncForDevice (fd, j, dat);
            if (ret < 0)
            {
                cerr << "Error while NvBufferMemSyncForDevice at output plane" << endl;
                abort(&ctx);
                goto cleanup;
            }
        }
        // Same CUDA-via-EGLImage in-place processing as the priming loop.
        ctx.eglimg = NvEGLImageFromFd(ctx.eglDisplay, buffer->planes[0].fd);
        HandleEGLImage(&ctx.eglimg);
        NvDestroyEGLImage(ctx.eglDisplay, ctx.eglimg);
        // Re-queue the filled buffer on the output plane for encoding.
        ret = ctx.enc->output_plane.qBuffer(v4l2_buf, NULL);
        if (ret < 0)
        {
            cerr << "Error while queueing buffer at output plane" << endl;
            abort(&ctx);
            goto cleanup;
        }
        if (v4l2_buf.m.planes[0].bytesused == 0)
        {
            cerr << "File read complete." << endl;
            eos = true;
            break;
        }
    }
    // Wait till capture plane DQ Thread finishes
    // i.e. all the capture plane buffers are dequeued
    ctx.enc->capture_plane.waitForDQThread(2000);
cleanup:
    if (ctx.enc && ctx.enc->isInError())
    {
        cerr << "Encoder is in error" << endl;
        error = 1;
    }
    if (ctx.got_error)
    {
        error = 1;
    }
    // Release encoder, file streams and argument strings; then tear down EGL.
    delete ctx.enc;
    delete ctx.in_file;
    delete ctx.out_file;
    free(ctx.in_file_path);
    free(ctx.out_file_path);
    if (!eglTerminate(ctx.eglDisplay))
    {
        cout<<"ERROR eglTerminate failed"<<endl;
        error = 1;
    }
    if (!eglReleaseThread())
    {
        cout<<"ERROR eglReleaseThread failed"<<endl;
        error = 1;
    }
    if (error)
    {
        cout << "App run failed" << endl;
    }
    else
    {
        cout << "App run was successful" << endl;
    }
    return -error;
}