1、首先参考:FFmpeg5.0源码阅读——内存池AVBufferPool
2、链表是怎么串起来的?
看下面三个核心函数:
/* Free callback installed on pooled buffers: instead of releasing the
 * memory, push the entry back onto the pool's free list.
 * opaque is the BufferPoolEntry set up in pool_alloc_buffer(). */
static void pool_release_buffer(void *opaque, uint8_t *data)
{
BufferPoolEntry *buf = opaque;
AVBufferPool *pool = buf->pool;
/* Optionally poison the returned memory to catch use-after-release. */
if(CONFIG_MEMORY_POISONING)
memset(buf->data, FF_MEMORY_POISON, pool->size);
ff_mutex_lock(&pool->mutex);
/* Push onto the head of the singly linked free list. */
buf->next = pool->pool;
pool->pool = buf;
ff_mutex_unlock(&pool->mutex);
/* Drop the reference taken in av_buffer_pool_get(); the caller whose
 * decrement brings refcount to 0 tears the whole pool down. */
if (atomic_fetch_sub_explicit(&pool->refcount, 1, memory_order_acq_rel) == 1)
buffer_pool_free(pool);
}
/* allocate a new buffer and override its free() callback so that
 * it is returned to the pool on free */
static AVBufferRef *pool_alloc_buffer(AVBufferPool *pool)
{
BufferPoolEntry *buf;
AVBufferRef *ret;
/* At least one user-supplied allocator must have been configured. */
av_assert0(pool->alloc || pool->alloc2);
/* Prefer alloc2 (which receives the pool's opaque) over plain alloc. */
ret = pool->alloc2 ? pool->alloc2(pool->opaque, pool->size) :
pool->alloc(pool->size);
if (!ret)
return NULL;
buf = av_mallocz(sizeof(*buf));
if (!buf) {
av_buffer_unref(&ret);
return NULL;
}
/* Save the buffer's original data/opaque/free so the entry can still
 * free the real memory later, then redirect the AVBuffer's free
 * callback so that "freeing" it recycles the entry into the pool. */
buf->data = ret->buffer->data;
buf->opaque = ret->buffer->opaque;
buf->free = ret->buffer->free;
buf->pool = pool;
ret->buffer->opaque = buf;
ret->buffer->free = pool_release_buffer;
return ret;
}
/* Hand out a buffer from the pool: reuse the head of the free list when
 * one is available, otherwise allocate a fresh entry. */
AVBufferRef *av_buffer_pool_get(AVBufferPool *pool)
{
AVBufferRef *ret;
BufferPoolEntry *buf;
ff_mutex_lock(&pool->mutex);
buf = pool->pool;
if (buf) {
/* Reuse path: wrap the cached entry in a new AVBufferRef whose free
 * callback returns it to the pool instead of freeing it. */
ret = av_buffer_create(buf->data, pool->size, pool_release_buffer,
buf, 0);
if (ret) {
/* Pop the entry off the free list only once wrapping succeeded. */
pool->pool = buf->next;
buf->next = NULL;
}
} else {
/* Free list empty (e.g. first call): allocate a brand-new entry. */
ret = pool_alloc_buffer(pool);
}
ff_mutex_unlock(&pool->mutex);
/* Every outstanding buffer holds one pool reference; it is released in
 * pool_release_buffer() when the buffer goes back to the pool. */
if (ret)
atomic_fetch_add_explicit(&pool->refcount, 1, memory_order_relaxed);
return ret;
}
其中用户调用 av_buffer_pool_get():当池中还没有可复用的缓冲(例如第一次获取)时,会进入 pool_alloc_buffer() 分支。下面看第一次创建内存池时的调用链,入口是 av_hwframe_ctx_init():
/* Finalise an AVHWFramesContext after the caller has filled in format,
 * dimensions etc.: validates parameters, runs the backend init hook and
 * optionally preallocates the frame pool. */
int av_hwframe_ctx_init(AVBufferRef *ref)
{
AVHWFramesContext *ctx = (AVHWFramesContext*)ref->data;
const enum AVPixelFormat *pix_fmt;
int ret;
if (ctx->internal->source_frames) {
/* A derived frame context is already initialised. */
return 0;
}
/* validate the pixel format */
for (pix_fmt = ctx->internal->hw_type->pix_fmts; *pix_fmt != AV_PIX_FMT_NONE; pix_fmt++) {
if (*pix_fmt == ctx->format)
break;
}
if (*pix_fmt == AV_PIX_FMT_NONE) {
av_log(ctx, AV_LOG_ERROR,
"The hardware pixel format '%s' is not supported by the device type '%s'\n",
av_get_pix_fmt_name(ctx->format), ctx->internal->hw_type->name);
return AVERROR(ENOSYS);
}
/* validate the dimensions */
ret = av_image_check_size(ctx->width, ctx->height, 0, ctx);
if (ret < 0)
return ret;
/* format-specific init */
if (ctx->internal->hw_type->frames_init) {
ret = ctx->internal->hw_type->frames_init(ctx);
if (ret < 0)
goto fail;
}
/* Fall back to the backend-provided internal pool when the user did
 * not supply their own. */
if (ctx->internal->pool_internal && !ctx->pool)
ctx->pool = ctx->internal->pool_internal;
/* preallocate the frames in the pool, if requested */
if (ctx->initial_pool_size > 0) {
ret = hwframe_pool_prealloc(ref);
if (ret < 0)
goto fail;
}
return 0;
fail:
/* Undo frames_init() if anything failed after it succeeded. */
if (ctx->internal->hw_type->frames_uninit)
ctx->internal->hw_type->frames_uninit(ctx);
return ret;
}
hwframe_pool_prealloc()是第一次创建内存池。
/* Warm up the frame pool by allocating initial_pool_size frames up front
 * and immediately releasing them back, so later av_hwframe_get_buffer()
 * calls can reuse them from the pool's free list.
 *
 * Returns 0 on success, a negative AVERROR code on failure. */
static int hwframe_pool_prealloc(AVBufferRef *ref)
{
    AVHWFramesContext *ctx = (AVHWFramesContext*)ref->data;
    AVFrame **frames;
    int i, ret = 0;

    frames = av_mallocz_array(ctx->initial_pool_size, sizeof(*frames));
    if (!frames)
        return AVERROR(ENOMEM);

    for (i = 0; i < ctx->initial_pool_size; i++) {
        frames[i] = av_frame_alloc();
        if (!frames[i]) {
            /* Bug fix: previously this jumped to fail with ret still 0,
             * silently reporting success on an allocation failure. */
            ret = AVERROR(ENOMEM);
            goto fail;
        }
        ret = av_hwframe_get_buffer(ref, frames[i], 0);
        if (ret < 0)
            goto fail;
    }

fail:
    /* Release every frame (av_frame_free() ignores NULL slots); their
     * underlying buffers stay cached in the pool. */
    for (i = 0; i < ctx->initial_pool_size; i++)
        av_frame_free(&frames[i]);
    av_freep(&frames);
    return ret;
}
可以看到 for 循环逐个预分配帧的过程,其中每一帧都通过 av_hwframe_get_buffer() 获取缓冲:
/* Allocate a hardware frame from the frames context. For a derived
 * context the buffer is allocated in the source context and mapped;
 * otherwise the backend's frames_get_buffer hook does the work. */
int av_hwframe_get_buffer(AVBufferRef *hwframe_ref, AVFrame *frame, int flags)
{
AVHWFramesContext *ctx = (AVHWFramesContext*)hwframe_ref->data;
int ret;
if (ctx->internal->source_frames) {
// This is a derived frame context, so we allocate in the source
// and map the frame immediately.
AVFrame *src_frame;
frame->format = ctx->format;
frame->hw_frames_ctx = av_buffer_ref(hwframe_ref);
if (!frame->hw_frames_ctx)
return AVERROR(ENOMEM);
src_frame = av_frame_alloc();
if (!src_frame)
return AVERROR(ENOMEM);
/* Recurse into the source context to obtain the real buffer. */
ret = av_hwframe_get_buffer(ctx->internal->source_frames,
src_frame, 0);
if (ret < 0) {
av_frame_free(&src_frame);
return ret;
}
ret = av_hwframe_map(frame, src_frame,
ctx->internal->source_allocation_map_flags);
if (ret) {
av_log(ctx, AV_LOG_ERROR, "Failed to map frame into derived "
"frame context: %d.\n", ret);
av_frame_free(&src_frame);
return ret;
}
// Free the source frame immediately - the mapped frame still
// contains a reference to it.
av_frame_free(&src_frame);
return 0;
}
/* Non-derived path: the backend must provide an allocator and the
 * context must have a buffer pool set up. */
if (!ctx->internal->hw_type->frames_get_buffer)
return AVERROR(ENOSYS);
if (!ctx->pool)
return AVERROR(EINVAL);
/* The frame keeps a reference on the frames context for its lifetime. */
frame->hw_frames_ctx = av_buffer_ref(hwframe_ref);
if (!frame->hw_frames_ctx)
return AVERROR(ENOMEM);
ret = ctx->internal->hw_type->frames_get_buffer(ctx, frame);
if (ret < 0) {
av_buffer_unref(&frame->hw_frames_ctx);
return ret;
}
frame->extended_data = frame->data;
return 0;
}
其中会调用硬件后端注册的回调函数 hw_type->frames_get_buffer(),这里我们看英伟达(CUDA)解码器的实现:
/* CUDA backend's frames_get_buffer hook: takes a device buffer from the
 * pool and fills in the AVFrame plane pointers and linesizes. */
static int cuda_get_buffer(AVHWFramesContext *ctx, AVFrame *frame)
{
CUDAFramesContext *priv = ctx->internal->priv;
int res;
/* Grab (or lazily allocate) a device buffer from the pool. */
frame->buf[0] = av_buffer_pool_get(ctx->pool);
if (!frame->buf[0])
return AVERROR(ENOMEM);
res = av_image_fill_arrays(frame->data, frame->linesize, frame->buf[0]->data,
ctx->sw_format, ctx->width, ctx->height, priv->tex_alignment);
if (res < 0)
return res;
// YUV420P is a special case.
// Nvenc expects the U/V planes in swapped order from how ffmpeg expects them, also chroma is half-aligned
if (ctx->sw_format == AV_PIX_FMT_YUV420P) {
frame->linesize[1] = frame->linesize[2] = frame->linesize[0] / 2;
/* Swap U/V: data[1] (set above) moves to data[2]; data[1] then points
 * at the plane immediately following it. Order of these two
 * assignments matters. */
frame->data[2] = frame->data[1];
frame->data[1] = frame->data[2] + frame->linesize[2] * (ctx->height / 2);
}
frame->format = AV_PIX_FMT_CUDA;
frame->width = ctx->width;
frame->height = ctx->height;
return 0;
}
所以真正调用的是av_buffer_pool_get(),这里就和我们最开始的代码一致了。
av_buffer_pool_get()在第一次创建了一个BufferPoolEntry,在pool_release_buffer()的时候串起来,注意,这里是个单链表:
/* Free callback for pooled buffers: returns the entry to the pool's
 * singly linked free list instead of freeing the memory. */
static void pool_release_buffer(void *opaque, uint8_t *data)
{
BufferPoolEntry *buf = opaque;
AVBufferPool *pool = buf->pool;
if(CONFIG_MEMORY_POISONING)
memset(buf->data, FF_MEMORY_POISON, pool->size);
ff_mutex_lock(&pool->mutex);
buf->next = pool->pool;
// Insert at the list head: the most recently released entry becomes the new head.
pool->pool = buf;
ff_mutex_unlock(&pool->mutex);
if (atomic_fetch_sub_explicit(&pool->refcount, 1, memory_order_acq_rel) == 1)
buffer_pool_free(pool);
}