FFmpeg Source Code Analysis: avcodec_close()

      This article briefly analyzes FFmpeg's avcodec_close() function, which closes an opened codec context (for either an encoder or a decoder). The declaration of avcodec_close() is in libavcodec/avcodec.h, as shown below.

/**
 * Close a given AVCodecContext and free all the data associated with it
 * (but not the AVCodecContext itself).
 *
 * Calling this function on an AVCodecContext that hasn't been opened will free
 * the codec-specific data allocated in avcodec_alloc_context3() with a non-NULL
 * codec. Subsequent calls will do nothing.
 *
 * @note Do not use this function. Use avcodec_free_context() to destroy a
 * codec context (either open or closed). Opening and closing a codec context
 * multiple times is not supported anymore -- use multiple codec contexts
 * instead.
 */
int avcodec_close(AVCodecContext *avctx);
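
      As the @note above points out, new code should not call avcodec_close() directly: avcodec_free_context() both closes the context and frees the AVCodecContext itself. The following is a minimal usage sketch of that recommended lifecycle (the helper name open_and_free_decoder and its error handling are illustrative, not part of FFmpeg):

#include <libavcodec/avcodec.h>

/* Minimal sketch: allocate, open and tear down a decoder context.
 * avcodec_free_context() closes the codec internally, so no explicit
 * avcodec_close() call is needed. */
static int open_and_free_decoder(enum AVCodecID codec_id)
{
    AVCodec *codec = avcodec_find_decoder(codec_id);
    AVCodecContext *avctx;
    int ret;

    if (!codec)
        return AVERROR_DECODER_NOT_FOUND;

    avctx = avcodec_alloc_context3(codec);
    if (!avctx)
        return AVERROR(ENOMEM);

    ret = avcodec_open2(avctx, codec, NULL);
    /* ... avcodec_send_packet()/avcodec_receive_frame() loop goes here ... */

    /* Closes the codec and frees the context in one call. */
    avcodec_free_context(&avctx);
    return ret;
}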

      The definition of avcodec_close() is in libavcodec/utils.c, as shown below.

av_cold int avcodec_close(AVCodecContext *avctx)
{
    int i;

    if (!avctx)
        return 0;

    if (avcodec_is_open(avctx)) {
        FramePool *pool = avctx->internal->pool;
        if (CONFIG_FRAME_THREAD_ENCODER &&
            avctx->internal->frame_thread_encoder && avctx->thread_count > 1) {
            ff_frame_thread_encoder_free(avctx);
        }
        if (HAVE_THREADS && avctx->internal->thread_ctx)
            ff_thread_free(avctx);
        /* close the codec (invoke its close() callback) */
        if (avctx->codec && avctx->codec->close)
            avctx->codec->close(avctx);
        avctx->internal->byte_buffer_size = 0;
        av_freep(&avctx->internal->byte_buffer);
        av_frame_free(&avctx->internal->to_free);
        av_frame_free(&avctx->internal->compat_decode_frame);
        av_frame_free(&avctx->internal->buffer_frame);
        av_packet_free(&avctx->internal->buffer_pkt);
        av_packet_free(&avctx->internal->last_pkt_props);

        av_packet_free(&avctx->internal->ds.in_pkt);

        for (i = 0; i < FF_ARRAY_ELEMS(pool->pools); i++)
            av_buffer_pool_uninit(&pool->pools[i]);
        av_freep(&avctx->internal->pool);

        if (avctx->hwaccel && avctx->hwaccel->uninit)
            avctx->hwaccel->uninit(avctx);
        av_freep(&avctx->internal->hwaccel_priv_data);

        ff_decode_bsfs_uninit(avctx);

        av_freep(&avctx->internal);
    }

    for (i = 0; i < avctx->nb_coded_side_data; i++)
        av_freep(&avctx->coded_side_data[i].data);
    av_freep(&avctx->coded_side_data);
    avctx->nb_coded_side_data = 0;

    av_buffer_unref(&avctx->hw_frames_ctx);
    av_buffer_unref(&avctx->hw_device_ctx);

    if (avctx->priv_data && avctx->codec && avctx->codec->priv_class)
        av_opt_free(avctx->priv_data);
    av_opt_free(avctx);
    av_freep(&avctx->priv_data);
    if (av_codec_is_encoder(avctx->codec)) {
        av_freep(&avctx->extradata);
#if FF_API_CODED_FRAME
FF_DISABLE_DEPRECATION_WARNINGS
        av_frame_free(&avctx->coded_frame);
FF_ENABLE_DEPRECATION_WARNINGS
#endif
    }
    avctx->codec = NULL;
    avctx->active_thread_type = 0;

    return 0;
}

      As can be seen from its definition, avcodec_close() frees the data associated with the AVCodecContext and invokes the AVCodec's close() callback to shut down the underlying codec.
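
      For reference, avcodec_free_context(), which the documentation recommends instead, is essentially a wrapper around avcodec_close(): it closes the context and then frees the few remaining buffers plus the AVCodecContext itself. The following is a simplified sketch of its shape in this era of FFmpeg (the list of freed fields is abbreviated; see libavcodec/options.c for the exact code):

/* Simplified sketch of avcodec_free_context(); not the verbatim source. */
void avcodec_free_context(AVCodecContext **pavctx)
{
    AVCodecContext *avctx = *pavctx;

    if (!avctx)
        return;

    avcodec_close(avctx);              /* the function analyzed above */

    av_freep(&avctx->extradata);       /* remaining per-context buffers */
    av_freep(&avctx->subtitle_header); /* (abbreviated)                 */

    av_freep(pavctx);                  /* frees the AVCodecContext itself */
}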

AVCodec->close()

      AVCodec's close() is a function pointer that points to the close function of a specific codec implementation. Here we take libx264 as an example and look at the definition of its AVCodec structure, shown below.

AVCodec ff_libx264_encoder = {
    .name             = "libx264",
    .long_name        = NULL_IF_CONFIG_SMALL("libx264 H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10"),
    .type             = AVMEDIA_TYPE_VIDEO,
    .id               = AV_CODEC_ID_H264,
    .priv_data_size   = sizeof(X264Context),
    .init             = X264_init,
    .encode2          = X264_frame,
    .close            = X264_close,
    .capabilities     = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_AUTO_THREADS |
                        AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE,
    .priv_class       = &x264_class,
    .defaults         = x264_defaults,
    .init_static_data = X264_init_static,
    .caps_internal    = FF_CODEC_CAP_INIT_CLEANUP,
    .wrapper_name     = "libx264",
};

      From the definition of ff_libx264_encoder we can see that the close() field points to X264_close(). The definition of X264_close() is shown below.

static av_cold int X264_close(AVCodecContext *avctx)
{
    X264Context *x4 = avctx->priv_data;

    av_freep(&avctx->extradata);
    av_freep(&x4->sei);
    av_freep(&x4->reordered_opaque);

    if (x4->enc) {
        x264_encoder_close(x4->enc);
        x4->enc = NULL;
    }

    return 0;
}
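
      X264_close() frees the buffers allocated by the FFmpeg wrapper (extradata, the SEI buffer and the reordered_opaque array) and then returns the encoder handle to libx264 via x264_encoder_close(). For context, the sketch below shows the corresponding open/close pairing of the raw libx264 API, which is roughly what X264_init() and X264_close() wrap (the helper name and the fixed "medium" preset are illustrative assumptions, not FFmpeg code):

#include <x264.h>

/* Minimal sketch: open and close a bare libx264 encoder handle,
 * the same x264_t* that the FFmpeg wrapper stores in x4->enc. */
static int open_close_x264(int width, int height)
{
    x264_param_t param;
    x264_t *enc;

    if (x264_param_default_preset(&param, "medium", NULL) < 0)
        return -1;
    param.i_width  = width;
    param.i_height = height;

    enc = x264_encoder_open(&param);   /* what X264_init() calls */
    if (!enc)
        return -1;

    /* ... x264_encoder_encode() calls would go here ... */

    x264_encoder_close(enc);           /* what X264_close() calls */
    return 0;
}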

 
