Capturing Audio from a Device with FFmpeg and Writing It to a File

In a previous post we encoded PCM data and wrote it to a file. This time is slightly different: the audio comes from the computer's audio capture device. The data obtained from the device cannot be fed to the encoder directly; it first has to be decoded and then resampled to produce the sample format the encoder expects.

The complete code is shown below. To keep it short, the example does not check the return values of most functions; get familiar with the overall flow first, then add error handling as needed when you use it for real (a sketch of one way to do that follows).
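A minimal error-check helper, assuming a hypothetical name checkRet and using FFmpeg's av_strerror() to turn error codes into readable messages; this helper is not part of the original code, just one possible way to add the missing checks:

#include <cstdio>
#include <string>
extern "C" {
#include <libavutil/error.h>
}

// Hypothetical helper: report an FFmpeg error code in readable form.
static void checkRet(int ret, const std::string &what)
{
    if (ret < 0) {
        char errBuf[AV_ERROR_MAX_STRING_SIZE] = {0};
        av_strerror(ret, errBuf, sizeof(errBuf));
        std::fprintf(stderr, "%s failed: %s\n", what.c_str(), errBuf);
        // In real code: clean up and return/throw instead of continuing.
    }
}

// Usage: checkRet(avformat_write_header(avFmtCtxOut, NULL), "avformat_write_header");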

extern "C" {
#include <libavdevice/avdevice.h>
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
#include <libswresample/swresample.h>
#include <libavutil/audio_fifo.h>
#include <libavutil/channel_layout.h>
#include <libavutil/samplefmt.h>
}
#include <string>

void func()
{
    avdevice_register_all();

    AVFormatContext  *avFmtCtxOut;
    std::string outFilePath = "d:/capture.mp4";

    AVOutputFormat *fmt = av_guess_format(NULL, outFilePath.c_str(), NULL);
    avformat_alloc_output_context2(&avFmtCtxOut, fmt, fmt->name, outFilePath.c_str());

    // Open the capture device; the DirectShow input expects "audio=" + <device name>
    AVFormatContext* pAudioContext = nullptr;
    std::string audioDevice = "audio=virtual-audio-capturer";
    AVInputFormat* AudioInputFormat = av_find_input_format("dshow");
    AVDictionary* AudioOptions = nullptr;
    int ret = avformat_open_input(&pAudioContext, audioDevice.c_str(), AudioInputFormat, &AudioOptions);

    // Find the audio stream
    if (avformat_find_stream_info(pAudioContext, NULL) < 0)
    {
        return;
    }

    int audioStreamId = 0;
    int StreamsNumber = (int)pAudioContext->nb_streams;
    AVStream* audioStream = NULL;
    for (int i = 0; i < StreamsNumber; i++) {
        if (pAudioContext->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
            audioStreamId = i;
            audioStream = pAudioContext->streams[i];
            break;
        }
    }

    AVCodecParameters *AudioParams = audioStream->codecpar;
    // Find the decoder for the captured stream
    AVCodec* AudioCodecIn = avcodec_find_decoder(AudioParams->codec_id);

    /* Initialize the audio decoder context from the stream parameters */
    AVCodecContext* AudioCodecContextIn = avcodec_alloc_context3(AudioCodecIn);
    avcodec_parameters_to_context(AudioCodecContextIn, AudioParams);

    // Open the decoder
    ret = avcodec_open2(AudioCodecContextIn, AudioCodecIn, NULL);

    // Create the audio output stream in the muxer
    AVStream *AudioStreamOut = avformat_new_stream(avFmtCtxOut, NULL);

    /* Find the AAC encoder and initialize its context */
    AVCodec* AudioCodecOut = avcodec_find_encoder(AV_CODEC_ID_AAC);

    AVCodecContext* AudioCodecContextOut = avcodec_alloc_context3(AudioCodecOut);


    // Prefer the input sample rate if the encoder supports it,
    // otherwise fall back to the encoder's first supported rate
    if ((AudioCodecOut)->supported_samplerates) {
        AudioCodecContextOut->sample_rate = (AudioCodecOut)->supported_samplerates[0];
        for (int i = 0; (AudioCodecOut)->supported_samplerates[i]; i++) {
            if ((AudioCodecOut)->supported_samplerates[i] == AudioCodecContextIn->sample_rate)
                AudioCodecContextOut->sample_rate = AudioCodecContextIn->sample_rate;
        }
    }

    AudioCodecContextOut->codec_id = AV_CODEC_ID_AAC;
    AudioCodecContextOut->bit_rate = 128000;
    AudioCodecContextOut->channels = AudioCodecContextIn->channels;
    AudioCodecContextOut->channel_layout = av_get_default_channel_layout(AudioCodecContextOut->channels);
    AudioCodecContextOut->sample_fmt = AudioCodecOut->sample_fmts ? AudioCodecOut->sample_fmts[0] : AV_SAMPLE_FMT_FLTP;
    AudioCodecContextOut->time_base = {1, AudioCodecContextOut->sample_rate};
    AudioCodecContextOut->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL;
    if (avFmtCtxOut->oformat->flags & AVFMT_GLOBALHEADER)
        AudioCodecContextOut->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
    avcodec_open2(AudioCodecContextOut, AudioCodecOut, NULL);
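    // After avcodec_open2() the encoder has filled in frame_size
    // (1024 samples per frame for AAC); it is used below when draining the FIFO.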


    // The stream created above has no codec parameters yet, so its codec_type
    // is still AVMEDIA_TYPE_UNKNOWN; that is how its index is located here.
    int audioIndexOut = 0;
    for (int i = 0; i < (int)avFmtCtxOut->nb_streams; i++) {
        if (avFmtCtxOut->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_UNKNOWN) {
            audioIndexOut = i;
        }
    }

    avcodec_parameters_from_context(avFmtCtxOut->streams[audioIndexOut]->codecpar, AudioCodecContextOut);

    // Open the output file (unless the muxer needs no file) and write the header
    if (!(avFmtCtxOut->oformat->flags & AVFMT_NOFILE)) {
        if (avio_open2(&avFmtCtxOut->pb, outFilePath.c_str(), AVIO_FLAG_WRITE, NULL, NULL) < 0) {
            return;
        }
    }

    ret = avformat_write_header(avFmtCtxOut, NULL);

    AVPacket* pRawAudioPkt = av_packet_alloc();        // packets read from the capture device
    AVPacket* reEncodecdAudioPkt = av_packet_alloc();  // holds the encoded (AAC) packets
    AVFrame* pRawAudioFrame = av_frame_alloc();        // decoded raw audio frame

    // Frame that carries exactly one encoder-sized chunk read back from the FIFO
    AVFrame* pResampledFrame = av_frame_alloc();
    pResampledFrame->nb_samples = AudioCodecContextOut->frame_size;
    pResampledFrame->channel_layout = AudioCodecContextOut->channel_layout;
    pResampledFrame->format = AudioCodecContextOut->sample_fmt;
    pResampledFrame->sample_rate = AudioCodecContextOut->sample_rate;
    av_frame_get_buffer(pResampledFrame, 0);

    uint8_t** resampledData = nullptr;  // holds the output of swr_convert()

    SwrContext *swrContext = nullptr;
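    // The resampler converts from the device's sample format/rate/layout
    // to whatever the encoder context was configured with above.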
    swrContext = swr_alloc_set_opts(swrContext,
                                    av_get_default_channel_layout(AudioCodecContextOut->channels),
                                    AudioCodecContextOut->sample_fmt,
                                    AudioCodecContextOut->sample_rate,
                                    av_get_default_channel_layout(AudioCodecContextIn->channels),
                                    AudioCodecContextIn->sample_fmt,
                                    AudioCodecContextIn->sample_rate,
                                    0,
                                    nullptr);

    swr_init(swrContext);

    AVAudioFifo *AudioFifoBuff;
    AudioFifoBuff = av_audio_fifo_alloc(AudioCodecContextOut->sample_fmt, AudioCodecContextOut->channels, 1);
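    // Capture packets rarely contain exactly frame_size samples, so the resampled
    // audio is buffered in this FIFO and pulled out in frame_size chunks below.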

    int sampleCount = 0;

    while (!isInterruptionRequested())   // capture loop; the exit condition comes from the caller
    {
        int res = av_read_frame(pAudioContext, pRawAudioPkt);
        if (res >= 0 && pRawAudioPkt->stream_index == audioStreamId)
        {
            int ret = avcodec_send_packet(AudioCodecContextIn, pRawAudioPkt);
            av_packet_unref(pRawAudioPkt);   // unref here so the error path below does not leak
            if (ret < 0)
            {
                continue;
            }

            // Drain every frame the decoder can produce from this packet
            while (!isInterruptionRequested())
            {
                ret = avcodec_receive_frame(AudioCodecContextIn, pRawAudioFrame);
                if (ret < 0)   // EAGAIN, EOF or a real error: stop draining this packet
                {
                    break;
                }

                // Lazily allocate the output buffer, sized for one captured frame.
                // This assumes every frame from the device has the same nb_samples
                // and that the input and output sample rates are equal.
                if (resampledData == nullptr)
                {
                    resampledData = (uint8_t **)calloc(AudioCodecContextOut->channels, sizeof(*resampledData));

                    av_samples_alloc(resampledData, nullptr, AudioCodecContextOut->channels,
                                     pRawAudioFrame->nb_samples, AudioCodecContextOut->sample_fmt, 0);
                }
                // swr_convert() returns the number of samples actually written per channel
                int converted = swr_convert(swrContext,
                            resampledData, pRawAudioFrame->nb_samples,
                            (const uint8_t **)pRawAudioFrame->extended_data, pRawAudioFrame->nb_samples);

                // Append the converted samples to the FIFO
                av_audio_fifo_realloc(AudioFifoBuff, av_audio_fifo_size(AudioFifoBuff) + converted);
                av_audio_fifo_write(AudioFifoBuff, (void **)resampledData, converted);

                // Encode whenever the FIFO holds at least one full encoder frame
                while (av_audio_fifo_size(AudioFifoBuff) >= AudioCodecContextOut->frame_size)
                {
                    av_frame_make_writable(pResampledFrame);  // the encoder may still reference the previous buffer
                    ret = av_audio_fifo_read(AudioFifoBuff, (void **)(pResampledFrame->data), AudioCodecContextOut->frame_size);
                    pResampledFrame->pts = sampleCount;       // pts counts samples, matching the 1/sample_rate time_base
                    sampleCount += pResampledFrame->nb_samples;

                    avcodec_send_frame(AudioCodecContextOut, pResampledFrame);

                    while (ret >= 0)
                    {
                        ret = avcodec_receive_packet(AudioCodecContextOut, reEncodecdAudioPkt);
                        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
                            break;

                        av_packet_rescale_ts(reEncodecdAudioPkt, AudioCodecContextOut->time_base, avFmtCtxOut->streams[audioIndexOut]->time_base);
                        reEncodecdAudioPkt->stream_index = audioIndexOut;

                        av_write_frame(avFmtCtxOut, reEncodecdAudioPkt);

                        av_packet_unref(reEncodecdAudioPkt);
                     }
                 }
             }
        }
    }
    av_write_trailer(avFmtCtxOut);
    avio_close(avFmtCtxOut->pb);
    swr_free(&swrContext);
    av_frame_free(&pResampledFrame);
    av_frame_free(&pRawAudioFrame);
    av_packet_free(&reEncodecdAudioPkt);
    av_packet_free(&pRawAudioPkt);
    av_audio_fifo_free(AudioFifoBuff);
    if (resampledData) {   // still null if no audio frame was ever decoded
        av_freep(&resampledData[0]);
        free(resampledData);
    }
    avcodec_free_context(&AudioCodecContextIn);
    avcodec_free_context(&AudioCodecContextOut);
    avformat_close_input(&pAudioContext);
    avformat_free_context(avFmtCtxOut);
}
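One gap worth calling out: the loop above never drains the encoder before the trailer is written, so the last few AAC packets still buffered inside the encoder are lost. A minimal sketch of that flush step, reusing the variables from the function above and placed right before av_write_trailer(), could look like this:

    // Flush the encoder: a null frame switches it to draining mode,
    // then read out whatever packets are still buffered inside it.
    avcodec_send_frame(AudioCodecContextOut, nullptr);
    while (avcodec_receive_packet(AudioCodecContextOut, reEncodecdAudioPkt) >= 0)
    {
        av_packet_rescale_ts(reEncodecdAudioPkt, AudioCodecContextOut->time_base,
                             avFmtCtxOut->streams[audioIndexOut]->time_base);
        reEncodecdAudioPkt->stream_index = audioIndexOut;
        av_write_frame(avFmtCtxOut, reEncodecdAudioPkt);
        av_packet_unref(reEncodecdAudioPkt);
    }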
