Recording an RTSP video stream with ffmpeg

Playing a network video stream is not very different from playing a file on disk; the main differences are the timeout settings and handling delayed data. Recording is likewise close to recording from a local file; the main differences are in setting PTS/DTS, and the SPS/PPS data is also handled a little differently.
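On the input side, the RTSP-specific part is mostly a matter of demuxer options passed at open time. A minimal sketch (the URL is a placeholder; stimeout is the RTSP socket timeout in microseconds on older FFmpeg builds, newer ones use timeout instead):

    AVFormatContext *pFormatCtx = NULL;
    AVDictionary *opts = NULL;
    av_dict_set(&opts, "rtsp_transport", "tcp", 0); // TCP is more reliable than UDP here
    av_dict_set(&opts, "stimeout", "5000000", 0);   // give up after 5 s of socket silence
    if (avformat_open_input(&pFormatCtx, "rtsp://example.com/stream", NULL, &opts) < 0)
    {
        fprintf(stderr, "avformat_open_input() failed");
        return;
    }
    av_dict_free(&opts);
    avformat_find_stream_info(pFormatCtx, NULL);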

Initialization:

    // Allocate the output context for the target file
    avformat_alloc_output_context2(&o_fmt_ctx, NULL, NULL, filename);
    // Create the output video stream; avformat_new_stream() already allocates
    // codecpar, so an extra avcodec_parameters_alloc() would only leak it
    o_video_stream = avformat_new_stream(o_fmt_ctx, NULL);
    // Copy the video codec parameters from the input and take over its time base
    avcodec_parameters_copy(o_video_stream->codecpar, pFormatCtx->streams[videoStream]->codecpar);
    o_video_stream->time_base = pFormatCtx->streams[videoStream]->time_base;
    // Set up the H.264 bitstream filter; the goal is to get SPS and PPS into the stream
    // 1. Look up the filter by name
    const AVBitStreamFilter *bsf = av_bsf_get_by_name("h264_mp4toannexb");
    if (!bsf)
    {
        printf("av_bsf_get_by_name() failed");
        return;
    }
    // 2. Allocate the filter context
    av_bsf_alloc(bsf, &m_bsfc);
    // 3. Feed it the codec parameters
    avcodec_parameters_copy(m_bsfc->par_in, o_video_stream->codecpar);
    // 4. Initialize the filter context
    av_bsf_init(m_bsfc);

    // Initialize the codecpar and stream for the audio part of the recorded file
    o_audio_stream = avformat_new_stream(o_fmt_ctx, NULL);
    avcodec_parameters_copy(o_audio_stream->codecpar, pFormatCtx->streams[audioStream]->codecpar);
    o_audio_stream->time_base = pFormatCtx->streams[audioStream]->time_base;
    // Same four steps for the AAC bitstream filter
    const AVBitStreamFilter *bsfcAAC = av_bsf_get_by_name("aac_adtstoasc");
    if (!bsfcAAC)
    {
        printf("av_bsf_get_by_name() failed");
        return;
    }
    av_bsf_alloc(bsfcAAC, &m_absfc);
    avcodec_parameters_copy(m_absfc->par_in, o_audio_stream->codecpar);
    av_bsf_init(m_absfc);
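
One step not shown above: before any packet can be written, the output file must be opened and the container header written. A minimal sketch, reusing o_fmt_ctx and filename from above:

    // Open the output file unless the muxer handles its own I/O
    if (!(o_fmt_ctx->oformat->flags & AVFMT_NOFILE))
    {
        if (avio_open(&o_fmt_ctx->pb, filename, AVIO_FLAG_WRITE) < 0)
        {
            fprintf(stderr, "avio_open() failed");
            return;
        }
    }
    // Write the container header; note this may adjust the streams' time_base
    if (avformat_write_header(o_fmt_ctx, NULL) < 0)
    {
        fprintf(stderr, "avformat_write_header() failed");
        return;
    }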

Recording

The main work here is setting the timestamps and adding packets such as the SPS.
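The fragments below run inside the usual demuxing loop, which the original code does not show. A minimal sketch of that loop, assuming pkt2 is a clone of each demuxed packet and m_pktFilter is allocated once up front:

    AVPacket *m_pktFilter = av_packet_alloc(); // reused for filtered packets
    AVPacket packet;
    while (av_read_frame(pFormatCtx, &packet) >= 0)
    {
        AVPacket *pkt2 = av_packet_clone(&packet);
        // ... timestamp fix-up and writing, as shown below ...
        av_packet_free(&pkt2);
        av_packet_unref(&packet);
    }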

    if (isRTSP)
    {
        if (pkt2->pts == AV_NOPTS_VALUE)
        {
            // Synthesize PTS/DTS for packets that arrive without one
            AVRational time_base1 = pFormatCtx->streams[videoStream]->time_base;
            // Duration between two frames (us)
            int64_t calc_duration = (double)AV_TIME_BASE / av_q2d(pFormatCtx->streams[videoStream]->r_frame_rate);
            pkt2->pts = (double)(pktcount * calc_duration) / (double)(av_q2d(time_base1) * AV_TIME_BASE);
            pkt2->dts = pkt2->pts;
            pkt2->duration = (double)calc_duration / (double)(av_q2d(time_base1) * AV_TIME_BASE);
        }
        // Important: delay, so writing keeps pace with real time
        if (pkt2->stream_index == videoStream)
        {
            AVRational time_base = pFormatCtx->streams[videoStream]->time_base;
            AVRational time_base_q = {1, AV_TIME_BASE}; // i.e. AV_TIME_BASE_Q
            int64_t pts_time = av_rescale_q(pkt2->dts, time_base, time_base_q);
            int64_t now_time = av_gettime() - mVideoStartTime;
            if (pts_time > now_time)
                av_usleep(pts_time - now_time);
        }

        AVStream *in_stream  = pFormatCtx->streams[pkt2->stream_index];
        AVStream *out_stream = o_fmt_ctx->streams[pkt2->stream_index];
        // Convert PTS/DTS from the input time base to the output time base
        AVRounding rnd = (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX);
        pkt2->pts = av_rescale_q_rnd(pkt2->pts, in_stream->time_base, out_stream->time_base, rnd);
        pkt2->dts = av_rescale_q_rnd(pkt2->dts, in_stream->time_base, out_stream->time_base, rnd);
        pkt2->duration = av_rescale_q(pkt2->duration, in_stream->time_base, out_stream->time_base);
        pkt2->pos = -1;
    }
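    // Worked example (assumed values): with time_base = 1/90000 and
    // r_frame_rate = 25/1, calc_duration = 1000000 / 25 = 40000 us, so each
    // frame advances pts by 40000 / ((1/90000) * 1000000) = 3600 ticks.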

    pktcount++; // packet counter used above when synthesizing missing PTS

    // Latch once the first key frame arrives; from then on packets
    // go through the bitstream filters
    if (pkt2->flags & AV_PKT_FLAG_KEY)
    {
        m_bFindKey = true;
    }
    if (m_bFindKey)
    {
        m_pktFilter->data = NULL;
        m_pktFilter->size = 0;

        AVStream *in_stream  = pFormatCtx->streams[pkt2->stream_index];
        AVStream *out_stream = o_fmt_ctx->streams[pkt2->stream_index];

        if (pkt2->stream_index == videoStream)
        {
            // Run the packet through h264_mp4toannexb; a robust implementation
            // should check both return values and loop on av_bsf_receive_packet()
            av_bsf_send_packet(m_bsfc, pkt2);
            av_bsf_receive_packet(m_bsfc, m_pktFilter);

            m_pktFilter->pts = av_rescale_q_rnd(m_pktFilter->pts, in_stream->time_base, out_stream->time_base, AV_ROUND_NEAR_INF);
            m_pktFilter->dts = av_rescale_q_rnd(m_pktFilter->dts, in_stream->time_base, out_stream->time_base, AV_ROUND_NEAR_INF);
            m_pktFilter->duration = av_rescale_q_rnd(m_pktFilter->duration, in_stream->time_base, out_stream->time_base, AV_ROUND_NEAR_INF);
            m_pktFilter->stream_index = out_stream->index;

            int nError = av_interleaved_write_frame(o_fmt_ctx, m_pktFilter);
            if (nError != 0)
            {
                char tmpErrString[AV_ERROR_MAX_STRING_SIZE] = { 0 };
                av_make_error_string(tmpErrString, AV_ERROR_MAX_STRING_SIZE, nError);
                fprintf(stderr, "av_interleaved_write_frame video %d,%s", nError, tmpErrString);
            }
        }
        else if (pkt2->stream_index == audioStream)
        {
            // Same flow for audio, through aac_adtstoasc
            av_bsf_send_packet(m_absfc, pkt2);
            av_bsf_receive_packet(m_absfc, m_pktFilter);

            m_pktFilter->pts = av_rescale_q_rnd(m_pktFilter->pts, in_stream->time_base, out_stream->time_base, AV_ROUND_NEAR_INF);
            m_pktFilter->dts = av_rescale_q_rnd(m_pktFilter->dts, in_stream->time_base, out_stream->time_base, AV_ROUND_NEAR_INF);
            m_pktFilter->duration = av_rescale_q_rnd(m_pktFilter->duration, in_stream->time_base, out_stream->time_base, AV_ROUND_NEAR_INF);
            m_pktFilter->stream_index = out_stream->index;

            int nError = av_interleaved_write_frame(o_fmt_ctx, m_pktFilter);
            if (nError != 0)
            {
                char tmpErrString[AV_ERROR_MAX_STRING_SIZE] = { 0 };
                av_make_error_string(tmpErrString, AV_ERROR_MAX_STRING_SIZE, nError);
                fprintf(stderr, "av_interleaved_write_frame audio %d,%s", nError, tmpErrString);
            }
        }

        av_packet_unref(pkt2);
        av_packet_unref(m_pktFilter);
    }
    else
    {
        // Until the first key frame shows up, packets are written through unfiltered
        int nError = av_interleaved_write_frame(o_fmt_ctx, pkt2);
        if (nError != 0)
        {
            char tmpErrString[AV_ERROR_MAX_STRING_SIZE] = { 0 };
            av_make_error_string(tmpErrString, AV_ERROR_MAX_STRING_SIZE, nError);
            fprintf(stderr, "av_interleaved_write_frame %d,%s", nError, tmpErrString);
        }
        av_packet_unref(pkt2);
    }

With the setup above you can watch the RTSP stream and record it at the same time.
Of course, stable recording also needs proper error handling, and you must take care not to leak memory.
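
On the no-leaks point, a minimal teardown sketch for the objects created above (assuming recording stops cleanly):

    // Finalize the trailer/index, then free everything created during setup
    av_write_trailer(o_fmt_ctx);
    av_bsf_free(&m_bsfc);
    av_bsf_free(&m_absfc);
    av_packet_free(&m_pktFilter);
    if (!(o_fmt_ctx->oformat->flags & AVFMT_NOFILE))
        avio_closep(&o_fmt_ctx->pb);
    avformat_free_context(o_fmt_ctx);
    avformat_close_input(&pFormatCtx);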

Reference: https://wanggao1990.blog.csdn.net/article/details/114067251
