[FFmpeg] How to extract the 5-15 s segment of an .mp4 file

Overview

Extract the 5-15 s segment of an mp4 file and play the result with ffplay.

Overall workflow

1. Open the input media file                         avformat_open_input
2. Retrieve the input stream information             avformat_find_stream_info
3. Create the output format context                  avformat_alloc_output_context2
4. Create an output AVStream per input stream        avformat_new_stream
5. Copy the codec parameters                         avcodec_parameters_copy
6. Write the file header                             avformat_write_header
7. Seek the input to the requested start time        av_seek_frame
8. Read packets from the input                       av_read_frame
9. Compute pts/dts/duration for the output           av_rescale_q
10. Write the packet to the output file              av_interleaved_write_frame
11. Write the file trailer                           av_write_trailer
    

Code

    const char *inFilename = "/home/16THDD/xieyingbo/xieyingbo/output.mp4";
    const char *outFilename = "/home/16THDD/xieyingbo/xieyingbo/output_out.mp4";
    int startTime = 5, endTime = 15;

    av_register_all(); // deprecated and unnecessary since FFmpeg 4.0; kept here for older builds

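    // 1. Open the input file and read its container header.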
    AVFormatContext *inFormatCtx = NULL;
    int ret = avformat_open_input(&inFormatCtx, inFilename, NULL, NULL);
    if(ret != 0)
    {
        std::cout << "open input format error." << std::endl;
        return;
    }

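    // 2. Probe the streams to fill in codec parameters, durations and time bases.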
    ret = avformat_find_stream_info(inFormatCtx, NULL);
    if(ret < 0)
    {
        std::cout << "find stream error." << std::endl;
        return;
    }

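    // 3. Allocate the output muxer context; the mp4 format is guessed from the output file name.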
    AVFormatContext *outFormatCtx = NULL;
    ret = avformat_alloc_output_context2(&outFormatCtx, NULL, NULL, outFilename);
    if(ret < 0)
    {
        std::cout << "create outpFormatCtx error." << std::endl;
        return;
    }

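    // 4./5. Create one output stream per input stream and copy its codec parameters (stream copy, no re-encoding).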
    for(unsigned int i = 0; i < inFormatCtx->nb_streams; i++)
    {
        AVStream *instream = inFormatCtx->streams[i];
        AVStream *outstream = avformat_new_stream(outFormatCtx, NULL);
        if(outstream == NULL)
        {
            std::cout << "create stream error." << std::endl;
            return;
        }
        ret = avcodec_parameters_copy(outstream->codecpar, instream->codecpar);
        if(ret < 0)
        {
            std::cout << "copy streams error." << std::endl;
            return;
        }
        outstream->codecpar->codec_tag = 0; // let the mp4 muxer pick a suitable codec tag
    }

    if(!(outFormatCtx->oformat->flags & AVFMT_NOFILE)) // AVFMT_NOFILE is a flag of the output format (oformat), not of the context
    {
        ret = avio_open(&outFormatCtx->pb, outFilename, AVIO_FLAG_WRITE);
        if(ret < 0)
        {
            std::cout << "open outFilename error." << std::endl;
            return;
        }
    }

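    // 6. Write the container header to the output file.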
    ret = avformat_write_header(outFormatCtx, NULL);
    if(ret < 0)
    {
        std::cout << "avformat_write_header error." << std::endl;
        return;
    }

    // 7. Seek the input to the 5 s mark (stream_index -1 means the default stream, timestamp in AV_TIME_BASE units).
    //    AVSEEK_FLAG_ANY may land on a non-key frame, which is why the cut below is a blunt one.
    ret = av_seek_frame(inFormatCtx, -1, startTime*AV_TIME_BASE, AVSEEK_FLAG_ANY);
    if(ret < 0)
    {
        std::cout << "seek frame error." << std::endl;
        return;
    }

    // Record the first PTS/DTS seen on each stream so the clip's timestamps can be shifted to start near 0.
    int64_t *startPTS = (int64_t *)av_mallocz_array(inFormatCtx->nb_streams, sizeof(int64_t)); // one slot per stream
    memset(startPTS, -1, inFormatCtx->nb_streams*sizeof(int64_t)); // -1 marks "not recorded yet"
    int64_t *startDTS = (int64_t *)av_mallocz_array(inFormatCtx->nb_streams, sizeof(int64_t));
    memset(startDTS, -1, inFormatCtx->nb_streams*sizeof(int64_t));

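    // 8.-10. Read packets, rebase pts/dts/duration so the clip starts near 0, and mux them into the output.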
    AVPacket packet;
    av_init_packet(&packet);
    while(av_read_frame(inFormatCtx, &packet) == 0)
    {
        AVStream *instream = inFormatCtx->streams[packet.stream_index];
        AVStream *outstream = outFormatCtx->streams[packet.stream_index];

        if(endTime < packet.pts * av_q2d(instream->time_base)) // past the 15 s mark: stop copying
        {
            av_packet_unref(&packet);
            break;
        }

        if(startPTS[packet.stream_index] == -1) // remember the first PTS of this stream (the arrays were filled with -1)
        {
            startPTS[packet.stream_index] = packet.pts;
        }
        if(startDTS[packet.stream_index] == -1)
        {
            startDTS[packet.stream_index] = packet.dts;
        }

        packet.pts = av_rescale_q(packet.pts - startPTS[packet.stream_index], instream->time_base, outstream->time_base);
        packet.dts = av_rescale_q(packet.dts - startDTS[packet.stream_index], instream->time_base, outstream->time_base);
        if(packet.pts < 0)
        {
            packet.pts = 0;
        }
        if(packet.dts < 0)
        {
            packet.dts = 0;
        }
        packet.duration = av_rescale_q(packet.duration, instream->time_base, outstream->time_base);
        packet.pos = -1;
        // The cut is made bluntly (not on keyframe boundaries); because of I/P/B frames DTS must never exceed PTS,
        // so drop any packet whose timestamps ended up inverted by the shift above.
        if(packet.pts < packet.dts)
        {
            av_packet_unref(&packet); // release the dropped packet, otherwise its buffer leaks
            continue;
        }

        ret = av_interleaved_write_frame(outFormatCtx, &packet); // the muxer validates PTS/DTS here; packets with bad timestamps are rejected
        av_packet_unref(&packet);
    }
    // 11. Write the file trailer, then release everything that was allocated or opened.
    av_write_trailer(outFormatCtx);

    av_freep(&startPTS);
    av_freep(&startDTS);
    if(!(outFormatCtx->oformat->flags & AVFMT_NOFILE))
    {
        avio_closep(&outFormatCtx->pb);
    }
    avformat_free_context(outFormatCtx);
    avformat_close_input(&inFormatCtx);

    std::cout << "Clipping finished." << std::endl;

 

Summary

Play the clipped mp4 with ffplay to verify the result, for example: ffplay output_out.mp4

Additional notes

My take on the time base: it is the yardstick for expressing time, i.e. the number of equal ticks a second is divided into, and every timestamp is counted in those ticks.
PTS: presentation timestamp (when a frame should be displayed).
DTS: decoding timestamp (when a frame must be decoded).
DTS <= PTS: a frame has to be decoded no later than it is displayed; the two differ only when B-frames are present. The conversion between AV_TIME_BASE units and a stream's time base is illustrated by the sketch below.
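A minimal, self-contained sketch of this time-base arithmetic. The 90 kHz stream time base and the pts value are assumed for illustration only (they are not taken from the file above); the two conversions mirror the av_seek_frame position and the endTime comparison used in the code:

    #include <iostream>
    extern "C" {
    #include <libavutil/avutil.h>
    #include <libavutil/mathematics.h>
    }

    int main()
    {
        AVRational usTimeBase     = {1, AV_TIME_BASE};  // 1/1000000, the unit av_seek_frame expects for stream_index -1
        AVRational streamTimeBase = {1, 90000};         // illustrative: a common 90 kHz video track time base

        // 5 seconds expressed in AV_TIME_BASE units (what the code passes to av_seek_frame).
        int64_t startUs = 5LL * AV_TIME_BASE;           // 5000000

        // The same instant expressed in the stream's own time base.
        int64_t startTicks = av_rescale_q(startUs, usTimeBase, streamTimeBase); // 450000 ticks

        // Converting a packet timestamp back to seconds (the endTime comparison in the copy loop).
        int64_t pts = 1350000;                          // hypothetical packet.pts
        double seconds = pts * av_q2d(streamTimeBase);  // 15.0 s

        std::cout << "5 s -> " << startTicks << " ticks at 1/90000" << std::endl;
        std::cout << "pts " << pts << " -> " << seconds << " s" << std::endl;
        return 0;
    }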