概要
截取一段mp4文件中的5-15s内容,使用ffplay播放
整体架构流程
1.打开输入媒体文件 avformat_open_input
2.获取输入流信息 avformat_find_stream_info
3.创建输出流上下文 avformat_alloc_output_context2
4.创建输出码流的AVStream avformat_new_stream
5.拷贝编码参数 avcodec_parameters_copy
6.写入视频文件头 avformat_write_header
7.输入流跳转到指定时间戳位置 av_seek_frame
8.读取输入视频流 av_read_frame
9.计算pts/dts/duration av_rescale_q
10.写入视频流数据 av_interleaved_write_frame
11.写入视频文件末尾 av_write_trailer
代码
// Cut the startTime..endTime segment out of an MP4 by stream copy (no re-encode).
const char *inFilename = "/home/16THDD/xieyingbo/xieyingbo/output.mp4";
const char *outFilename = "/home/16THDD/xieyingbo/xieyingbo/output_out.mp4";
int startTime = 5, endTime = 15;  // segment bounds, in seconds
av_register_all();  // no-op since FFmpeg 4.0; kept for compatibility with older builds
AVFormatContext *inFormatCtx = NULL;
// Open and probe the input container.
int ret = avformat_open_input(&inFormatCtx, inFilename, NULL, NULL);
if (ret != 0)
{
    std::cout << "open input format error." << std::endl;
    return;
}
// Read a few packets to fill in stream parameters (codecpar, time bases, ...).
ret = avformat_find_stream_info(inFormatCtx, NULL);
if (ret < 0)
{
    std::cout << "find stream error." << std::endl;
    avformat_close_input(&inFormatCtx);  // fix: don't leak the demuxer context on failure
    return;
}
// Create the output muxer context and mirror every input stream into it.
AVFormatContext *outFormatCtx = NULL;
ret = avformat_alloc_output_context2(&outFormatCtx, NULL, NULL, outFilename);
if (ret < 0)
{
    std::cout << "create outFormatCtx error." << std::endl;
    avformat_close_input(&inFormatCtx);  // fix: release the input context on failure
    return;
}
// nb_streams is unsigned; use an unsigned index to avoid a sign-compare warning.
for (unsigned int i = 0; i < inFormatCtx->nb_streams; i++)
{
    AVStream *instream = inFormatCtx->streams[i];
    AVStream *outstream = avformat_new_stream(outFormatCtx, NULL);
    if (outstream == NULL)
    {
        std::cout << "create stream error." << std::endl;
        avformat_free_context(outFormatCtx);  // fix: free both contexts on failure
        avformat_close_input(&inFormatCtx);
        return;
    }
    // Stream copy: duplicate the codec parameters instead of re-encoding.
    ret = avcodec_parameters_copy(outstream->codecpar, instream->codecpar);
    if (ret < 0)
    {
        std::cout << "copy streams error." << std::endl;
        avformat_free_context(outFormatCtx);
        avformat_close_input(&inFormatCtx);
        return;
    }
    // Zero the tag so the muxer picks one that is valid for the output container.
    outstream->codecpar->codec_tag = 0;
}
// Open the output file unless the muxer needs no file of its own.
// fix: AVFMT_NOFILE is an AVOutputFormat flag, so it must be tested against
// oformat->flags, not AVFormatContext::flags (which holds AVFMT_FLAG_* values).
if (!(outFormatCtx->oformat->flags & AVFMT_NOFILE))
{
    ret = avio_open(&outFormatCtx->pb, outFilename, AVIO_FLAG_WRITE);
    if (ret < 0)
    {
        std::cout << "open outFilename error." << std::endl;
        avformat_free_context(outFormatCtx);
        avformat_close_input(&inFormatCtx);
        return;
    }
}
// Write the container header for the streams created above.
ret = avformat_write_header(outFormatCtx, NULL);
if (ret < 0)
{
    std::cout << "avformat_write_header error." << std::endl;
    if (!(outFormatCtx->oformat->flags & AVFMT_NOFILE))
        avio_closep(&outFormatCtx->pb);  // fix: close the file we just opened
    avformat_free_context(outFormatCtx);
    avformat_close_input(&inFormatCtx);
    return;
}
// Seek to the keyframe at or before startTime (stream_index -1 => AV_TIME_BASE units).
// fix: AVSEEK_FLAG_ANY may land on a non-keyframe, which a stream copy cannot
// decode from; AVSEEK_FLAG_BACKWARD guarantees a decodable cut point.
ret = av_seek_frame(inFormatCtx, -1, startTime * (int64_t)AV_TIME_BASE, AVSEEK_FLAG_BACKWARD);
if (ret < 0)
{
    std::cout << "seek frame error." << std::endl;
    if (!(outFormatCtx->oformat->flags & AVFMT_NOFILE))
        avio_closep(&outFormatCtx->pb);
    avformat_free_context(outFormatCtx);
    avformat_close_input(&inFormatCtx);
    return;
}
int64_t *startPTS = (int64_t *)av_mallocz_array(inFormatCtx->nb_streams, sizeof(int64_t));//保存多少路流的PTS
memset(startPTS, -1, inFormatCtx->nb_streams*sizeof(int64_t));
int64_t *startDTS = (int64_t *)av_mallocz_array(inFormatCtx->nb_streams, sizeof(int64_t));
memset(startDTS, -1, inFormatCtx->nb_streams*sizeof(int64_t));
AVPacket packet;
av_init_packet(&packet);
while(av_read_frame(inFormatCtx, &packet) == 0)
{
AVStream *instream = inFormatCtx->streams[packet.stream_index];
AVStream *outsream = outFormatCtx->streams[packet.stream_index];
if(endTime < packet.pts* av_q2d(instream->time_base))
{
av_packet_unref(&packet);
break;
}
if(startPTS[packet.stream_index] == 0)//起始时间戳
{
startPTS[packet.stream_index] = packet.pts;
}
if(startDTS[packet.stream_index] == 0)
{
startDTS[packet.stream_index] = packet.dts;
}
packet.pts = av_rescale_q(packet.pts - startPTS[packet.stream_index], instream->time_base, outsream->time_base);
packet.dts = av_rescale_q(packet.dts - startDTS[packet.stream_index], instream->time_base, outsream->time_base);
if(packet.pts < 0)
{
packet.pts = 0;
}
if(packet.dts < 0)
{
packet.dts = 0;
}
packet.duration = av_rescale_q(packet.duration, instream->time_base, outsream->time_base);
packet.pos = -1;
//视频的时间是暴力截取的,由于有I、P、B帧存在,DTS应该在PTS之前的
if(packet.pts < packet.dts)
{
continue;
}
ret = av_interleaved_write_frame(outFormatCtx, &packet);//这里会做一个PTS和DTS的校验,如果有问题,写不进去视频帧
av_packet_unref(&packet);
}
av_write_trailer(outFormatCtx);
std::cout << "转换完成" << std::endl;