#include "libavutil/avutil.h"
#include "libavformat/avformat.h"
/*
 * Dump per-packet pts/dts (raw ticks and seconds) for every stream of a
 * media file, after printing the container duration and each stream's
 * time base.
 *
 * Usage: prog <infilename>
 * Returns 0 on success, -1 on usage/open/probe failure.
 */
int main(int argc, char const *argv[])
{
    av_log_set_level(AV_LOG_DEBUG);
    if (argc < 2)
    {
        av_log(NULL, AV_LOG_ERROR, "Usage: %s <infilename>\n", argv[0]);
        return -1;
    }
    const char *infileName = argv[1];
    AVFormatContext *inFmtCtx = NULL;
    /* The original ignored both return codes below; a bad path or an
     * unreadable file then dereferenced a NULL/invalid context. */
    int ret = avformat_open_input(&inFmtCtx, infileName, NULL, NULL);
    if (ret != 0)
    {
        av_log(NULL, AV_LOG_ERROR, "open input failed: %s\n", av_err2str(ret));
        return -1;
    }
    ret = avformat_find_stream_info(inFmtCtx, NULL);
    if (ret < 0)
    {
        av_log(NULL, AV_LOG_ERROR, "find stream info failed: %s\n", av_err2str(ret));
        avformat_close_input(&inFmtCtx);
        return -1;
    }
    av_dump_format(inFmtCtx, 0, infileName, 0);
    /* duration is int64_t: cast to long long and use %lld — the
     * original's %ld mismatches int64_t on 32-bit platforms (UB). */
    av_log(NULL, AV_LOG_INFO, "input file duration: %lld us, %lf s\n",
           (long long)inFmtCtx->duration, inFmtCtx->duration * av_q2d(AV_TIME_BASE_Q));
    AVRational videoTimeBase;
    AVRational audioTimeBase;
    for (unsigned int i = 0; i < inFmtCtx->nb_streams; i++) /* nb_streams is unsigned */
    {
        AVStream *inStream = inFmtCtx->streams[i];
        if (inStream->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
        {
            videoTimeBase = inStream->time_base;
            av_log(NULL, AV_LOG_INFO, "video time base: num = %d, den = %d\n", videoTimeBase.num, videoTimeBase.den);
        }
        if (inStream->codecpar->codec_type == AVMEDIA_TYPE_AUDIO)
        {
            audioTimeBase = inStream->time_base;
            av_log(NULL, AV_LOG_INFO, "audio time base: num = %d, den = %d\n", audioTimeBase.num, audioTimeBase.den);
        }
    }
    AVPacket packet;
    av_init_packet(&packet);
    while (av_read_frame(inFmtCtx, &packet) == 0)
    {
        AVStream *inStream = inFmtCtx->streams[packet.stream_index];
        // PTS: presentation timestamp — when this frame is displayed; seconds = PTS * time_base
        // DTS: decoding timestamp — when this frame is decoded; seconds = DTS * time_base
        av_log(NULL, AV_LOG_INFO, "streamIndex = %d, pts = %lld, ptsTime = %lf, dts = %lld, dtsTime = %lf\n",
               packet.stream_index, (long long)packet.pts, packet.pts * av_q2d(inStream->time_base),
               (long long)packet.dts, packet.dts * av_q2d(inStream->time_base));
        /* av_read_frame hands us a new reference every iteration; the
         * original never released it and leaked every packet. */
        av_packet_unref(&packet);
    }
    avformat_close_input(&inFmtCtx); /* was never closed in the original */
    return 0;
}
/*
 * Note — when should we stop reading audio/video data?
 *   (1) av_read_frame yields no more data (end of file), or
 *   (2) we have read up to the target end time in seconds.
 */
#include "libavutil/avutil.h"
#include "libavformat/avformat.h"
/*
 * Workflow for clipping a section out of a muxed (container) file.
 */
int main(int argc, char const *argv[])
{
av_log_set_level(AV_LOG_INFO);
if (argc < 5)
{
av_log(NULL, AV_LOG_ERROR, "Usage : %s <infileName> <startTime> <endTime> <outfileName>\n", argv[0]);
return -1;
}
const char *infileName = argv[1];
int startTime = atoi(argv[2]);
int endTime = atoi(argv[3]);
const char *outfileName = argv[4];
AVFormatContext *inFmtCtx = NULL;
//1.打开输入媒体文件
int ret = avformat_open_input(&inFmtCtx, infileName, NULL, NULL);
if (ret != 0)
{
return -1;
}
//2.获取输入流信息
ret = avformat_find_stream_info(inFmtCtx, NULL);
if (ret < 0)
{
goto fail;
}
AVFormatContext *outFmtCtx = NULL;
//3.创建输出流上下文
ret = avformat_alloc_output_context2(&outFmtCtx, NULL, NULL, outfileName);
if (ret < 0)
{
goto fail;
}
for (int i = 0; i < inFmtCtx->nb_streams; i++)
{
AVStream *inStream = inFmtCtx->streams[i];
//4.创建输出码流的AVStream
AVStream *outStream = avformat_new_stream(outFmtCtx, NULL);
if (outStream == NULL)
{
ret = -1;
goto fail;
}
//5.拷贝编码参数
ret = avcodec_parameters_copy(outStream->codecpar, inStream->codecpar);
if (ret < 0)
{
goto fail;
}
outStream->codecpar->codec_tag = 0;
}
if (!(outFmtCtx->oformat->flags & AVFMT_NOFILE))
{
ret = avio_open(&outFmtCtx->pb, outfileName, AVIO_FLAG_WRITE);
if (ret < 0)
{
goto fail;
}
}
//6.写入视频文件头
ret = avformat_write_header(outFmtCtx, NULL);
if (ret < 0)
{
av_log(NULL, AV_LOG_ERROR, "write header failed: %s\n", av_err2str(ret));
goto fail;
}
//8.跳转指定时间戳
ret = av_seek_frame(inFmtCtx, -1, startTime * AV_TIME_BASE, AVSEEK_FLAG_ANY);
if (ret < 0)
{
av_log(NULL, AV_LOG_ERROR, "seek frame failed: %s\n", av_err2str(ret));
goto fail;
}
int64_t *startPTS = av_mallocz_array(inFmtCtx->nb_streams, sizeof(int64_t));
memset(startPTS, 0, inFmtCtx->nb_streams * sizeof(int64_t));
int64_t *startDTS = av_mallocz_array(inFmtCtx->nb_streams, sizeof(int64_t));
memset(startDTS, 0, inFmtCtx->nb_streams * sizeof(int64_t));
AVPacket packet;
av_init_packet(&packet);
//7.读取输入视频流
while (av_read_frame(inFmtCtx, &packet) == 0)
{
AVStream *inStream = inFmtCtx->streams[packet.stream_index];
AVStream *outStream = outFmtCtx->streams[packet.stream_index];
if (endTime < packet.pts * av_q2d(inStream->time_base))
{
av_packet_unref(&packet);
break;
}
if (startPTS[packet.stream_index] == 0)
{
startPTS[packet.stream_index] = packet.pts;
}
if (startDTS[packet.stream_index] == 0)
{
startDTS[packet.stream_index] = packet.pts;
}
//9.计算pts、dts、duration
packet.pts = av_rescale_q(packet.pts - startPTS[packet.stream_index], inStream->time_base, outStream->time_base);
packet.dts = av_rescale_q(packet.dts - startDTS[packet.stream_index], inStream->time_base, outStream->time_base);
if (packet.pts < 0)
{
packet.pts = 0;
}
if (packet.dts < 0)
{
packet.dts = 0;
}
packet.duration = av_rescale_q(packet.duration, inStream->time_base, outStream->time_base);
packet.pos = -1;
if (packet.pts < packet.dts)
{
av_packet_unref(&packet);
continue;
}
//10.写入视频流数据
ret = av_interleaved_write_frame(outFmtCtx, &packet);
if (ret < 0)
{
av_log(NULL, AV_LOG_ERROR, "write frame failed: %s\n", av_err2str(ret));
av_packet_unref(&packet);
break;
}
av_packet_unref(&packet);
}
//11.写入视频文件末尾
av_write_trailer(outFmtCtx);
fail:
if (inFmtCtx)
{
avformat_close_input(&inFmtCtx);
}
if (outFmtCtx && !(outFmtCtx->oformat->flags & AVFMT_NOFILE))
{
avio_closep(&outFmtCtx->pb);
}
if (outFmtCtx)
{
avformat_free_context(outFmtCtx);
}
if (startPTS)
{
av_freep(&startPTS);
}
if (startDTS)
{
av_freep(&startDTS);
}
return ret;
}