extern "C"
{
#include "libavutil/timestamp.h"
#include "libavformat/avformat.h"
};
/**
 * Dump one packet's timing info (pts/dts/duration, raw ticks and seconds)
 * to stdout, prefixed with `tag` (e.g. "in" / "out") for tracing the mux.
 *
 * Fix: the pts-in-seconds field was mislabeled "pkt_time"; it is now
 * "pts_time", consistent with dts_time/duration_time (and with the
 * FFmpeg remuxing example this follows).
 */
static void log_packet(const AVFormatContext *fmt_ctx, const AVPacket *pkt, const char *tag)
{
// Time base of the stream this packet belongs to; needed to convert
// tick counts into seconds for display.
AVRational *time_base = &fmt_ctx->streams[pkt->stream_index]->time_base;
printf("%s: pts:%s pts_time:%s dts:%s dts_time:%s duration:%s duration_time:%s stream_index:%d\n",
tag,
// raw timestamp rendered as a string ("NOPTS" when unset)
av_ts2str(pkt->pts),
// the same timestamp converted to seconds via the stream time base
av_ts2timestr(pkt->pts, time_base),
av_ts2str(pkt->dts),
av_ts2timestr(pkt->dts, time_base),
av_ts2str(pkt->duration),
av_ts2timestr(pkt->duration, time_base),
pkt->stream_index);
}
/**
 * Release the data referenced by a caller-owned AVPacket.
 *
 * NOTE(review): this deliberately shadows/overloads FFmpeg's own
 * av_packet_free(AVPacket **), which frees heap-allocated packets.
 * Callers in this file pass stack-allocated packets, so the packet
 * struct itself must NOT be freed here — only its payload reference.
 *
 * Fix: the previous body also called av_freep(pkt). av_freep() expects a
 * pointer-to-pointer and frees *(void **)pkt, i.e. it reinterpreted the
 * packet's first field as a heap pointer to free. That only avoided a
 * crash because av_packet_unref() had already set that field to NULL.
 * Consider renaming this helper to avoid the API collision.
 */
void av_packet_free(AVPacket* pkt)
{
if (!pkt)
{
return;
}
// Drop the packet's buffer reference and reset its fields.
av_packet_unref(pkt);
}
int cut_video(double from_seconds, double end_seconds, const char *in_filename, const char *out_filename)
{
AVOutputFormat *ofmt = nullptr;
AVFormatContext *ifmt_ctx = nullptr;
AVFormatContext *ofmt_ctx = nullptr;
int video_stream_index = -1;
int64_t *dts_start_from = nullptr;
int64_t *pts_start_from = nullptr;
AVPacket pkt;
int ret, i;
if ((ret = avformat_open_input(&ifmt_ctx, in_filename, 0, 0)) < 0)
{
fprintf(stderr, "Error opening input file %s\n", in_filename);
goto end;
}
if ((ret = avformat_find_stream_info(ifmt_ctx, 0)) < 0)
{
fprintf(stderr, "can't find stream infon\n");
goto end;
}
// 输出文件流的信息
av_dump_format(ifmt_ctx, 0, in_filename, 0);
// 创建输出上下文信息
avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, out_filename);
if (!ofmt_ctx)
{
fprintf(stderr, "could not allocate output context");
ret = AVERROR_UNKNOWN;
goto end;
}
ofmt = ofmt_ctx->oformat;
for (int i = 0; i < (int)ifmt_ctx->nb_streams; i++)
{
AVStream *in_stream = ifmt_ctx->streams[i];
AVStream *out_stream = avformat_new_stream(ofmt_ctx, NULL);
if (!out_stream)
{
fprintf(stderr, "Error:failed to allocate output stream\n");
ret = AVERROR_UNKNOWN;
goto end;
}
ret = avcodec_parameters_copy(out_stream->codecpar, in_stream->codecpar);
if (ret < 0)
{
fprintf(stderr, "failed to copy parameters\n");
goto end;
}
out_stream->codecpar->codec_tag = 0;
// 打印输出信息
av_dump_format(ofmt_ctx, 0, out_filename, 1);
if (!(ofmt->flags & AVFMT_NOFILE))
{
ret = avio_open(&ofmt_ctx->pb, out_filename, AVIO_FLAG_WRITE);
if (ret < 0)
{
goto end;
}
}
avformat_write_header(ofmt_ctx, nullptr);
if (ret < 0)
{
fprintf(stderr, "Error opening");
goto end;
}
if(out_stream->time_base.den == 0 || out_stream->time_base.num == 0)
{
out_stream->time_base = in_stream->time_base;
}
if(in_stream->codecpar->codec_type != AVMEDIA_TYPE_VIDEO)
{
continue;
}
video_stream_index = i;
int startSecond = 8;
int64_t startBaseTime = av_rescale_q(from_seconds * AV_TIME_BASE, AV_TIME_BASE_Q, in_stream->time_base);
int flags = AVSEEK_FLAG_BACKWARD; //默认使用
if(startBaseTime > 0 && startBaseTime < in_stream->duration)
{
// H.264 I frames don't always register as "keyframes" in FFmpeg
flags |= AVSEEK_FLAG_ANY; //加了这个,有时定位更不准
}
ret = av_seek_frame(ifmt_ctx, i, (startBaseTime + in_stream->start_time), flags);
//ret = av_seek_frame(ifmt_ctx, -1, from_seconds * AV_TIME_BASE, AVSEEK_FLAG_ANY);
// 定位到目标时间点
if (ret < 0)
{
fprintf(stderr, "Error seek!\n");
goto end;
}
}
dts_start_from = (int64_t *)malloc(sizeof(int64_t) * ifmt_ctx->nb_streams);
memset(dts_start_from, 0, sizeof(int64_t) * ifmt_ctx->nb_streams);
pts_start_from = (int64_t *)malloc(sizeof(int64_t) * ifmt_ctx->nb_streams);
memset(pts_start_from, 0, sizeof(int64_t) * ifmt_ctx->nb_streams);
while (1)
{
AVStream *pkt_in_stream;
AVStream *pkt_out_stream;
ret = av_read_frame(ifmt_ctx, &pkt);
if (ret < 0)
{
break;
}
pkt_in_stream = ifmt_ctx->streams[pkt.stream_index];
pkt_out_stream = ofmt_ctx->streams[pkt.stream_index];
log_packet(ifmt_ctx, &pkt, "in");
if(video_stream_index == pkt.stream_index)
{
if (av_q2d(pkt_in_stream->time_base) * (pkt.dts - pkt_in_stream->first_dts) > end_seconds)
{
av_packet_free(&pkt);
break;
}
}
else
{
av_packet_free(&pkt);
continue;
}
if (pts_start_from[pkt.stream_index] == 0)
{
pts_start_from[pkt.stream_index] = pkt.pts;
}
if (dts_start_from[pkt.stream_index] == 0)
{
dts_start_from[pkt.stream_index] = pkt.dts;
}
pkt.dts = av_rescale_q_rnd(pkt.dts - dts_start_from[pkt.stream_index], pkt_in_stream->time_base, pkt_out_stream->time_base, AV_ROUND_NEAR_INF);
pkt.pts = av_rescale_q_rnd(pkt.pts - pts_start_from[pkt.stream_index], pkt_in_stream->time_base, pkt_out_stream->time_base, AV_ROUND_NEAR_INF);
pkt.duration = av_rescale_q(pkt.duration, pkt_in_stream->time_base, pkt_out_stream->time_base);
pkt.pos = -1;
ret = av_interleaved_write_frame(ofmt_ctx, &pkt);
if (ret < 0)
{
break;
}
av_packet_free(&pkt);
}
end:
if(dts_start_from)
{
free(dts_start_from);
dts_start_from = nullptr;
}
if(pts_start_from)
{
free(pts_start_from);
pts_start_from = nullptr;
}
av_write_trailer(ofmt_ctx);
avformat_close_input(&ifmt_ctx);
// g关闭输出的缓冲区的大小
if (ofmt_ctx && !(ofmt->flags & AVFMT_NOFILE))
{
avio_closep(&ofmt_ctx->pb);
}
avformat_free_context(ofmt_ctx);
return 0;
}