FFmpeg version 4.3

#include <cstdio>
#include <cstring>
#include <iostream>

extern "C" {
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
#include <libavutil/mem.h>
#include <libavutil/mathematics.h>
}
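// Helper: create a new stream in output_format_context and copy the codec
// parameters of input_stream into it. It relies on the deprecated
// AVStream::codec field (still present in FFmpeg 4.3) and is kept for
// reference; test2() below does not call it.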
static AVStream* add_output_stream(AVFormatContext* output_format_context, AVStream* input_stream)
{
AVCodecContext* input_codec_context = nullptr;
AVCodecContext* output_codec_context = nullptr;
AVStream* output_stream = nullptr;
output_stream = avformat_new_stream(output_format_context, nullptr);
if (!output_stream)
{
printf("Call av_new_stream function failed\n");
return nullptr;
}
// AVStream::codec is deprecated in FFmpeg 4.3 but still usable for a plain
// parameter copy; newer code would work with AVStream::codecpar instead.
input_codec_context = input_stream->codec;
output_codec_context = output_stream->codec;
output_codec_context->codec_id = input_codec_context->codec_id;
output_codec_context->codec_type = input_codec_context->codec_type;
output_codec_context->codec_tag = input_codec_context->codec_tag;
output_codec_context->bit_rate = input_codec_context->bit_rate;
// Deep-copy extradata: sharing the input pointer would cause a double free
// when both codec contexts are eventually closed.
if (input_codec_context->extradata && input_codec_context->extradata_size > 0)
{
output_codec_context->extradata = (uint8_t*)av_mallocz(input_codec_context->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE);
memcpy(output_codec_context->extradata, input_codec_context->extradata, input_codec_context->extradata_size);
output_codec_context->extradata_size = input_codec_context->extradata_size;
}
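// Pick a time base for the output: if the codec tick (time_base * ticks_per_frame)
// is coarser than the stream time base and the stream time base is finer than
// 1/1000, prefer the codec time base; otherwise keep the stream time base.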
if (av_q2d(input_codec_context->time_base) * input_codec_context->ticks_per_frame > av_q2d(input_stream->time_base) && av_q2d(input_stream->time_base) < 1.0 / 1000)
{
output_codec_context->time_base = input_codec_context->time_base;
output_codec_context->time_base.num *= input_codec_context->ticks_per_frame;
}
else
{
output_codec_context->time_base = input_stream->time_base;
}
switch (input_codec_context->codec_type)
{
case AVMEDIA_TYPE_AUDIO:
{
output_codec_context->channel_layout = input_codec_context->channel_layout;
output_codec_context->sample_rate = input_codec_context->sample_rate;
output_codec_context->channels = input_codec_context->channels;
output_codec_context->frame_size = input_codec_context->frame_size;
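// A quirk seen in ffmpeg's own stream-copy code: for MP3 with block_align == 1
// and for AC-3 the stored block_align is not meaningful, so it is reset and
// left for the muxer to work out.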
if ((input_codec_context->block_align == 1 && input_codec_context->codec_id == AV_CODEC_ID_MP3) || input_codec_context->codec_id == AV_CODEC_ID_AC3)
{
output_codec_context->block_align = 0;
}
else
{
output_codec_context->block_align = input_codec_context->block_align;
}
break;
}
case AVMEDIA_TYPE_VIDEO:
{
output_codec_context->pix_fmt = input_codec_context->pix_fmt;
output_codec_context->width = input_codec_context->width;
output_codec_context->height = input_codec_context->height;
output_codec_context->has_b_frames = input_codec_context->has_b_frames;
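// Muxers that set AVFMT_GLOBALHEADER (e.g. MP4, FLV, Matroska) expect codec
// extradata in the container header rather than in every packet.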
if (output_format_context->oformat->flags & AVFMT_GLOBALHEADER)
{
output_codec_context->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
}
break;
}
default:
break;
}
return output_stream;
}
static void test2()
{
int nRet = 0;
AVFormatContext *pInFmtCtx = nullptr;
const char *pInFileName = "D:/videos/264.dat";
const char *pOutFileName = "D:/videos/264.ts";
AVDictionary *pDic = nullptr;
nRet = avformat_open_input(&pInFmtCtx,pInFileName,nullptr,&pDic);
if( nRet < 0)
{
printf("Could not open input file.");
return;
}
if (avformat_find_stream_info(pInFmtCtx, nullptr) < 0)
{
printf("Could not find stream information.\n");
return;
}
printf("===========Input Information==========\n");
av_dump_format(pInFmtCtx, 0, pInFileName, 0);
printf("======================================\n");
// Method 1: combine several calls by hand
// AVFormatContext *pMp4FmtCtx = avformat_alloc_context();
// // Guess the output format from the file name
// AVOutputFormat *fmt = av_guess_format(NULL, pOutFileName, NULL);
// pMp4FmtCtx->oformat = fmt;
//Output
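// Method 2: let avformat_alloc_output_context2() guess the muxer (mpegts here)
// from the .ts extension of the output file name.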
AVFormatContext *pTsFmtCtx = nullptr;
AVOutputFormat* pTsOutFormat = nullptr;
avformat_alloc_output_context2(&pTsFmtCtx , nullptr, nullptr, pOutFileName);
if (!pTsFmtCtx ) {
printf("Could not create output context\n");
return;
}
pTsOutFormat = pTsFmtCtx->oformat;
if (avio_open(&(pTsFmtCtx->pb), pOutFileName, AVIO_FLAG_WRITE) < 0)
{
printf("avio_open fail.\n");
return;
}
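// Create one output stream per input stream and copy its codec parameters
// (pure stream copy, no re-encoding).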
unsigned int i = 0,videoindex_v=0;
int videoindex_out=0;
for (;i < pInFmtCtx->nb_streams; i++)
{
AVStream *in_stream = pInFmtCtx->streams[i];
AVCodec *pInCodec = avcodec_find_decoder(in_stream->codecpar->codec_id);
AVCodecContext* pInCodecCtx = avcodec_alloc_context3(pInCodec);
nRet = avcodec_parameters_to_context(pInCodecCtx, in_stream->codecpar);
AVStream *out_stream = avformat_new_stream(pTsFmtCtx, pInCodecCtx->codec);
if (!out_stream) {
printf("Failed allocating output stream\n");
return;
}
if (in_stream->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
{
videoindex_v = i;
videoindex_out = out_stream->index;
}
if( nRet < 0)
{
printf("avcodec_parameters_to_context fail.\n");
return;
}
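// Clear the codec tag so the MPEG-TS muxer can pick a tag that is valid for
// the output container instead of inheriting one from the input.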
pInCodecCtx->codec_tag = 0;
if (pTsFmtCtx->oformat->flags & AVFMT_GLOBALHEADER)
pInCodecCtx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
nRet = avcodec_parameters_from_context(out_stream->codecpar, pInCodecCtx);
if( nRet < 0)
{
printf("avcodec_parameters_from_context fail.\n");
return;
}
// The temporary decoder context only carries the parameters; free it once
// they have been copied into the output stream.
avcodec_free_context(&pInCodecCtx);
}
printf("==========Output Information==========\n");
av_dump_format(pTsFmtCtx, 0, pOutFileName, 1);
printf("======================================\n");
//Write file header
if (avformat_write_header(pTsFmtCtx, nullptr) < 0) {
printf("Error occurred when opening output file\n");
return;
}
AVPacket pkt;
int frame_index = 0;
int count = 1;
int64_t cur_pts_v = 0;
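// Remux loop: read packets from the input, synthesize PTS/DTS for the raw
// H.264 stream (which carries no timestamps), rescale them to the output
// stream's time base and write them to the TS file.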
while (1) {
AVFormatContext *ifmt_ctx;
int stream_index = 0;
AVStream *in_stream, *out_stream;
//Get an AVPacket
//(in the full audio+video remuxing example an av_compare_ts() check would
// choose between the video and audio inputs here; with a single H.264 input
// it is not needed)
{
ifmt_ctx = pInFmtCtx;
stream_index = videoindex_out;
if (av_read_frame(ifmt_ctx, &pkt) >= 0) {
bool got_video_packet = false;
do {
in_stream = ifmt_ctx->streams[pkt.stream_index];
out_stream = pTsFmtCtx->streams[stream_index];
printf("stream_index==%d,pkt.stream_index==%d,videoindex_v=%d\n", stream_index, pkt.stream_index, videoindex_v);
if (pkt.stream_index == videoindex_v) {
//FIX:No PTS (Example: Raw H.264)
//Simple Write PTS
if (pkt.pts == AV_NOPTS_VALUE) {
//Write PTS
AVRational time_base1 = in_stream->time_base;
//Duration between 2 frames (us)
int64_t calc_duration = (double)AV_TIME_BASE / av_q2d(in_stream->r_frame_rate);
//Parameters
pkt.pts = (double)(frame_index*calc_duration) / (double)(av_q2d(time_base1)*AV_TIME_BASE);
pkt.dts = pkt.pts;
pkt.duration = (double)calc_duration / (double)(av_q2d(time_base1)*AV_TIME_BASE);
frame_index++;
}
cur_pts_v = pkt.pts;
got_video_packet = true;
break;
}
// Not the video stream: drop this packet before reading the next one.
av_packet_unref(&pkt);
} while (av_read_frame(ifmt_ctx, &pkt) >= 0);
if (!got_video_packet) {
// Input exhausted without another video packet: stop remuxing.
break;
}
}
else {
break;
}
}
//Convert PTS/DTS
pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
pkt.dts = av_rescale_q_rnd(pkt.dts, in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
pkt.duration = av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base);
pkt.pos = -1;
pkt.stream_index = stream_index;
printf("Write %d Packet. size:%5d\tpts:%lld\n", count, pkt.size, (long long)pkt.pts);
//Write
if (av_interleaved_write_frame(pTsFmtCtx, &pkt) < 0) {
printf("Error muxing packet\n");
break;
}
count++;
av_packet_unref(&pkt);
}
//Write file trailer
av_write_trailer(pTsFmtCtx);
//Clean up
avio_closep(&pTsFmtCtx->pb);
avformat_free_context(pTsFmtCtx);
avformat_close_input(&pInFmtCtx);
std::cout << "h264 to ts end" << std::endl;
}
int main()
{
test2();
printf("end\n");
return 0;
}
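For comparison, here is a minimal sketch (not part of the original program) of how the per-stream parameter copy in test2() could be written with avcodec_parameters_copy(), which FFmpeg has provided since 3.1; it avoids the temporary AVCodecContext round-trip. The function name copy_streams and its parameters are illustrative assumptions.

// Sketch: stream copy using avcodec_parameters_copy (available since FFmpeg 3.1).
// pInFmtCtx and pTsFmtCtx are assumed to be opened exactly as in test2().
static int copy_streams(AVFormatContext *pInFmtCtx, AVFormatContext *pTsFmtCtx)
{
    for (unsigned int i = 0; i < pInFmtCtx->nb_streams; i++)
    {
        AVStream *in_stream = pInFmtCtx->streams[i];
        AVStream *out_stream = avformat_new_stream(pTsFmtCtx, nullptr);
        if (!out_stream)
            return AVERROR(ENOMEM);
        // Copy codec parameters directly; no AVCodecContext is needed for a
        // pure remux, since nothing is decoded or encoded.
        int nRet = avcodec_parameters_copy(out_stream->codecpar, in_stream->codecpar);
        if (nRet < 0)
            return nRet;
        // Let the MPEG-TS muxer choose its own codec tag.
        out_stream->codecpar->codec_tag = 0;
    }
    return 0;
}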