H.264 stream formats

There are two H.264 stream formats and they are sometimes called

  1. Annex B
  2. MP4

An H.264 stream is made of NALs (a unit of packaging)

  1. Annex B: each NAL is preceded by a start code of 0x00 0x00 0x01 or
    0x00 0x00 0x00 0x01, i.e. START NAL START NAL etc
  2. MP4: is size prefixed SIZE NAL SIZE NAL etc

The MP4 stream format doesn't carry NALs of type SPS, PPS or AU delimiter in the stream itself; the SPS and PPS are stored out-of-band in the container's extradata (the avcC box).

The Annex B format you'll find in MPEG-2 TS, RTP and some encoders' default output.

The MP4 format you'll find in MP4 files. Both formats can be converted into each other.

Annex B -> MP4: remove start codes, insert length of NAL, filter out SPS, PPS and AU delimiter.

MP4 -> Annex B: remove length, insert start code, insert SPS for each I-frame, insert PPS for each frame, insert AU delimiter for each GOP.

  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
下面是一个使用FFmpeg库实现将H.265编码的mp4视频转换为H.264编码的mp4视频的C++示例代码: ```cpp #include <iostream> #include <string> #include <cstdlib> #include <cstdio> #include <cstring> #include "ffmpeg.h" using namespace std; #define INBUF_SIZE 4096 #define OUTBUF_SIZE 4096 int main(int argc, char **argv) { if (argc != 3) { cerr << "Usage: " << argv[0] << " <input_file> <output_file>" << endl; return 1; } // Register all codecs and formats av_register_all(); AVFormatContext *input_ctx = NULL, *output_ctx = NULL; AVCodecContext *input_codec_ctx = NULL, *output_codec_ctx = NULL; AVCodec *input_codec = NULL, *output_codec = NULL; AVPacket packet; AVFrame *frame = NULL; uint8_t inbuf[INBUF_SIZE + AV_INPUT_BUFFER_PADDING_SIZE]; uint8_t outbuf[OUTBUF_SIZE]; int ret, got_output; // Open input file if ((ret = avformat_open_input(&input_ctx, argv[1], NULL, NULL)) < 0) { cerr << "Could not open input file: " << argv[1] << endl; goto end; } // Retrieve input stream information if ((ret = avformat_find_stream_info(input_ctx, NULL)) < 0) { cerr << "Could not find stream information" << endl; goto end; } // Open output file if ((ret = avformat_alloc_output_context2(&output_ctx, NULL, NULL, argv[2])) < 0) { cerr << "Could not create output context" << endl; goto end; } // Find the input video stream int video_stream_index = av_find_best_stream(input_ctx, AVMEDIA_TYPE_VIDEO, -1, -1, &input_codec, 0); if (video_stream_index < 0) { cerr << "Could not find input video stream" << endl; goto end; } // Create a new video stream in the output file output_codec = avcodec_find_encoder(AV_CODEC_ID_H264); if (!output_codec) { cerr << "Could not find H.264 encoder" << endl; goto end; } AVStream *output_stream = avformat_new_stream(output_ctx, NULL); if (!output_stream) { cerr << "Failed to allocate output stream" << endl; goto end; } output_codec_ctx = avcodec_alloc_context3(output_codec); if (!output_codec_ctx) { cerr << "Failed to allocate codec context" << endl; goto end; } output_codec_ctx->codec_id = 
AV_CODEC_ID_H264; output_codec_ctx->width = input_codec_ctx->width; output_codec_ctx->height = input_codec_ctx->height; output_codec_ctx->bit_rate = input_codec_ctx->bit_rate; output_codec_ctx->time_base = input_codec_ctx->time_base; output_codec_ctx->gop_size = input_codec_ctx->gop_size; output_codec_ctx->max_b_frames = input_codec_ctx->max_b_frames; output_codec_ctx->pix_fmt = AV_PIX_FMT_YUV420P; if ((ret = avcodec_open2(output_codec_ctx, output_codec, NULL)) < 0) { cerr << "Failed to open output codec" << endl; goto end; } if ((ret = avcodec_parameters_from_context(output_stream->codecpar, output_codec_ctx)) < 0) { cerr << "Failed to copy parameters from codec context to stream" << endl; goto end; } output_stream->time_base = output_codec_ctx->time_base; // Open output file if (!(output_ctx->oformat->flags & AVFMT_NOFILE)) { if ((ret = avio_open(&output_ctx->pb, argv[2], AVIO_FLAG_WRITE)) < 0) { cerr << "Could not open output file: " << argv[2] << endl; goto end; } } // Write file header if ((ret = avformat_write_header(output_ctx, NULL)) < 0) { cerr << "Error writing output file header" << endl; goto end; } // Allocate input and output frame buffers frame = av_frame_alloc(); if (!frame) { cerr << "Could not allocate frame" << endl; goto end; } // Loop through input packets while (1) { // Read packet from input file if ((ret = av_read_frame(input_ctx, &packet)) < 0) break; // If the packet is from the video stream if (packet.stream_index == video_stream_index) { // Decode video frame ret = avcodec_decode_video2(input_codec_ctx, frame, &got_output, &packet); if (ret < 0) { cerr << "Error decoding video frame" << endl; goto end; } // If a frame was decoded if (got_output) { // Convert frame format to YUV420P AVFrame *yuv_frame = av_frame_alloc(); if (!yuv_frame) { cerr << "Could not allocate frame" << endl; goto end; } yuv_frame->width = input_codec_ctx->width; yuv_frame->height = input_codec_ctx->height; yuv_frame->format = AV_PIX_FMT_YUV420P; if ((ret = 
av_frame_get_buffer(yuv_frame, 32)) < 0) { cerr << "Failed to allocate buffer for YUV420P frame" << endl; goto end; } if ((ret = av_image_alloc(yuv_frame->data, yuv_frame->linesize, input_codec_ctx->width, input_codec_ctx->height, AV_PIX_FMT_YUV420P, 32)) < 0) { cerr << "Failed to allocate image buffer for YUV420P frame" << endl; goto end; } if ((ret = sws_scale(input_codec_ctx->sws_ctx, frame->data, frame->linesize, 0, input_codec_ctx->height, yuv_frame->data, yuv_frame->linesize)) < 0) { cerr << "Error converting frame format to YUV420P" << endl; goto end; } // Encode YUV420P frame as H.264 AVPacket output_packet; av_init_packet(&output_packet); output_packet.data = NULL; output_packet.size = 0; output_packet.stream_index = output_stream->index; ret = avcodec_encode_video2(output_codec_ctx, &output_packet, yuv_frame, &got_output); if (ret < 0) { cerr << "Error encoding video frame" << endl; goto end; } // If the frame was encoded if (got_output) { // Write output packet to file if ((ret = av_write_frame(output_ctx, &output_packet)) < 0) { cerr << "Error writing output packet" << endl; goto end; } } av_frame_free(&yuv_frame); } } // Free input packet av_packet_unref(&packet); } // Flush encoder ret = avcodec_send_frame(output_codec_ctx, NULL); if (ret < 0) { cerr << "Error flushing encoder" << endl; goto end; } while (1) { AVPacket output_packet; av_init_packet(&output_packet); output_packet.data = NULL; output_packet.size = 0; output_packet.stream_index = output_stream->index; ret = avcodec_receive_packet(output_codec_ctx, &output_packet); if (ret == AVERROR_EOF) break; if (ret < 0) { cerr << "Error encoding video frame" << endl; goto end; } if ((ret = av_write_frame(output_ctx, &output_packet)) < 0) { cerr << "Error writing output packet" << endl; goto end; } av_packet_unref(&output_packet); } // Write file trailer if ((ret = av_write_trailer(output_ctx)) < 0) { cerr << "Error writing output file trailer" << endl; goto end; } end: 
avformat_close_input(&input_ctx); if (output_ctx) { if (!(output_ctx->oformat->flags & AVFMT_NOFILE)) avio_closep(&output_ctx->pb); avformat_free_context(output_ctx); } if (input_codec_ctx) avcodec_close(input_codec_ctx); if (output_codec_ctx) avcodec_close(output_codec_ctx); if (frame) av_frame_free(&frame); return ret; } ``` 在上面的示例代码中,使用了FFmpeg库进行视频编解码相关操作。其中,`AVFormatContext`结构体表示输入输出文件的格式上下文,`AVCodecContext`结构体表示编解码器的上下文,`AVCodec`结构体表示编解码器本身,`AVPacket`结构体表示音视频数据包,`AVFrame`结构体表示音视频帧。 该示例代码的主要思路是: 1. 打开输入文件并获取输入流信息。 2. 创建输出文件并添加视频流。 3. 循环读取输入文件的音视频数据包。 4. 如果数据包是视频数据包,则解码视频帧,将帧格式转换为YUV420P格式,编码为H.264格式,然后写入输出文件。 5. 写入文件尾部。 需要注意的是,该示例代码可能需要根据实际情况进行修改,例如需要根据输入视频的分辨率和帧率设置输出视频的参数,以及选择合适的H.264编码器等。
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值