Encoding YUV to H264 with the FFmpeg Library

Preparation

ffmpeg 4.4

Prepare a clip of raw video data in yuv420p format.

Here we extract it directly from an existing video with an ffmpeg command:

ffmpeg -i .\beautlWorld.mp4 -pix_fmt yuv420p -s 1280x720 yuv420p_1280x720.yuv
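A raw .yuv file carries no header, so a player has to be told the pixel format and frame size explicitly. To sanity-check the extracted data with ffplay (installed alongside ffmpeg), something like the following should work:

ffplay -f rawvideo -pixel_format yuv420p -video_size 1280x720 yuv420p_1280x720.yuv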

Encoding workflow

Broadly, the encoding flow breaks down into the following steps:

1. Initialize the encoder and set its parameters.

2. Allocate an AVPacket and an AVFrame and set their parameters.

3. Read the video file and encode it frame by frame.

4. Free the resources and finish.

Encoding

The code is shown below.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
 
extern "C"
{
#include <libavcodec/avcodec.h>
#include <libavutil/time.h>
#include <libavutil/opt.h>
#include <libavutil/imgutils.h>
}
 
using namespace std;
 
const char* inFileName = "./yuv420p_1280x720.yuv";
const char* outFileName = "./encode_yuv420p_1280x720.h264";
 
// Send one frame to the encoder and write all resulting packets; frame == nullptr flushes it.
int encode(AVCodecContext* codecContent, AVPacket* packet, AVFrame* frame, FILE* outFile)
{
	// Encode: hand the raw frame to the encoder
	int ret = avcodec_send_frame(codecContent, frame);
	if (ret < 0)
	{
		fprintf(stderr, "Error sending a frame for encoding\n");
		return -1;
	}
 
	// Drain every packet the encoder has ready and append it to the output file
	while (ret >= 0)
	{
		ret = avcodec_receive_packet(codecContent, packet);
		if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
			return 0;
		else if (ret < 0)
		{
			fprintf(stderr, "Error during encoding\n");
			return -1;
		}
 
		fwrite(packet->data, 1, packet->size, outFile);
		av_packet_unref(packet);
	}
	return 0;
}
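The encode() helper above handles the send/receive loop. As a rough sketch of how steps 1-4 can be wired together around it, a main() might look like the following. This is an outline under a few assumptions, not a definitive listing: it expects an H.264 encoder (typically libx264) in your FFmpeg build, and the 25 fps frame rate, 2 Mbit/s bit rate, and GOP size of 25 are illustrative values only.

int main()
{
	// Step 1: find the H.264 encoder and configure the codec context
	const AVCodec* codec = avcodec_find_encoder(AV_CODEC_ID_H264);
	if (!codec)
	{
		fprintf(stderr, "H.264 encoder not found\n");
		return -1;
	}
	AVCodecContext* codecContent = avcodec_alloc_context3(codec);
	codecContent->width = 1280;                     // must match the raw input
	codecContent->height = 720;
	codecContent->pix_fmt = AV_PIX_FMT_YUV420P;
	codecContent->time_base = AVRational{ 1, 25 };  // 25 fps (illustrative)
	codecContent->framerate = AVRational{ 25, 1 };
	codecContent->bit_rate = 2 * 1000 * 1000;       // illustrative target bit rate
	codecContent->gop_size = 25;                    // about one key frame per second
	if (avcodec_open2(codecContent, codec, nullptr) < 0)
	{
		fprintf(stderr, "Failed to open encoder\n");
		return -1;
	}
 
	// Step 2: allocate the reusable packet and frame, plus the frame's data buffers
	AVPacket* packet = av_packet_alloc();
	AVFrame* frame = av_frame_alloc();
	frame->width = codecContent->width;
	frame->height = codecContent->height;
	frame->format = codecContent->pix_fmt;
	if (av_frame_get_buffer(frame, 0) < 0)
	{
		fprintf(stderr, "Failed to allocate frame buffer\n");
		return -1;
	}
 
	FILE* inFile = fopen(inFileName, "rb");
	FILE* outFile = fopen(outFileName, "wb");
	if (!inFile || !outFile)
	{
		fprintf(stderr, "Failed to open input or output file\n");
		return -1;
	}
 
	// Helper: read one plane row by row so the frame's linesize padding is respected
	auto readPlane = [&](uint8_t* dst, int linesize, int width, int height) -> bool {
		for (int y = 0; y < height; y++)
			if (fread(dst + y * linesize, 1, width, inFile) != (size_t)width)
				return false;
		return true;
	};
 
	// Step 3: read one yuv420p frame at a time and feed it to the encoder
	int64_t pts = 0;
	while (true)
	{
		av_frame_make_writable(frame);
		if (!readPlane(frame->data[0], frame->linesize[0], frame->width, frame->height) ||
			!readPlane(frame->data[1], frame->linesize[1], frame->width / 2, frame->height / 2) ||
			!readPlane(frame->data[2], frame->linesize[2], frame->width / 2, frame->height / 2))
			break;  // end of input (or a truncated last frame)
		frame->pts = pts++;
		if (encode(codecContent, packet, frame, outFile) < 0)
			break;
	}
 
	// Step 4: flush the encoder with a null frame, then release everything
	encode(codecContent, packet, nullptr, outFile);
 
	fclose(inFile);
	fclose(outFile);
	av_frame_free(&frame);
	av_packet_free(&packet);
	avcodec_free_context(&codecContent);
	return 0;
}

If FFmpeg is visible to pkg-config, a build command along the lines of g++ encode_yuv.cpp -o encode_yuv $(pkg-config --cflags --libs libavcodec libavutil) should be enough (the source file name here is only an example). The resulting raw H264 stream can be played back directly with ffplay ./encode_yuv420p_1280x720.h264.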
