一 代码
ffmpeg版本5.1.2,dll是:ffmpeg-5.1.2-full_build-shared。x64的。
代码是windows端,用VS编译。
怎么使用这个代码?新建win32工程,复制这三个文件。设置ffmpeg库的include和lib目录。
代码在[系列04]编码的基础上改的。
通过宏INTEL_QSV、NVIDIA来区分使用哪个硬件编码器。
INTEL_QSV cpu是intel就行,输入YUV格式要求NV12。
NVIDIA要求有英伟达显卡,即GPU。输入YUV格式支持更多。
什么时候会用到硬编 ?
答:1 做转码服务器,比如用英伟达显卡。
2 移动端硬编,比如用手机直播。会受限于Android系统:Android版本过低时,Framework框架层不支持硬编。
/*
author: ashleycoder
CSDN blog: https://blog.csdn.net/chenquangobeijing
*/
#pragma once
// FFmpeg is a C library: wrap the headers in extern "C" so the C++
// compiler does not name-mangle the FFmpeg symbols at link time.
extern "C"
{
#include <libavcodec/avcodec.h>
#include <libavutil/pixdesc.h>
#include <libavutil/hwcontext.h>
};
#include <thread>
// MSVC auto-link directives for the FFmpeg 5.1.2 shared import libraries.
#pragma comment(lib, "avformat.lib")
#pragma comment(lib, "avutil.lib")
#pragma comment(lib, "avcodec.lib")
// Select exactly one hardware backend (defines the fields in CHardwareEncode):
//   INTEL_QSV -> h264_qsv   (Intel Quick Sync; raw input must be NV12)
//   NVIDIA    -> h264_nvenc (NVIDIA NVENC;   raw input here is YUV420P)
#define INTEL_QSV 1
//#define NVIDIA 1
// Minimal FFmpeg hardware H.264 encoder (Intel QSV or NVIDIA NVENC,
// chosen by the INTEL_QSV / NVIDIA macro). Reads raw YUV from a file,
// uploads frames to GPU surfaces and writes the H.264 bitstream to disk.
class CHardwareEncode
{
public:
    CHardwareEncode(void);
    ~CHardwareEncode(void);

public:
    // Initializes the encoder for 1920x1080 and runs the encode loop
    // on a worker thread, joining before returning.
    void Start();
    // Creates the hardware device/frames contexts and opens the encoder.
    // Returns 0 on success, -1 on failure.
    int HardwareEncode_Init(AVPixelFormat input_pix_fmt, int yuv_width, int yuv_height);
    // Reads raw YUV frames, encodes them and writes packets to m_pH264File.
    // Returns 0 on success, -1 on failure.
    int EncodeH264_Fun();
    // Releases the encoder context and other owned resources.
    void Close();

private:
    const AVCodec* m_pVideoHWEncodeCodec = nullptr;
    AVCodecContext* m_pVideoHWEncodeCodecCtx = nullptr;
    AVBufferRef* m_hw_device_ctx = nullptr;   // hardware device reference

#ifdef INTEL_QSV
    // Intel Quick Sync: works on Intel CPUs; raw input format is NV12.
    enum AVHWDeviceType m_hw_type = AV_HWDEVICE_TYPE_QSV;
    const char* m_enc_name = "h264_qsv";
    AVPixelFormat m_nInput_pix_fmt = AV_PIX_FMT_NV12;
    AVPixelFormat m_ctx_pix_fmt = AV_PIX_FMT_QSV;
#endif
#ifdef NVIDIA
    // NVIDIA NVENC: requires an NVIDIA GPU; this sample feeds YUV420P.
    enum AVHWDeviceType m_hw_type = AV_HWDEVICE_TYPE_CUDA;
    const char* m_enc_name = "h264_nvenc";
    AVPixelFormat m_nInput_pix_fmt = AV_PIX_FMT_YUV420P;
    AVPixelFormat m_ctx_pix_fmt = AV_PIX_FMT_CUDA;
#endif

    // Brace-initialized so the dimensions are never read uninitialized
    // before HardwareEncode_Init() has run.
    int m_nFrameHeight{ 0 };
    int m_nFrameWidth{ 0 };
    FILE* m_pH264File = nullptr;   // test use: raw H.264 output file
    // Scratch buffer for av_err2str below.
    char av_error[AV_ERROR_MAX_STRING_SIZE] = { 0 };
// Formats an FFmpeg error code into the member buffer above
// (replacement for FFmpeg's own av_err2str macro).
#define av_err2str(errnum) av_make_error_string(av_error, AV_ERROR_MAX_STRING_SIZE, errnum)
};
/*
author: ashleycoder
CSDN blog: https://blog.csdn.net/chenquangobeijing
*/
#include "HardwareEncode.h"
#include <thread>
#include <functional>
#include <codecvt>
#include <locale>
#include <string>
#include <Windows.h>
CHardwareEncode::CHardwareEncode(void)
{
    // Open the file that receives the raw H.264 bitstream (test use).
    // fopen_s returns non-zero on failure and leaves the pointer null;
    // the original ignored that, so a later fwrite() would crash.
    if (fopen_s(&m_pH264File, "output.264", "wb") != 0 || m_pH264File == nullptr)
    {
        printf("failed to open output.264 for writing\n");
    }
}
CHardwareEncode::~CHardwareEncode(void)
{
// Release owned resources via Close() (RAII-style cleanup).
Close();
}
void CHardwareEncode::Start()
{
HardwareEncode_Init(m_nInput_pix_fmt, 1920, 1080);
auto video_func = std::bind(&CHardwareEncode::EncodeH264_Fun, this);
std::thread video_thread(video_func);
video_thread.join();
}
// Creates the hardware device context and a hardware frame pool, then
// opens the named encoder (h264_qsv / h264_nvenc).
// input_pix_fmt: raw (system-memory) pixel format of the YUV input.
// Returns 0 on success, -1 on any failure.
int CHardwareEncode::HardwareEncode_Init(AVPixelFormat input_pix_fmt, int yuv_width, int yuv_height)
{
    m_nInput_pix_fmt = input_pix_fmt;
    m_nFrameWidth = yuv_width;
    m_nFrameHeight = yuv_height;
    int nRet = -1;

    // Look the hardware encoder up by name rather than by codec id.
    m_pVideoHWEncodeCodec = avcodec_find_encoder_by_name(m_enc_name);
    if (m_pVideoHWEncodeCodec == nullptr)
    {
        printf("avcodec_find_encoder_by_name(%s) fail\n", m_enc_name);
        return -1;
    }
    m_pVideoHWEncodeCodecCtx = avcodec_alloc_context3(m_pVideoHWEncodeCodec);
    if (m_pVideoHWEncodeCodecCtx == nullptr)  // original dereferenced without checking
    {
        printf("avcodec_alloc_context3 fail\n");
        return -1;
    }
    // The context pixel format is the *hardware surface* format
    // (AV_PIX_FMT_QSV / AV_PIX_FMT_CUDA), not the raw input format.
    m_pVideoHWEncodeCodecCtx->pix_fmt = m_ctx_pix_fmt;
    m_pVideoHWEncodeCodecCtx->width = m_nFrameWidth;
    m_pVideoHWEncodeCodecCtx->height = m_nFrameHeight;
    m_pVideoHWEncodeCodecCtx->time_base.num = 1;
    m_pVideoHWEncodeCodecCtx->time_base.den = 25;     // 25 fps input
    m_pVideoHWEncodeCodecCtx->bit_rate = 4000000;     // 4 Mbit/s
    m_pVideoHWEncodeCodecCtx->sample_aspect_ratio.num = 1;
    m_pVideoHWEncodeCodecCtx->sample_aspect_ratio.den = 1;
    m_pVideoHWEncodeCodecCtx->gop_size = 25;          // one keyframe per second
    m_pVideoHWEncodeCodecCtx->max_b_frames = 0;       // no B-frames: low latency

    // Difference from software encoding: create the hardware device.
    nRet = av_hwdevice_ctx_create(&m_hw_device_ctx, m_hw_type, nullptr, nullptr, 0);
    if (nRet < 0 || m_hw_device_ctx == nullptr)
    {
        printf("av_hwdevice_ctx_create fail: %s\n", av_err2str(nRet));
        return -1;
    }
    // Frame pool of GPU surfaces the encoder will consume.
    AVBufferRef* hw_frames_ref = av_hwframe_ctx_alloc(m_hw_device_ctx);
    if (hw_frames_ref == nullptr)
    {
        printf("av_hwframe_ctx_alloc fail\n");
        return -1;
    }
    AVHWFramesContext* frames_ctx = (AVHWFramesContext*)(hw_frames_ref->data);
    frames_ctx->format = m_ctx_pix_fmt;        // hardware surface format
    frames_ctx->sw_format = m_nInput_pix_fmt;  // format uploaded from system memory
    frames_ctx->width = m_nFrameWidth;
    frames_ctx->height = m_nFrameHeight;
    frames_ctx->initial_pool_size = 20;        // pre-allocated surfaces
    nRet = av_hwframe_ctx_init(hw_frames_ref);
    if (nRet < 0)
    {
        printf("av_hwframe_ctx_init fail: %s\n", av_err2str(nRet));
        av_buffer_unref(&hw_frames_ref);
        return -1;
    }
    // Difference from software encoding: attach hw_frames_ctx so the
    // encoder works on device frames directly (no CPU<->GPU copies).
    m_pVideoHWEncodeCodecCtx->hw_frames_ctx = av_buffer_ref(hw_frames_ref);
    if (m_pVideoHWEncodeCodecCtx->hw_frames_ctx == nullptr)
    {
        av_buffer_unref(&hw_frames_ref);
        return -1;
    }
    av_buffer_unref(&hw_frames_ref);  // context holds its own reference now

    if (avcodec_open2(m_pVideoHWEncodeCodecCtx, m_pVideoHWEncodeCodec, nullptr) < 0)
    {
        printf("avcodec_open2 fail! \n");
        return -1;
    }
    return 0;
}
int CHardwareEncode::EncodeH264_Fun()
{
int nRet = -1;
int success_num = 0;
int nFrameNum = 600;
FILE* pInFile = nullptr;
#ifdef INTEL_QSV
fopen_s(&pInFile, "1920_1080_nv12.yuv", "rb");
#endif
#ifdef NVIDIA
fopen_s(&pInFile, "1920x1080_yuv420p.yuv", "rb");
#endif
int YSize = m_nFrameHeight* m_nFrameWidth;
AVPacket* video_pkt = av_packet_alloc();
for (int k = 0; k < nFrameNum; k++)
{
AVFrame *sw_frame = av_frame_alloc();
sw_frame->width = m_nFrameWidth;
sw_frame->height = m_nFrameHeight;
sw_frame->format = m_nInput_pix_fmt;
nRet = av_frame_get_buffer(sw_frame, 0);
if (fread((uint8_t*)(sw_frame->data[0]), 1, YSize, pInFile) <= 0) {
printf("Failed to read raw data! \n");
return -1;
}
#ifdef INTEL_QSV
if (fread((uint8_t*)(sw_frame->data[1]), 1, YSize/2, pInFile) <= 0) {
printf("Failed to read raw data! \n");
return -1;
}
#endif
#ifdef NVIDIA
if (fread((uint8_t*)(sw_frame->data[1]), 1, YSize/4, pInFile) <= 0) {
printf("Failed to read raw data! \n");
return -1;
}
if (fread((uint8_t*)(sw_frame->data[2]), 1, YSize/4, pInFile) <= 0) {
printf("Failed to read raw data! \n");
return -1;
}
#endif
AVFrame *hw_frame = av_frame_alloc();
nRet = av_hwframe_get_buffer(m_pVideoHWEncodeCodecCtx->hw_frames_ctx, hw_frame, 0);
if (nRet < 0)
{
break;
}
//把帧拷贝到设备帧(硬件帧)
nRet = av_hwframe_transfer_data(hw_frame, sw_frame, 0);
if (nRet < 0)
{
break;
}
int video_send_frame_ret = avcodec_send_frame(m_pVideoHWEncodeCodecCtx, hw_frame);
//printf("encode video send_frame %d\n", video_send_frame_ret);
if (video_send_frame_ret >= 0) {
int video_receive_packet_ret = avcodec_receive_packet(m_pVideoHWEncodeCodecCtx, video_pkt);
char* err_str = av_err2str(video_receive_packet_ret);
printf("avcodec_receive_packet: %d, %s \n", video_receive_packet_ret, err_str);
if (video_receive_packet_ret == AVERROR(EAGAIN) || video_receive_packet_ret == AVERROR_EOF) {
//break;
}
else if (video_receive_packet_ret < 0) {
printf("Error encoding audio frame\n");
//break;
}
if (video_pkt->size > 0)
{
video_pkt->stream_index = 0;
printf("video_pkt->size=%d\r\n", video_pkt->size);
fwrite(video_pkt->data, 1, video_pkt->size, m_pH264File);
av_packet_unref(video_pkt);
}
}
av_frame_free(&hw_frame);
av_frame_free(&sw_frame);
}
// //不能掉,否则帧数不够
//这部分代码要重写
//avcodec_send_frame(m_pVideoEncodeCodecCtx, nullptr);
//while (true) {
// int ret = avcodec_receive_packet(m_pVideoEncodeCodecCtx, video_pkt);
// if (ret == 0) {
// ret = av_interleaved_write_frame(m_pOutputFormatCtx, video_pkt);
// printf("video write_ret:%d, success_num=%d\r\n", ret, ++success_num);
// av_packet_unref(video_pkt);
// }
// else if (ret == AVERROR_EOF) {
// break;
// }
// else {
// break;
// }
//}
return 0;
}
void CHardwareEncode::Close()
{
avcodec_close(m_pVideoHWEncodeCodecCtx);
avcodec_free_context(&m_pVideoHWEncodeCodecCtx);
}
#include <iostream>
#include <Windows.h>
#include "5__HardwareEncode/HardwareEncode.h"
int main()
{
CHardwareEncode* m_pHWEncodeVideo = new CHardwareEncode();
m_pHWEncodeVideo->Start();
return 0;
}
二 注意的问题
1 a 把编码器替换成h264_nvenc。b 编码器替换成h264_nvenc、使用hw_frames_ctx。
两者之间的区别?即hw_frames_ctx的作用?
答:避免了CPU和GPU之间的数据拷贝,提高了编码效率。
2 编码卡的编码速度如何?
答:取决于显卡的具体型号,需要在实际使用场景中进行测试。
3 PAR:pixel aspect ratio,DAR = SAR x PAR
显示宽高比 = 采集宽高比*像素宽高比