1. Windows下QSV硬解码配置
在libavcodec/codec_list.c下添加
&ff_h264_qsv_decoder,
在ffmpeg_generate.gni下加入
"libavcodec/h264idct.c",
"libavcodec/h264qpel.c",
"libavcodec/startcode.c",
"libavcodec/h264_mp4toannexb_bsf.c",
]
}
ffmpeg_c_sources += [
"libavcodec/qsvenc_h264.c",
"libavcodec/qsvenc.c",
"libavcodec/qsv.c",
"libavcodec/qsvdec.c",
"libavcodec/qsvdec_h2645.c",
]
在libavcodec/bsf_list.c的bitstream_filters数组中添加
static const AVBitStreamFilter * const bitstream_filters[] = {
&ff_h264_mp4toannexb_bsf,
&ff_null_bsf,
NULL };
修改win-msvc/x64/config.h配置
#define CONFIG_H264_QSV_DECODER 1
2.QSV硬解码实现
h264_decoder_impl_ffmpeg.cc的实现
#include "modules/video_coding/codecs/h264/h264_decoder_impl_ffmpeg.h"
#include <algorithm>
#include <limits>
extern "C" {
#include "third_party/ffmpeg/libavcodec/avcodec.h"
#include "third_party/ffmpeg/libavformat/avformat.h"
#include "third_party/ffmpeg/libavutil/imgutils.h"
#include "third_party/ffmpeg/libavutil/opt.h"
} // extern "C"
#include "base/checks.h"
#include "base/criticalsection.h"
#include "base/keep_ref_until_done.h"
#include "base/logging.h"
#include "system_wrappers/include/metrics.h"
#include "libyuv/convert.h"
namespace webrtc {
namespace {
#define PRINT_TIME_DECODE_DELAY 0
const AVPixelFormat kPixelFormat = AV_PIX_FMT_YUV420P;
const size_t kYPlaneIndex = 0;
const size_t kUPlaneIndex = 1;
const size_t kVPlaneIndex = 2;
// Used by histograms. Values of entries should not be changed.
enum H264DecoderImplEvent {
kH264DecoderEventInit = 0,
kH264DecoderEventError = 1,
kH264DecoderEventMax = 16,
};
#if defined(WEBRTC_INITIALIZE_FFMPEG)
rtc::CriticalSection ffmpeg_init_lock;
bool ffmpeg_initialized = false;
// Called by FFmpeg to do mutex operations if initialized using
// |InitializeFFmpeg|.
int LockManagerOperation(void** lock, AVLockOp op)
EXCLUSIVE_LOCK_FUNCTION() UNLOCK_FUNCTION() {
switch (op) {
case AV_LOCK_CREATE:
*lock = new rtc::CriticalSection();
return 0;
case AV_LOCK_OBTAIN:
static_cast<rtc::CriticalSection*>(*lock)->Enter();
return 0;
case AV_LOCK_RELEASE:
static_cast<rtc::CriticalSection*>(*lock)->Leave();
return 0;
case AV_LOCK_DESTROY:
delete static_cast<rtc::CriticalSection*>(*lock);
*lock = nullptr;
return 0;
}
RTC_NOTREACHED() << "Unrecognized AVLockOp.";
return -1;
}
void InitializeFFmpeg() {
LOG_F(LS_INFO);
rtc::CritScope cs(&ffmpeg_init_lock);
if (!ffmpeg_initialized) {
if (av_lockmgr_register(LockManagerOperation) < 0) {
RTC_NOTREACHED() << "av_lockmgr_register failed.";
return;
}
av_register_all();
ffmpeg_initialized = true;
}
}
#endif // defined(WEBRTC_INITIALIZE_FFMPEG)
} // namespace
int H264DecoderImplFfmpeg::AVGetBuffer2(
AVCodecContext* context, AVFrame* av_frame, int flags) {
// Set in |InitDecode|.
H264DecoderImplFfmpeg* decoder = static_cast<H264DecoderImplFfmpeg*>(context->opaque);
// DCHECK values set in |InitDecode|.
RTC_DCHECK(decoder);
RTC_DCHECK_EQ(context->pix_fmt, kPixelFormat);
// Necessary capability to be allowed to provide our own buffers.
RTC_DCHECK(context->codec->capabilities | AV_CODEC_CAP_DR1);
// |av_frame->width| and |av_frame->height| are set by FFmpeg. These are the
// actual image's dimensions and may be different from |context->width| and
// |context->coded_width| due to reordering.
int width = av_frame->width;
int height = av_frame->height;
// See |lowres|, if used the decoder scales the image by 1/2^(lowres). This
// has implications on which resolutions are valid, but we don't use it.
RTC_CHECK_EQ(context->lowres, 0);
// Adjust the |width| and |height| to values acceptable by the decoder.
// Without this, FFmpeg may overflow the buffer. If modified, |width| and/or
// |height| are larger than the actual image and the image has to be cropped
// (top-left corner) after decoding to avoid visible borders to the right and
// bottom of the actual image.
avcodec_align_dimensions(context, &width, &height);
RTC_CHECK_GE(width, 0);
RTC_CHECK_GE(height, 0);
int ret = av_image_check_size(static_cast<unsigned int>(width),
static_cast<unsigned int>(height), 0, nullptr);
if (ret < 0) {
LOG(LS_ERROR) << "Invalid picture size " << width << "x" << height;
decoder->ReportError();
return ret;
}
// The video frame is stored in |video_frame|. |av_frame| is FFmpeg's version
// of a video frame and will be set up to reference |video_frame|'s buffers.
VideoFrame* video_frame = new VideoFrame();
// FFmpeg expects the initial allocation to be zero-initialized according to
// http://crbug.com/390941. Our pool is set up to zero-initialize new buffers.
video_frame->set_video_frame_buffer(
decoder->pool_.CreateBuffer(width, height));
// DCHECK that we have a continuous buffer as is required.
RTC_DCHECK_EQ(video_frame->buffer(kUPlane),
video_frame->buffer(kYPlane) + vide