Below are notes on part of the code.
/*
 * m_pEncSurfaces = new mfxFrameSurface1[4]; the 4 matches the nAsyncDepth
 * value in the EncodeParams structure.
 * Allocate the memory the encoder needs and set up the Y/U/V pointers.
 */
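/*
 * Sketch (not in the original code): instead of hard-coding 4, the Media SDK
 * can report how many surfaces the encoder actually needs. "session" is
 * assumed to be an already-initialized mfxSession.
 */
mfxFrameAllocRequest EncRequest;
memset(&EncRequest, 0, sizeof(EncRequest));
MFXVideoENCODE_QueryIOSurf(session, &pPipeline->m_mfxEncParams, &EncRequest);
mfxU16 nEncSurfNum = EncRequest.NumFrameSuggested; // would replace the literal 4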
mfxFrameSurface1* m_pEncSurfaces = NULL;
m_pEncSurfaces = new mfxFrameSurface1[4];
MSDK_CHECK_POINTER(m_pEncSurfaces, MFX_ERR_MEMORY_ALLOC);
// NV12 takes width*height*12/8 (= *3/2) bytes per frame; allocate 4 frames
mfxU8* surfaceBuffers = new mfxU8[(ffmpeg_dectx->width * ffmpeg_dectx->height * 12) / 8 * 4];
for (int i = 0; i < 4; i++)
{
memset(&(m_pEncSurfaces[i]), 0, sizeof(mfxFrameSurface1));
MSDK_MEMCPY_VAR(m_pEncSurfaces[i].Info, &(pPipeline->m_mfxEncParams.mfx.FrameInfo), sizeof(mfxFrameInfo));
// NV12: Y plane first, then the interleaved UV plane; V sits one byte after U
m_pEncSurfaces[i].Data.Y = &surfaceBuffers[(ffmpeg_dectx->width*ffmpeg_dectx->height * 12) / 8 * i];
m_pEncSurfaces[i].Data.U = m_pEncSurfaces[i].Data.Y + ffmpeg_dectx->width * ffmpeg_dectx->height;
m_pEncSurfaces[i].Data.V = m_pEncSurfaces[i].Data.U + 1;
m_pEncSurfaces[i].Data.Pitch = ffmpeg_dectx->width;
}
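/*
 * Sketch (not in the original code): the conversion below always writes into
 * surface 0. In a real pipeline you would pick a surface the SDK has already
 * released, i.e. one whose Data.Locked counter is 0.
 */
int GetFreeSurfaceIndex(mfxFrameSurface1* pSurfaces, int nPoolSize)
{
    for (int i = 0; i < nPoolSize; i++)
        if (0 == pSurfaces[i].Data.Locked)
            return i;
    return MFX_ERR_NOT_FOUND;
}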
/*
 * libyuv::I420ToNV12 is a function from the libyuv library.
 * In mfxFrameSurface1, Data.Y plus the interleaved Data.UV plane is exactly
 * the NV12 layout, so a single conversion call fills the encoder surface.
 */
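/*
 * Illustration (not in the original code): NV12 layout for a width x height frame.
 *   Y  plane : width*height bytes, one byte per pixel
 *   UV plane : U0 V0 U1 V1 ..., width*height/2 bytes
 * Total: width*height*3/2 bytes, i.e. the width*height*12/8 allocated above.
 */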
int m_decodec_flag = 0;
if (0 > avcodec_decode_video2(ffmpeg_dectx, m_frame, &m_decodec_flag, &packet))
{
std::cout << "decodec error" << std::endl;
return -1;
}
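/*
 * Sketch (not in the original code): avcodec_decode_video2 is deprecated in
 * newer FFmpeg releases; the equivalent send/receive API looks like this.
 */
if (avcodec_send_packet(ffmpeg_dectx, &packet) < 0)
    return -1;
while (avcodec_receive_frame(ffmpeg_dectx, m_frame) == 0)
{
    // m_frame now holds one decoded picture; convert and encode it here
}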
if (m_decodec_flag)
{
// scale/convert the decoded frame into m_yuvframe (I420, allocated elsewhere)
sws_scale(img_convert_ctx, (const uint8_t* const*)m_frame->data, m_frame->linesize, 0, ffmpeg_dectx->height, m_yuvframe->data, m_yuvframe->linesize);
// I420 -> NV12; use the frame's linesize values in case the planes are padded
libyuv::I420ToNV12(m_yuvframe->data[0], m_yuvframe->linesize[0],
    m_yuvframe->data[1], m_yuvframe->linesize[1],
    m_yuvframe->data[2], m_yuvframe->linesize[2],
    m_pEncSurfaces[0].Data.Y, m_pEncSurfaces[0].Data.Pitch,
    m_pEncSurfaces[0].Data.UV, m_pEncSurfaces[0].Data.Pitch,
    ffmpeg_dectx->width, ffmpeg_dectx->height);
}
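/*
 * Sketch (not in the original code): once the surface holds NV12 data it
 * would be handed to the encoder roughly like this. "session" and the
 * bitstream sizing are assumptions; EncodeFrameAsync and SyncOperation are
 * the real Media SDK entry points.
 */
mfxBitstream mfxBS;
memset(&mfxBS, 0, sizeof(mfxBS));
mfxBS.MaxLength = ffmpeg_dectx->width * ffmpeg_dectx->height * 4; // generous upper bound
mfxBS.Data = new mfxU8[mfxBS.MaxLength];
mfxSyncPoint syncp = NULL;
mfxStatus sts = MFXVideoENCODE_EncodeFrameAsync(session, NULL, &m_pEncSurfaces[0], &mfxBS, &syncp);
if (MFX_ERR_NONE == sts && syncp)
{
    MFXVideoCORE_SyncOperation(session, syncp, 60000); // wait up to 60 s
    // compressed data: mfxBS.Data + mfxBS.DataOffset, length mfxBS.DataLength
}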