- JVET中PDPC技术原理简介
- 该技术是新增加的技术,HEVC中并不存在
- 参考博客:http://blog.csdn.net/lin453701006/article/details/52735385
- 博客中讲的十分清楚,小结下。
- 根据HEVC预测方式得到最优的预测模式
- 帧内预测方向,得到预测值p
- 如果预测模式非DC也非Planar,则对预测值进行边界滤波,得到滤波后的预测像素p’,其中边界滤波方法采用boundary filter
- 若该CU支持PDPC,则进行以下操作
- 获取未滤波的参考像素r
- 对r进行滤波,得到s
- 根据s进行帧内预测(根据HEVC的方式做帧内预测),得到预测值q
- 根据r和q,得到最终的预测值p’
- 两种方式都做,取RDCost最小的一种
- 根据HEVC预测方式得到最优的预测模式
- JEM中代码阅读
- 关键代码,参考predIntraAng
// PDPC (Position Dependent intra Prediction Combination) path of predIntraAng.
// Steps: save the unfiltered reference samples r, optionally low-pass filter
// them into s, run the normal HEVC-style intra prediction from s to get q in
// pDst, then blend q with r using position-dependent weights from
// g_pdpc_pred_param and clip to the bit depth.
if (iPdpcIdx != 0)
{
#if COM16_C1046_PDPC_RSAF_HARMONIZATION
  // get non-filtered reference samples
  Pel *ptrSrc = getPredictorPtr(compID, false);
#endif
  const Int iBlkSize = iWidth;               // square block assumed on this path
  const Int iSrcStride = (iWidth << 1) + 1;  // stride of the reference sample array
  const Int iDoubleWidth = iWidth << 1;
#if VCEG_AZ07_INTRA_65ANG_MODES
  // Map the 65 extended angular modes back onto the 35-mode parameter table.
  Int iSelMode = (uiDirMode > 1 ? 18 + ((Int(uiDirMode) - 34) >> 1) : uiDirMode);
  const Int * pPdpcPar = g_pdpc_pred_param[iBlkSizeGrp][iPdpcIdx][iSelMode];
#else
  Int * pPdpcPar = g_pdpc_pred_param[iBlkSizeGrp][iPdpcIdx][uiDirMode];
#endif
  // piRefVector/piLowpRefer are centered so negative indices address the left
  // column and non-negative indices the top row.
  Int * piRefVector = piTempRef + iDoubleWidth;  // r: unfiltered references
  Int * piLowpRefer = piFiltRef + iDoubleWidth;  // s: low-pass filtered references

  // Save the unfiltered references r (top row at [0..2W], left column at [-1..-2W]).
  for (Int j = 0; j <= iDoubleWidth; j++)
    piRefVector[j] = ptrSrc[j];
  for (Int i = 1; i <= iDoubleWidth; i++)
    piRefVector[-i] = ptrSrc[i * iSrcStride];

  if (pPdpcPar[5] != 0)
  { // filter reference samples and write s back so the prediction below uses it
    xReferenceFilter(iBlkSize, pPdpcPar[4], pPdpcPar[5], piRefVector, piLowpRefer);
    for (Int j = 0; j <= iDoubleWidth; j++)
      ptrSrc[j] = piLowpRefer[j];
    for (Int i = 1; i <= iDoubleWidth; i++)
      ptrSrc[i * iSrcStride] = piLowpRefer[-i];
  }

  // Regular intra prediction from the (possibly filtered) references -> q in pDst.
  if (uiDirMode == PLANAR_IDX)
    xPredIntraPlanar(ptrSrc + sw + 1, sw, pDst, uiStride, iWidth, iHeight);
  else
  {
    const Bool enableEdgeFilters = !(pcCU->isRDPCMEnabled(uiAbsPartIdx) && pcCU->getCUTransquantBypass(uiAbsPartIdx));
#if O0043_BEST_EFFORT_DECODING
    const Int channelsBitDepthForPrediction = rTu.getCU()->getSlice()->getSPS()->getStreamBitDepth(channelType);
#else
    const Int channelsBitDepthForPrediction = rTu.getCU()->getSlice()->getSPS()->getBitDepth(channelType);
#endif
#if VCEG_AZ07_INTRA_4TAP_FILTER
    const Bool enable4TapFilter = pcCU->getSlice()->getSPS()->getUseIntra4TapFilter();
#endif
    // get predicted samples, pDst
    xPredIntraAng(channelsBitDepthForPrediction, ptrSrc + sw + 1, sw, pDst, uiStride, iWidth, iHeight, channelType, uiDirMode, enableEdgeFilters
      // FIX: these two directives had lost their leading '#' ("if ... / endif"),
      // which does not compile.
#if VCEG_AZ07_INTRA_4TAP_FILTER
      , enable4TapFilter
#endif
      );
  }

  // Restore the unfiltered references r for the weighted combination below.
  if (pPdpcPar[5] != 0)
  {
    for (Int j = 0; j <= iDoubleWidth; j++)
      ptrSrc[j] = piRefVector[j];
    for (Int i = 1; i <= iDoubleWidth; i++)
      ptrSrc[i * iSrcStride] = piRefVector[-i];
  }

  // Position-dependent weighted combination of q (in pDst) with r.
  Int scale = (iBlkSize < 32 ? 0 : 1);  // weights decay half as fast for blocks >= 32
  Int bitDepth = rTu.getCU()->getSlice()->getSPS()->getBitDepth(channelType);
  Int ParShift = 6;                     // normalization factor
  Int ParScale = 1 << ParShift;
  Int ParOffset = 1 << (ParShift - 1);  // rounding offset for the >> ParShift below
  for (Int row = 0; row < iBlkSize; row++)
  {
    Int pos = row * uiStride;
    Int shiftRow = row >> scale;
    Int Coeff_Top = pPdpcPar[2] >> shiftRow;      // top weight shrinks with row distance
    Int Coeff_offset = pPdpcPar[3] >> shiftRow;
    for (Int col = 0; col < iBlkSize; col++, pos++)
    {
      Int shiftCol = col >> scale;
      Int Coeff_Left = pPdpcPar[0] >> shiftCol;   // left weight shrinks with column distance
      Int Coeff_TopLeft = (pPdpcPar[1] >> shiftCol) + Coeff_offset;
      // Weights sum to ParScale, so sampleVal stays a convex-style combination.
      Int Coeff_Cur = ParScale - Coeff_Left - Coeff_Top + Coeff_TopLeft;
      Int sampleVal = (Coeff_Left * piRefVector[-row - 1] + Coeff_Top * piRefVector[col + 1] - Coeff_TopLeft * piRefVector[0] + Coeff_Cur * pDst[pos] + ParOffset) >> ParShift;
      pDst[pos] = Clip3(0, ((1 << bitDepth) - 1), sampleVal);
    }
  }
  return; // PDPC produced the final prediction; terminate the prediction process
}
- 参考像素滤波,xReferenceFilter
const Int imCoeff[3][4] =
{
{ 20, 15, 6, 1 },
{ 16, 14, 7, 3 },
{ 14, 12, 9, 4 }
};
const Int * piFc;
const Int iDoubleSize = 2 * iBlkSize; // symmetric representation
Int * piTmp = &piBinBuff[2 * MAX_CU_SIZE + 4]; // to use negative indexes
Int * piDat = piRefrVector;
Int * piRes = piLowPassRef;for (Int k = -iDoubleSize; k <= iDoubleSize; k++)
piTmp[k] = piDat[k];for (Int n = 1; n <= 3; n++)
{
piTmp[-iDoubleSize - n] = piTmp[-iDoubleSize - 1 + n];
piTmp[iDoubleSize + n] = piTmp[iDoubleSize + 1 - n];
}switch (iFilterOrder)
{
case 0:
break;
case 1:
//3-tap (1 2 1)/4
for (Int k = -iDoubleSize; k <= iDoubleSize; k++)
piRes[k] = ((piTmp[k] << 1) + piTmp[k - 1] + piTmp[k + 1] + 2) >> 2;
break;
case 2:
//5-tap (1, 4, 6, 4, 1)/16
for (Int k = -iDoubleSize; k <= iDoubleSize; k++)
piRes[k] = ((piTmp[k] << 1) + ((piTmp[k] + piTmp[k - 1] + piTmp[k + 1]) << 2) + piTmp[k - 2] + piTmp[k + 2] + 8) >> 4;
break;
case 3:
case 5:
case 7:
//choose 1 filter from below
//7-tap (1, 6, 15, 20, 15, 6, 1)/64
//7-tap (3, 7, 14, 16, 14, 7, 3)/64
//7-tap (4, 9, 12, 14, 12, 9, 7)/64
piFc = imCoeff[(iFilterOrder - 3) >> 1];
for (Int k = -iDoubleSize; k <= iDoubleSize; k++)
{
Int s = 32 + piFc[0] * piTmp[k];
for (Int n = 1; n < 4; n++)
s += piFc[n] * (piTmp[k - n] + piTmp[k + n]);
piRes[k] = s >> 6;
}
break;
default:
printf(“Invalid intra prediction reference filter order %d”, iFilterOrder);
exit(1);
}Int ParShift = 6; //normalization factor
Int ParScale = 1 << ParShift;
Int ParOffset = 1 << (ParShift - 1);- 关键代码,参考predIntraAng
//weight predict using original reference pixels
if (iOrigWeight != 0)
{
Int iCmptWeight = ParScale - iOrigWeight;
for (Int k = -iDoubleSize; k <= iDoubleSize; k++)
piLowPassRef[k] = (iOrigWeight * piRefrVector[k] + iCmptWeight * piLowPassRef[k] + ParOffset) >> ParShift;
}
-