Position Dependent Intra Prediction Combination (PDPC)

  • A brief introduction to the PDPC technique in JVET
  • PDPC is a newly added tool; it does not exist in HEVC.
  • Reference blog: http://blog.csdn.net/lin453701006/article/details/52735385
  • That blog explains the technique very clearly; the following is a short summary.
    • Obtain the best prediction mode with the normal HEVC prediction process:
      1. Perform intra prediction along the chosen direction to get the predicted values p.
      2. If the prediction mode is neither DC nor Planar, apply boundary filtering to the predicted values to get the filtered prediction p', using the HEVC boundary filter.
    • If PDPC is enabled for this CU, the following steps are performed instead:
      1. Fetch the unfiltered reference samples r.
      2. Filter r to obtain s.
      3. Perform intra prediction from s (using the normal HEVC prediction process) to get the prediction q.
      4. Combine r and q to get the final prediction p' (see the sketch at the end of this list).
        • Both approaches are evaluated and the one with the smaller RD cost is selected.
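    • To make step 4 concrete, here is a minimal stand-alone sketch of the per-sample weighting (the function name pdpcSample and the example weights are illustrative assumptions; in JEM the weights come from the g_pdpc_pred_param table and decay with the distance to the block boundary, as the code excerpt further below shows):

      #include <algorithm>
      #include <cstdio>

      // q      : prediction sample obtained from the filtered reference s
      // rLeft  : unfiltered left reference sample of this row
      // rTop   : unfiltered top reference sample of this column
      // rTL    : unfiltered top-left corner sample
      // wL, wT, wTL : position dependent weights, normalized to 64 (1 << 6)
      static int pdpcSample(int q, int rLeft, int rTop, int rTL,
                            int wL, int wT, int wTL, int bitDepth)
      {
        const int shift  = 6;
        const int offset = 1 << (shift - 1);
        const int wCur   = (1 << shift) - wL - wT + wTL;   // weights sum to 64
        int val = (wL * rLeft + wT * rTop - wTL * rTL + wCur * q + offset) >> shift;
        return std::min(std::max(val, 0), (1 << bitDepth) - 1); // clip to the sample range
      }

      int main()
      {
        // example values only: near the top-left corner the boundary weights are large,
        // so the unfiltered reference samples dominate the combined prediction
        std::printf("%d\n", pdpcSample(120, 100, 104, 96, 32, 32, 16, 8)); // prints 108
        return 0;
      }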

  • Reading the code in JEM

    • Key code; see predIntraAng
       
      if (iPdpcIdx != 0)
      {
      #if COM16_C1046_PDPC_RSAF_HARMONIZATION
        // get the non-filtered reference
        Pel *ptrSrc = getPredictorPtr(compID, false);
      #endif
        const Int iBlkSize     = iWidth;
        const Int iSrcStride   = (iWidth << 1) + 1;
        const Int iDoubleWidth = iWidth << 1;
      #if VCEG_AZ07_INTRA_65ANG_MODES
        Int iSelMode = (uiDirMode > 1 ? 18 + ((Int(uiDirMode) - 34) >> 1) : uiDirMode);
        const Int * pPdpcPar = g_pdpc_pred_param[iBlkSizeGrp][iPdpcIdx][iSelMode];
      #else
        Int * pPdpcPar = g_pdpc_pred_param[iBlkSizeGrp][iPdpcIdx][uiDirMode];
      #endif
        Int * piRefVector = piTempRef + iDoubleWidth;
        Int * piLowpRefer = piFiltRef + iDoubleWidth;

        // unfiltered reference: top row at positive indexes, left column at negative indexes
        for (Int j = 0; j <= iDoubleWidth; j++)
          piRefVector[j] = ptrSrc[j];
        for (Int i = 1; i <= iDoubleWidth; i++)
          piRefVector[-i] = ptrSrc[i*iSrcStride];

        if (pPdpcPar[5] != 0)
        { // filter the reference samples
          xReferenceFilter(iBlkSize, pPdpcPar[4], pPdpcPar[5], piRefVector, piLowpRefer);
          for (Int j = 0; j <= iDoubleWidth; j++)
            ptrSrc[j] = piLowpRefer[j];
          for (Int i = 1; i <= iDoubleWidth; i++)
            ptrSrc[i*iSrcStride] = piLowpRefer[-i];
        }

        if (uiDirMode == PLANAR_IDX)
          xPredIntraPlanar(ptrSrc + sw + 1, sw, pDst, uiStride, iWidth, iHeight);
        else
        {
          const Bool enableEdgeFilters = !(pcCU->isRDPCMEnabled(uiAbsPartIdx) && pcCU->getCUTransquantBypass(uiAbsPartIdx));
      #if O0043_BEST_EFFORT_DECODING
          const Int channelsBitDepthForPrediction = rTu.getCU()->getSlice()->getSPS()->getStreamBitDepth(channelType);
      #else
          const Int channelsBitDepthForPrediction = rTu.getCU()->getSlice()->getSPS()->getBitDepth(channelType);
      #endif
      #if VCEG_AZ07_INTRA_4TAP_FILTER
          const Bool enable4TapFilter = pcCU->getSlice()->getSPS()->getUseIntra4TapFilter();
      #endif
          // get the predicted samples, pDst
          xPredIntraAng(channelsBitDepthForPrediction, ptrSrc + sw + 1, sw, pDst, uiStride, iWidth, iHeight, channelType, uiDirMode, enableEdgeFilters
      #if VCEG_AZ07_INTRA_4TAP_FILTER
            , enable4TapFilter
      #endif
            );
        }

        // restore the unfiltered reference samples for the weighted prediction
        if (pPdpcPar[5] != 0)
        {
          for (int j = 0; j <= iDoubleWidth; j++)
            ptrSrc[j] = piRefVector[j];
          for (int i = 1; i <= iDoubleWidth; i++)
            ptrSrc[i*iSrcStride] = piRefVector[-i];
        }

        Int scale     = (iBlkSize < 32 ? 0 : 1);
        Int bitDepth  = rTu.getCU()->getSlice()->getSPS()->getBitDepth(channelType);
        Int ParShift  = 6;                 // normalization factor
        Int ParScale  = 1 << ParShift;
        Int ParOffset = 1 << (ParShift - 1);

        // weighted combination of the intra prediction with the unfiltered reference samples
        for (Int row = 0; row < iBlkSize; row++)
        {
          Int pos          = row * uiStride;
          Int shiftRow     = row >> scale;
          Int Coeff_Top    = pPdpcPar[2] >> shiftRow;    // top weight decays with the row distance
          Int Coeff_offset = pPdpcPar[3] >> shiftRow;
          for (Int col = 0; col < iBlkSize; col++, pos++)
          {
            Int shiftCol      = col >> scale;
            Int Coeff_Left    = pPdpcPar[0] >> shiftCol; // left weight decays with the column distance
            Int Coeff_TopLeft = (pPdpcPar[1] >> shiftCol) + Coeff_offset;
            Int Coeff_Cur     = ParScale - Coeff_Left - Coeff_Top + Coeff_TopLeft;
            Int sampleVal = (Coeff_Left * piRefVector[-row - 1] + Coeff_Top * piRefVector[col + 1] - Coeff_TopLeft * piRefVector[0] + Coeff_Cur * pDst[pos] + ParOffset) >> ParShift;
            pDst[pos] = Clip3(0, ((1 << bitDepth) - 1), sampleVal);
          }
        }
        return; // terminate the prediction process
      }
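    • Note how the weights decay: pPdpcPar[0] and pPdpcPar[2] hold the left and top boundary weights, and each one is right-shifted by col >> scale or row >> scale, so the contribution of the unfiltered reference roughly halves with every sample (every second sample for blocks of size 32 and larger) away from the boundary. A tiny stand-alone illustration of that decay (the starting weight of 32 is an assumed example value, not one taken from g_pdpc_pred_param):

      #include <cstdio>

      int main()
      {
        const int startWeight = 32;                     // assumed example boundary weight
        const int blkSize     = 8;
        const int scale       = (blkSize < 32 ? 0 : 1); // same rule as in the excerpt above
        for (int row = 0; row < blkSize; row++)
          std::printf("row %d: top weight = %d\n", row, startWeight >> (row >> scale));
        return 0;                                       // prints 32, 16, 8, 4, 2, 1, 0, 0
      }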
    • Reference sample filtering; see xReferenceFilter
       
      // candidate 7-tap filter coefficients (one symmetric half, centre tap first)
      const Int imCoeff[3][4] =
      {
        { 20, 15, 6, 1 },
        { 16, 14, 7, 3 },
        { 14, 12, 9, 4 }
      };

      const Int * piFc;
      const Int iDoubleSize = 2 * iBlkSize;            // symmetric representation
      Int * piTmp = &piBinBuff[2 * MAX_CU_SIZE + 4];   // to use negative indexes
      Int * piDat = piRefrVector;
      Int * piRes = piLowPassRef;

      // copy the reference samples into the temporary buffer
      for (Int k = -iDoubleSize; k <= iDoubleSize; k++)
        piTmp[k] = piDat[k];

      // mirror-pad both ends so all filter taps stay inside the buffer
      for (Int n = 1; n <= 3; n++)
      {
        piTmp[-iDoubleSize - n] = piTmp[-iDoubleSize - 1 + n];
        piTmp[iDoubleSize + n]  = piTmp[iDoubleSize + 1 - n];
      }

      switch (iFilterOrder)
      {
      case 0:
        break;
      case 1:
        // 3-tap (1, 2, 1)/4
        for (Int k = -iDoubleSize; k <= iDoubleSize; k++)
          piRes[k] = ((piTmp[k] << 1) + piTmp[k - 1] + piTmp[k + 1] + 2) >> 2;
        break;
      case 2:
        // 5-tap (1, 4, 6, 4, 1)/16
        for (Int k = -iDoubleSize; k <= iDoubleSize; k++)
          piRes[k] = ((piTmp[k] << 1) + ((piTmp[k] + piTmp[k - 1] + piTmp[k + 1]) << 2) + piTmp[k - 2] + piTmp[k + 2] + 8) >> 4;
        break;
      case 3:
      case 5:
      case 7:
        // choose one filter from below:
        // 7-tap (1, 6, 15, 20, 15, 6, 1)/64
        // 7-tap (3, 7, 14, 16, 14, 7, 3)/64
        // 7-tap (4, 9, 12, 14, 12, 9, 4)/64
        piFc = imCoeff[(iFilterOrder - 3) >> 1];
        for (Int k = -iDoubleSize; k <= iDoubleSize; k++)
        {
          Int s = 32 + piFc[0] * piTmp[k];
          for (Int n = 1; n < 4; n++)
            s += piFc[n] * (piTmp[k - n] + piTmp[k + n]);
          piRes[k] = s >> 6;
        }
        break;
      default:
        printf("Invalid intra prediction reference filter order %d", iFilterOrder);
        exit(1);
      }

      Int ParShift  = 6;                // normalization factor
      Int ParScale  = 1 << ParShift;
      Int ParOffset = 1 << (ParShift - 1);

      // blend the filtered reference with the original (unfiltered) reference samples
      if (iOrigWeight != 0)
      {
        Int iCmptWeight = ParScale - iOrigWeight;
        for (Int k = -iDoubleSize; k <= iDoubleSize; k++)
          piLowPassRef[k] = (iOrigWeight * piRefrVector[k] + iCmptWeight * piLowPassRef[k] + ParOffset) >> ParShift;
      }
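    • For intuition, a simplified stand-alone version of the iFilterOrder == 1 branch: the 3-tap (1, 2, 1)/4 smoother applied to a toy reference array (the input values are assumptions for illustration only), padding the out-of-range tap with the edge sample just as the n = 1 mirroring rule above does:

      #include <cstdio>

      int main()
      {
        const int ref[] = { 100, 104, 120, 160, 200, 204, 208, 210, 212 };
        const int n = sizeof(ref) / sizeof(ref[0]);
        for (int k = 0; k < n; k++)
        {
          int left  = ref[k > 0     ? k - 1 : 0];       // edge padding on the left
          int right = ref[k < n - 1 ? k + 1 : n - 1];   // edge padding on the right
          int out   = ((ref[k] << 1) + left + right + 2) >> 2;
          std::printf("%3d -> %3d\n", ref[k], out);
        }
        return 0;
      }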
