Learning the CCCM Code in ECM6.0

Preface

This article introduces the CCCM (Convolutional Cross-Component Model) coding tool and walks through the corresponding code in ECM6.0, mainly the functions xCccmCreateLumaRef, xCccmCalcModels and xCccmApplyModel. Corrections are welcome if anything here is wrong.


I. The CCCM concept

CCCM is the convolutional cross-component model. Like CCLM, it predicts chroma samples from reconstructed luma samples, and it likewise comes in a single-model and a multi-model variant (in the multi-model case the reconstructed luma samples are split into two groups by a threshold, the average luma value of the reference area, and a model is derived separately for each group).

 

The prediction proceeds in the following steps:

1. Obtaining the reference sample area

As shown in the figure above, the reference sample area consists of the chroma samples in the six rows above and the six columns to the left of the current PU; the reference area also extends one PU width to the right and one PU height below. The blue area is the padding needed to support the 7-tap spatial filter used by the model (its luma samples are needed when the filter coefficients are computed).
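To make the filter concrete, here is the prediction model as I understand it from the CCCM design and from the code shown later (the exact fixed-point scaling lives inside CccmModel, so treat this as a sketch rather than a normative formula). For each chroma position, the prediction is a convolution of the co-located down-sampled luma sample C, its four neighbours N, S, W, E, a nonlinear term P and a bias term B:

$$\mathrm{predChroma} = c_0 C + c_1 N + c_2 S + c_3 W + c_4 E + c_5 P + c_6 B$$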

2. Parameter calculation

(1) Compute the autocorrelation matrix of the luma inputs (for each sample position, multiply the corresponding inputs pairwise and accumulate; only the upper triangle is computed, since the matrix is symmetric);

(2) Compute the cross-correlation vector between the luma inputs and the chroma output (for each sample position, multiply each luma input by the chroma sample at that position and accumulate);

(3) Perform an LDL decomposition and solve for the filter coefficients, as summarised right below.
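In matrix form (my own summary of the three steps above, not code from ECM): stack the seven filter inputs of every reference sample as a row of a matrix $A$, and the corresponding reference chroma samples as a vector $y$. The coefficient vector $c$ then solves the normal equations

$$(A^{\mathsf T} A)\,c = A^{\mathsf T} y, \qquad A^{\mathsf T} A = L D L^{\mathsf T},$$

where $A^{\mathsf T} A$ is the autocorrelation matrix, $A^{\mathsf T} y$ is the cross-correlation vector, and the LDL factorization (unit lower-triangular $L$, diagonal $D$) lets the coefficients be obtained by forward and backward substitution.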

 

II. The CCCM code

1. Entry point

The entry point is IntraSearch::xRecurIntraChromaCodingQT (this function computes the cost of intra chroma modes):

if( pu.cccmFlag )
    {
      xGetLumaRecPixels( pu, cbArea );                   // fetch the reconstructed luma and down-sample it
      predIntraCCCM( pu, piPredCb, piPredCr, predMode ); // perform CCCM prediction
      // predMode distinguishes single-model CCCM from multi-model CCCM
    }

2. Fetching and down-sampling the reconstructed luma

Inside xGetLumaRecPixels( pu, cbArea ):

void IntraPrediction::xGetLumaRecPixels(const PredictionUnit &pu, CompArea chromaArea)
{
#if JVET_AA0057_CCCM
  if ( pu.cccmFlag )
  {
    xCccmCreateLumaRef(pu); // builds the down-sampled luma for the reference area and the current block
    return;
  }
#endif

The function of interest is xCccmCreateLumaRef.

// Builds the down-sampled luma for both the reference area and the current block
void IntraPrediction::xCccmCreateLumaRef(const PredictionUnit& pu)
{
  const CPelBuf recoLuma = pu.cs->picture->getRecoBuf(COMPONENT_Y); // reconstructed luma picture
  const int  maxPosPicX  = pu.cs->picture->chromaSize().width  - 1;
  const int  maxPosPicY  = pu.cs->picture->chromaSize().height - 1;

  xCccmCalcRefArea(pu); // Find the available reference area
  // xCccmCalcRefArea checks how many reference samples are usable and stores the usable reference
  // area size together with the position and size of the current block
  
  // areaWidth/areaHeight: width and height of the reference area; refSizeX/refSizeY: number of reference
  // columns available on the left / rows available above; refPosPicX/refPosPicY: position of the reference
  // area in the (chroma) picture, i.e. its first sample (excluding the padding)
  int areaWidth, areaHeight, refSizeX, refSizeY, refPosPicX, refPosPicY;
  // refLuma: down-sampled luma buffer covering both the reference area and the PU
  PelBuf refLuma = xCccmGetLumaRefBuf(pu, areaWidth, areaHeight, refSizeX, refSizeY, refPosPicX, refPosPicY);
  
  int puBorderX = refSizeX + pu.blocks[COMPONENT_Cb].width;  // x coordinate of the PU's right border
  int puBorderY = refSizeY + pu.blocks[COMPONENT_Cb].height; // y coordinate of the PU's bottom border
  
  // Generate down-sampled luma for the area covering both the PU and the top/left reference areas (+ top and left paddings)

  // Note: areaWidth and areaHeight here are the size of the reference area (not the size of the PU itself)
  // Down-sampling loop
  for (int y = -CCCM_FILTER_PADDING; y < areaHeight; y++)
  {
    for (int x = -CCCM_FILTER_PADDING; x < areaWidth; x++)
    {
      if (( x >= puBorderX && y >= refSizeY ) ||
          ( y >= puBorderY && x >= refSizeX ))
      {
        continue;
      }

      int chromaPosPicX = refPosPicX + x;
      int chromaPosPicY = refPosPicY + y;
      
      chromaPosPicX = chromaPosPicX < 0 ? 0 : chromaPosPicX > maxPosPicX ? maxPosPicX : chromaPosPicX;
      chromaPosPicY = chromaPosPicY < 0 ? 0 : chromaPosPicY > maxPosPicY ? maxPosPicY : chromaPosPicY;
      
      refLuma.at( x, y ) = xCccmGetLumaVal(pu, recoLuma, chromaPosPicX, chromaPosPicY); // down-sample a single luma sample
    }
  }
  

  // Everything below is padding (the border of the reference sample area, the blue part, excluding the top row (y = -1) and the leftmost column (x = -1))
  // This padding is easier to follow with the reference-area figure at hand
  CHECK( CCCM_FILTER_PADDING != 1, "Only padding with one sample implemented" );
 
  // Pad right of top reference area
  // pad the rightmost column of the blue area, i.e. x = areaWidth, y = -1 .. refSizeY-1
  for (int y = -1; y < refSizeY; y++)
  {
    refLuma.at( areaWidth, y ) = refLuma.at( areaWidth - 1, y );
  }

  // Pad right of PU
  // pad the blue column to the right of the PU
  for (int y = refSizeY; y < puBorderY; y++)
  {
    refLuma.at( puBorderX, y ) = refLuma.at( puBorderX - 1, y );
  }

  // Pad right of left reference area
  // pad the blue column below the PU's first column (right of the left reference area)
  for (int y = puBorderY; y < areaHeight; y++)
  {
    refLuma.at( refSizeX, y ) = refLuma.at( refSizeX - 1, y );
  }

  // Pad below left reference area
  // pad the blue row below the left reference area
  for (int x = -1; x < refSizeX + 1; x++)
  {
    refLuma.at( x, areaHeight ) = refLuma.at( x, areaHeight - 1 );
  }

  // Pad below PU
  // pad the blue row below the PU
  for (int x = refSizeX; x < puBorderX + 1; x++)
  {
    refLuma.at( x, puBorderY ) = refLuma.at( x, puBorderY - 1 ); // copy from the row above
  }

  // Pad below right reference area
  // pad the blue row below the right reference area (y = refSizeY, x > puBorderX)
  for (int x = puBorderX + 1; x < areaWidth + 1; x++)
  {
    refLuma.at( x, refSizeY ) = refLuma.at( x, refSizeY - 1 );
  }
  
  // In dualtree we can also use luma from the right and below (if not on CTU/picture boundary)
  // In dual tree, luma from the right and below can also be used (if not on a CTU/picture boundary)
  if ( CS::isDualITree( *pu.cs ) )
  {
    int ctuWidth  = pu.cs->sps->getMaxCUWidth()  >> getComponentScaleX(COMPONENT_Cb, pu.chromaFormat);
    int ctuHeight = pu.cs->sps->getMaxCUHeight() >> getComponentScaleY(COMPONENT_Cb, pu.chromaFormat);

    // Samples right of top reference area
    int padPosPicX = refPosPicX + areaWidth;

    if ( padPosPicX <= maxPosPicX && (padPosPicX % ctuWidth) )
    {
      for (int y = -1; y < refSizeY; y++)
      {
        int chromaPosPicY = refPosPicY + y;
        chromaPosPicY     = chromaPosPicY < 0 ? 0 : chromaPosPicY > maxPosPicY ? maxPosPicY : chromaPosPicY;

        refLuma.at( areaWidth, y ) = xCccmGetLumaVal(pu, recoLuma, padPosPicX, chromaPosPicY);
      }
    }

    // Samples right of PU
    padPosPicX = refPosPicX + puBorderX;

    if ( padPosPicX <= maxPosPicX && (padPosPicX % ctuWidth) )
    {
      for (int y = refSizeY; y < puBorderY; y++)
      {
        int chromaPosPicY = refPosPicY + y;
        chromaPosPicY     = chromaPosPicY < 0 ? 0 : chromaPosPicY > maxPosPicY ? maxPosPicY : chromaPosPicY;

        refLuma.at( puBorderX, y ) = xCccmGetLumaVal(pu, recoLuma, padPosPicX, chromaPosPicY);
      }
    }

    // Samples right of left reference area
    padPosPicX = refPosPicX + refSizeX;

    if ( padPosPicX <= maxPosPicX )
    {
      for (int y = puBorderY; y < areaHeight; y++)
      {
        int chromaPosPicY = refPosPicY + y;
        chromaPosPicY     = chromaPosPicY < 0 ? 0 : chromaPosPicY > maxPosPicY ? maxPosPicY : chromaPosPicY;

        refLuma.at( refSizeX, y ) = xCccmGetLumaVal(pu, recoLuma, padPosPicX, chromaPosPicY);
      }
    }
    
    // Samples below left reference area
    int padPosPicY = refPosPicY + areaHeight;
    
    if ( padPosPicY <= maxPosPicY && (padPosPicY % ctuHeight) )
    {
      for (int x = -1; x < refSizeX + 1; x++)
      {
        int chromaPosPicX = refPosPicX + x;
        chromaPosPicX     = chromaPosPicX < 0 ? 0 : chromaPosPicX > maxPosPicX ? maxPosPicX : chromaPosPicX;
        
        refLuma.at( x, areaHeight ) = xCccmGetLumaVal(pu, recoLuma, chromaPosPicX, padPosPicY);
      }
    }
    
    // Samples below PU
    padPosPicY = refPosPicY + puBorderY;
    
    if ( padPosPicY <= maxPosPicY && (padPosPicY % ctuHeight) )
    {
      for (int x = refSizeX; x < puBorderX; x++) // Just go to PU border as the next sample may be out of CTU (and not needed anyways)
      {
        int chromaPosPicX = refPosPicX + x;
        chromaPosPicX     = chromaPosPicX < 0 ? 0 : chromaPosPicX > maxPosPicX ? maxPosPicX : chromaPosPicX;
        
        refLuma.at( x, puBorderY ) = xCccmGetLumaVal(pu, recoLuma, chromaPosPicX, padPosPicY);
      }
    }

    // Samples below right reference area
    padPosPicY = refPosPicY + refSizeY;
    
    if ( padPosPicY <= maxPosPicY )
    {
      // Avoid going outside of right CTU border where these samples are not yet available
      int puPosPicX        = pu.blocks[COMPONENT_Cb].x;
      int ctuRightEdgeDist = ctuWidth - (puPosPicX % ctuWidth) + refSizeX;
      int lastPosX         = ctuRightEdgeDist < areaWidth ? ctuRightEdgeDist : areaWidth;

      for (int x = puBorderX + 1; x < lastPosX; x++) // Just go to ref area border as the next sample may be out of CTU (and not needed anyways)
      {
        int chromaPosPicX = refPosPicX + x;
        chromaPosPicX     = chromaPosPicX < 0 ? 0 : chromaPosPicX > maxPosPicX ? maxPosPicX : chromaPosPicX;
        
        refLuma.at( x, refSizeY ) = xCccmGetLumaVal(pu, recoLuma, chromaPosPicX, padPosPicY);
      }
    }
  }
}
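xCccmGetLumaVal converts one chroma-grid position into a single down-sampled luma value. Its exact behaviour depends on the chroma format and SPS flags, so the following is only a rough, hypothetical sketch of the 4:2:0 case, assuming the usual CCLM-style 6-tap averaging (the name downSampleLuma420 and the omission of boundary clipping are my own simplifications, not ECM code):

// Hypothetical sketch: down-sample reconstructed luma at chroma position (posX, posY) for 4:2:0
// using the common 6-tap [1 2 1; 1 2 1] / 8 averaging. The real xCccmGetLumaVal also handles other
// chroma formats, the collocated-luma case and clipping at picture boundaries, all omitted here.
static Pel downSampleLuma420(const CPelBuf &recoLuma, int posX, int posY)
{
  const int lx = posX << 1; // luma column co-located with the chroma sample
  const int ly = posY << 1; // luma row co-located with the chroma sample

  return Pel( ( recoLuma.at(lx - 1, ly    ) + 2 * recoLuma.at(lx, ly    ) + recoLuma.at(lx + 1, ly    )
              + recoLuma.at(lx - 1, ly + 1) + 2 * recoLuma.at(lx, ly + 1) + recoLuma.at(lx + 1, ly + 1)
              + 4 ) >> 3 );
}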

3. CCCM calculation

void IntraPrediction::predIntraCCCM( const PredictionUnit &pu, PelBuf &predCb, PelBuf &predCr, int intraDir )
{
  if ( pu.cccmFlag )
  {
    CccmModel cccmModelCb( pu.cu->slice->getSPS()->getBitDepth(CHANNEL_TYPE_LUMA) ); // constructed with the luma bit depth (used for the bias term B)
    CccmModel cccmModelCr( pu.cu->slice->getSPS()->getBitDepth(CHANNEL_TYPE_LUMA) );
    
    // single-model CCCM
    if ( PU::cccmSingleModeAvail(pu, intraDir) )
    {
      xCccmCalcModels(pu, cccmModelCb,  cccmModelCr, 0, 0);         // compute the filter (tap) coefficients
      xCccmApplyModel(pu, COMPONENT_Cb, cccmModelCb, 0, 0, predCb); // CCCM prediction for Cb
      xCccmApplyModel(pu, COMPONENT_Cr, cccmModelCr, 0, 0, predCr); // CCCM prediction for Cr
    }
    // multi-model CCCM
    else
    {
      // Multimode case
      int modelThr = xCccmCalcRefAver(pu); // average of the reference luma samples, used as the split threshold

      // model 1: samples less than or equal to the threshold
      xCccmCalcModels(pu, cccmModelCb,  cccmModelCr, 1, modelThr);
      xCccmApplyModel(pu, COMPONENT_Cb, cccmModelCb, 1, modelThr, predCb);
      xCccmApplyModel(pu, COMPONENT_Cr, cccmModelCr, 1, modelThr, predCr);

      // model 2: samples above the threshold
      xCccmCalcModels(pu, cccmModelCb,  cccmModelCr, 2, modelThr);
      xCccmApplyModel(pu, COMPONENT_Cb, cccmModelCb, 2, modelThr, predCb);
      xCccmApplyModel(pu, COMPONENT_Cr, cccmModelCr, 2, modelThr, predCr);
    }
  }
}

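For reference, here is a simplified sketch of how the multi-model threshold can be obtained. This is my own illustration under the assumption that the threshold is simply the mean of the down-sampled luma samples in the reference area (which is what the comment above suggests); the real xCccmCalcRefAver works on the buffer returned by xCccmGetLumaRefBuf and may differ in details.

// Hypothetical sketch (not the literal ECM code): average of the down-sampled luma samples
// in the top/left reference area, used as the threshold that splits samples into two models.
static int calcRefLumaAverage(const PelBuf &refLuma, int areaWidth, int areaHeight,
                              int refSizeX, int refSizeY)
{
  int64_t sum = 0;
  int     cnt = 0;

  for (int y = 0; y < areaHeight; y++)
  {
    for (int x = 0; x < areaWidth; x++)
    {
      if (x >= refSizeX && y >= refSizeY)
      {
        continue; // skip the PU itself; only reference samples contribute
      }
      sum += refLuma.at(x, y);
      cnt++;
    }
  }

  return cnt ? int(sum / cnt) : 0;
}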
Here xCccmCalcModels computes the filter (tap) coefficients and xCccmApplyModel computes the prediction values. Their code is shown below.

// Computes the autocorrelation matrix of the down-sampled luma reference samples and the cross-correlation
// vectors between the luma reference samples and the reconstructed chroma samples, then solves the filter
// coefficients by LDL decomposition
void IntraPrediction::xCccmCalcModels(const PredictionUnit& pu, CccmModel &cccmModelCb, CccmModel &cccmModelCr, int modelId, int modelThr) const
{
  // areaWidth/areaHeight: width and height of the reference area; refSizeX/refSizeY: number of reference
  // columns available on the left / rows available above; refPosPicX/refPosPicY: position of the reference
  // area in the picture, i.e. its first sample (excluding the padding)
  int areaWidth, areaHeight, refSizeX, refSizeY, refPosPicX, refPosPicY;

  const CPelBuf recoCb  = pu.cs->picture->getRecoBuf(COMPONENT_Cb); // reconstructed Cb
  const CPelBuf recoCr  = pu.cs->picture->getRecoBuf(COMPONENT_Cr); // reconstructed Cr
  PelBuf        refLuma = xCccmGetLumaRefBuf(pu, areaWidth, areaHeight, refSizeX, refSizeY, refPosPicX, refPosPicY); // down-sampled luma reference samples

  int M = CCCM_NUM_PARAMS; // number of filter taps
  
  int sampleNum = areaWidth * areaHeight - pu.blocks[COMPONENT_Cb].width * pu.blocks[COMPONENT_Cb].height; // upper bound on the number of reference samples
  int sampleInd = 0;
  
  // Collect reference data to input matrix A and target vector Y
  static Pel A[CCCM_NUM_PARAMS][CCCM_MAX_REF_SAMPLES];
  static Pel YCb[CCCM_MAX_REF_SAMPLES];
  static Pel YCr[CCCM_MAX_REF_SAMPLES];

  for (int y = 0; y < areaHeight; y++)
  {
    for (int x = 0; x < areaWidth; x++)
    {
      // skip positions that are not part of the reference area
      if ( x >= refSizeX && y >= refSizeY )
      {
        continue;
      }
      // In the multi-model case the reference samples are split by the threshold and each model uses its own group
      if ( modelId == 1 && refLuma.at( x, y ) > modelThr ) // Model 1: Include only samples below or equal to the threshold
      {
        continue;
      }
      if ( modelId == 2 && refLuma.at( x, y ) <= modelThr) // Model 2: Include only samples above the threshold
      {
        continue;
      }
      
      // 7-tap cross
      A[0][sampleInd] = refLuma.at( x  , y   ); // C
      A[1][sampleInd] = refLuma.at( x  , y-1 ); // N
      A[2][sampleInd] = refLuma.at( x  , y+1 ); // S
      A[3][sampleInd] = refLuma.at( x-1, y   ); // W
      A[4][sampleInd] = refLuma.at( x+1, y   ); // E
      A[5][sampleInd] = cccmModelCb.nonlinear( refLuma.at( x, y) );
      A[6][sampleInd] = cccmModelCb.bias();

      YCb[sampleInd]   = recoCb.at(refPosPicX + x, refPosPicY + y);
      YCr[sampleInd++] = recoCr.at(refPosPicX + x, refPosPicY + y);
    }
  }
 
  if ( sampleInd == 0 ) // Number of samples can go to zero in the multimode case
  {
    cccmModelCb.clearModel(M); // reset the model to all-zero coefficients
    cccmModelCr.clearModel(M);
    return;
  }
  else
  {
    sampleNum = sampleInd;
  }
  
  // Calculate autocorrelation matrix and cross-correlation vector
  static CccmCovarianceInt::TE ATA;
  static CccmCovarianceInt::Ty ATYCb;
  static CccmCovarianceInt::Ty ATYCr;

  memset(ATA  , 0x00, sizeof(TCccmCoeff) * CCCM_NUM_PARAMS * CCCM_NUM_PARAMS);
  memset(ATYCb, 0x00, sizeof(TCccmCoeff) * CCCM_NUM_PARAMS);
  memset(ATYCr, 0x00, sizeof(TCccmCoeff) * CCCM_NUM_PARAMS);

  // accumulate the autocorrelation matrix (upper triangle only; the matrix is symmetric)
  for (int coli0 = 0; coli0 < M; coli0++)
  {
    for (int coli1 = coli0; coli1 < M; coli1++)
    {
      Pel *col0 = A[coli0];
      Pel *col1 = A[coli1];
      
      for (int rowi = 0; rowi < sampleNum; rowi++)
      {
        ATA[coli0][coli1] += col0[rowi] * col1[rowi];
      }
    }
  }
  // accumulate the cross-correlation vectors
  for (int coli = 0; coli < M; coli++)
  {
    Pel *col = A[coli];
    
    for (int rowi = 0; rowi < sampleNum; rowi++)
    {
      ATYCb[coli] += col[rowi] * YCb[rowi];
      ATYCr[coli] += col[rowi] * YCr[rowi];
    }
  }

  // Scale the matrix and vector to selected dynamic range
  int matrixShift = CCCM_MATRIX_BITS - 2 * pu.cu->cs->sps->getBitDepth(CHANNEL_TYPE_CHROMA) - ceilLog2(sampleNum);

  if ( matrixShift > 0 )
  {
    for (int coli0 = 0; coli0 < M; coli0++)
    {
      for (int coli1 = coli0; coli1 < M; coli1++)
      {
        ATA[coli0][coli1] <<= matrixShift;
      }
    }

    for (int coli = 0; coli < M; coli++)
    {
      ATYCb[coli] <<= matrixShift;
    }

    for (int coli = 0; coli < M; coli++)
    {
      ATYCr[coli] <<= matrixShift;
    }
  }
  else if ( matrixShift < 0 )
  {
    matrixShift = -matrixShift;
    
    for (int coli0 = 0; coli0 < M; coli0++)
    {
      for (int coli1 = coli0; coli1 < M; coli1++)
      {
        ATA[coli0][coli1] >>= matrixShift;
      }
    }

    for (int coli = 0; coli < M; coli++)
    {
      ATYCb[coli] >>= matrixShift;
    }

    for (int coli = 0; coli < M; coli++)
    {
      ATYCr[coli] >>= matrixShift;
    }
  }
  
  // Solve the filter coefficients using LDL decomposition
  CccmCovarianceInt cccmSolver;
  CccmCovarianceInt::TE U;       // Upper triangular L' of ATA's LDL decomposition
  CccmCovarianceInt::Ty diag;    // Diagonal of D

  bool decompOk = cccmSolver.ldlDecompose(ATA, U, diag, M);
  
  cccmSolver.ldlSolve(U, diag, ATYCb, cccmModelCb.params, M, decompOk);
  cccmSolver.ldlSolve(U, diag, ATYCr, cccmModelCr.params, M, decompOk);
}
void IntraPrediction::xCccmApplyModel(const PredictionUnit& pu, const ComponentID compId, CccmModel &cccmModel, int modelId, int modelThr, PelBuf &piPred) const
{
  const  ClpRng& clpRng(pu.cu->cs->slice->clpRng(compId));
  static Pel     samples[CCCM_NUM_PARAMS]; // input vector holding the seven filter-tap samples

  CPelBuf refLumaBlk = xCccmGetLumaPuBuf(pu);

  for (int y = 0; y < refLumaBlk.height; y++)
  {
    for (int x = 0; x < refLumaBlk.width; x++)
    {
      if ( modelId == 1 && refLumaBlk.at( x, y ) > modelThr ) // Model 1: Include only samples below or equal to the threshold
      {
        continue;
      }
      if ( modelId == 2 && refLumaBlk.at( x, y ) <= modelThr) // Model 2: Include only samples above the threshold
      {
        continue;
      }
      
      // 7-tap cross
      samples[0] = refLumaBlk.at( x  , y   ); // C (center luma)
      samples[1] = refLumaBlk.at( x  , y-1 ); // N (above)
      samples[2] = refLumaBlk.at( x  , y+1 ); // S (below)
      samples[3] = refLumaBlk.at( x-1, y   ); // W (left)
      samples[4] = refLumaBlk.at( x+1, y   ); // E (right)
      samples[5] = cccmModel.nonlinear( refLumaBlk.at( x, y) ); // nonlinear term
      samples[6] = cccmModel.bias();                            // bias term

      piPred.at(x, y) = ClipPel<Pel>( cccmModel.convolve(samples, CCCM_NUM_PARAMS), clpRng ); // convolve and clip to obtain the final prediction
    }
  }
}
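The nonlinear and bias inputs come from the CccmModel helper. As a hedged sketch of what they compute, under the assumption that the nonlinear term is the squared center luma rescaled to the sample range and the bias is the mid-level value (the exact ECM implementation may differ in rounding details):

// Hedged sketch of the CccmModel nonlinear/bias inputs (not the literal ECM code).
// The nonlinear input is the squared center luma scaled back to the sample range,
// and the bias input is the mid-level value, which lets the solver learn a DC offset.
struct CccmModelSketch
{
  int bitDepth;
  int midVal()   const { return 1 << (bitDepth - 1); }
  Pel nonlinear( const Pel c ) const { return Pel( ( (int)c * c + midVal() ) >> bitDepth ); }
  Pel bias()     const { return Pel( midVal() ); }
};

As I understand it, convolve(samples, CCCM_NUM_PARAMS) then accumulates params[i] * samples[i] over the seven taps and applies a fixed-point rounding shift before the final clipping seen above.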
