WebRTC AEC core source code (annotated)

The code below was downloaded from someone else's GitHub repository; the annotated source is pasted first:

/*
 *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

/*
 * The core AEC algorithm, which is presented with time-aligned signals.
 */

#include "webrtc/modules/audio_processing/aec/aec_core.h"

#ifdef WEBRTC_AEC_DEBUG_DUMP
#include <stdio.h>
#endif

#include <assert.h>
#include <math.h>
#include <stddef.h>  // size_t
#include <stdlib.h>
#include <string.h>

#include "webrtc/common_audio/ring_buffer.h"
#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
#include "webrtc/modules/audio_processing/aec/aec_common.h"
#include "webrtc/modules/audio_processing/aec/aec_core_internal.h"
#include "webrtc/modules/audio_processing/aec/aec_rdft.h"
#include "webrtc/modules/audio_processing/logging/aec_logging.h"
#include "webrtc/modules/audio_processing/utility/delay_estimator_wrapper.h"
#include "webrtc/system_wrappers/include/cpu_features_wrapper.h"
#include "webrtc/typedefs.h"


// Buffer size (samples)
static const size_t kBufSizePartitions = 250;  // 1 second of audio in 16 kHz.

// Metrics
static const int subCountLen = 4;  // sub-frame count length
static const int countLen = 50;
// Delay metrics aggregation window
static const int kDelayMetricsAggregationWindow = 1250;  // 5 seconds at 16 kHz.

// Quantities to control H band scaling for SWB input
static const int flagHbandCn = 1;  // flag for adding comfort noise in H band
static const float cnScaleHband =
    (float)0.4;  // scale for comfort noise in H band
// Initial bin for averaging the NLP gain over the low band
static const int freqAvgIc = PART_LEN / 2;

// Matlab code to generate the table:
// win = sqrt(hanning(63)); win = [0; win(1:32)];
// fprintf(1, '\t%.14f, %.14f, %.14f,\n', win);
// Square-root Hanning window
ALIGN16_BEG const float ALIGN16_END WebRtcAec_sqrtHanning[65] = {
    0.00000000000000f, 0.02454122852291f, 0.04906767432742f, 0.07356456359967f,
    0.09801714032956f, 0.12241067519922f, 0.14673047445536f, 0.17096188876030f,
    0.19509032201613f, 0.21910124015687f, 0.24298017990326f, 0.26671275747490f,
    0.29028467725446f, 0.31368174039889f, 0.33688985339222f, 0.35989503653499f,
    0.38268343236509f, 0.40524131400499f, 0.42755509343028f, 0.44961132965461f,
    0.47139673682600f, 0.49289819222978f, 0.51410274419322f, 0.53499761988710f,
    0.55557023301960f, 0.57580819141785f, 0.59569930449243f, 0.61523159058063f,
    0.63439328416365f, 0.65317284295378f, 0.67155895484702f, 0.68954054473707f,
    0.70710678118655f, 0.72424708295147f, 0.74095112535496f, 0.75720884650648f,
    0.77301045336274f, 0.78834642762661f, 0.80320753148064f, 0.81758481315158f,
    0.83146961230255f, 0.84485356524971f, 0.85772861000027f, 0.87008699110871f,
    0.88192126434835f, 0.89322430119552f, 0.90398929312344f, 0.91420975570353f,
    0.92387953251129f, 0.93299279883474f, 0.94154406518302f, 0.94952818059304f,
    0.95694033573221f, 0.96377606579544f, 0.97003125319454f, 0.97570213003853f,
    0.98078528040323f, 0.98527764238894f, 0.98917650996478f, 0.99247953459871f,
    0.99518472667220f, 0.99729045667869f, 0.99879545620517f, 0.99969881869620f,
    1.00000000000000f};

// Matlab code to generate the table:
// weightCurve = [0 ; 0.3 * sqrt(linspace(0,1,64))' + 0.1];
// fprintf(1, '\t%.4f, %.4f, %.4f, %.4f, %.4f, %.4f,\n', weightCurve)
// Weighting curve
ALIGN16_BEG const float ALIGN16_END WebRtcAec_weightCurve[65] = {
    0.0000f, 0.1000f, 0.1378f, 0.1535f, 0.1655f, 0.1756f, 0.1845f, 0.1926f,
    0.2000f, 0.2069f, 0.2134f, 0.2195f, 0.2254f, 0.2309f, 0.2363f, 0.2414f,
    0.2464f, 0.2512f, 0.2558f, 0.2604f, 0.2648f, 0.2690f, 0.2732f, 0.2773f,
    0.2813f, 0.2852f, 0.2890f, 0.2927f, 0.2964f, 0.3000f, 0.3035f, 0.3070f,
    0.3104f, 0.3138f, 0.3171f, 0.3204f, 0.3236f, 0.3268f, 0.3299f, 0.3330f,
    0.3360f, 0.3390f, 0.3420f, 0.3449f, 0.3478f, 0.3507f, 0.3535f, 0.3563f,
    0.3591f, 0.3619f, 0.3646f, 0.3673f, 0.3699f, 0.3726f, 0.3752f, 0.3777f,
    0.3803f, 0.3828f, 0.3854f, 0.3878f, 0.3903f, 0.3928f, 0.3952f, 0.3976f,
    0.4000f};

// Matlab code to generate the table:
// overDriveCurve = [sqrt(linspace(0,1,65))' + 1];
// fprintf(1, '\t%.4f, %.4f, %.4f, %.4f, %.4f, %.4f,\n', overDriveCurve);
// Overdrive curve
ALIGN16_BEG const float ALIGN16_END WebRtcAec_overDriveCurve[65] = {
    1.0000f, 1.1250f, 1.1768f, 1.2165f, 1.2500f, 1.2795f, 1.3062f, 1.3307f,
    1.3536f, 1.3750f, 1.3953f, 1.4146f, 1.4330f, 1.4507f, 1.4677f, 1.4841f,
    1.5000f, 1.5154f, 1.5303f, 1.5449f, 1.5590f, 1.5728f, 1.5863f, 1.5995f,
    1.6124f, 1.6250f, 1.6374f, 1.6495f, 1.6614f, 1.6731f, 1.6847f, 1.6960f,
    1.7071f, 1.7181f, 1.7289f, 1.7395f, 1.7500f, 1.7603f, 1.7706f, 1.7806f,
    1.7906f, 1.8004f, 1.8101f, 1.8197f, 1.8292f, 1.8385f, 1.8478f, 1.8570f,
    1.8660f, 1.8750f, 1.8839f, 1.8927f, 1.9014f, 1.9100f, 1.9186f, 1.9270f,
    1.9354f, 1.9437f, 1.9520f, 1.9601f, 1.9682f, 1.9763f, 1.9843f, 1.9922f,
    2.0000f};

// Delay Agnostic AEC parameters, still under development and may change.
static const float kDelayQualityThresholdMax = 0.07f;
static const float kDelayQualityThresholdMin = 0.01f;
static const int kInitialShiftOffset = 5;  // initial shift offset
#if !defined(WEBRTC_ANDROID)
static const int kDelayCorrectionStart = 1500;  // 10 ms chunks before delay correction starts
#endif

// Target suppression levels for nlp modes.
// log{0.001, 0.00001, 0.00000001}
static const float kTargetSupp[3] = {-6.9f, -11.5f, -18.4f};

// Two sets of parameters, one for the extended filter mode.
static const float kExtendedMinOverDrive[3] = {3.0f, 6.0f, 15.0f};  // extended mode
static const float kNormalMinOverDrive[3] = {1.0f, 2.0f, 5.0f};  // normal mode
// Extended-mode smoothing coefficients
const float WebRtcAec_kExtendedSmoothingCoefficients[2][2] = {{0.9f, 0.1f},
                                                              {0.92f, 0.08f}};
// Normal-mode smoothing coefficients
const float WebRtcAec_kNormalSmoothingCoefficients[2][2] = {{0.9f, 0.1f},
                                                            {0.93f, 0.07f}};

// Number of partitions forming the NLP's "preferred" bands.
enum {
  kPrefBandSize = 24  // preferred band size
};

#ifdef WEBRTC_AEC_DEBUG_DUMP
// Instance counter shared across AEC instances (used for debug dump naming).
extern int webrtc_aec_instance_count;
#endif

WebRtcAecFilterFar WebRtcAec_FilterFar;  // far-end filtering
WebRtcAecScaleErrorSignal WebRtcAec_ScaleErrorSignal;  // error signal e(n) scaling
WebRtcAecFilterAdaptation WebRtcAec_FilterAdaptation;  // adaptive filter update
WebRtcAecOverdriveAndSuppress WebRtcAec_OverdriveAndSuppress;  // overdrive and suppression
WebRtcAecComfortNoise WebRtcAec_ComfortNoise;  // comfort noise
WebRtcAecSubBandCoherence WebRtcAec_SubbandCoherence;  // subband coherence

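// Helpers for complex arithmetic on (re, im) pairs: MulRe() and MulIm()
// return the real and imaginary parts of the complex product a * b. Note
// that callers below pass -aIm to obtain conj(a) * b, which is what the
// filter adaptation needs.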
__inline static float MulRe(float aRe, float aIm, float bRe, float bIm) {
  return aRe * bRe - aIm * bIm;
}

__inline static float MulIm(float aRe, float aIm, float bRe, float bIm) {
  return aRe * bIm + aIm * bRe;
}

static int CmpFloat(const void* a, const void* b) {
  const float* da = (const float*)a;
  const float* db = (const float*)b;

  return (*da > *db) - (*da < *db);
}
// Far-end filtering
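// Computes the frequency-domain echo estimate as a partitioned convolution:
// for each partition i, the buffered far-end spectrum X_i is multiplied with
// the corresponding filter partition W_i and accumulated into yf, i.e.
// Y(k) = sum_i X_i(k) * W_i(k). xfBuf is a circular buffer of far-end
// spectra indexed via xfBufBlockPos, hence the wrap-around check below.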
static void FilterFar(AecCore* aec, float yf[2][PART_LEN1]) {
  int i;
  for (i = 0; i < aec->num_partitions; i++) {
    int j;
    // xfBufBlockPos: index of the current far-end block in the circular buffer
    int xPos = (i + aec->xfBufBlockPos) * PART_LEN1;
    int pos = i * PART_LEN1;
    // Check for wrap
    if (i + aec->xfBufBlockPos >= aec->num_partitions) {
      xPos -= aec->num_partitions * (PART_LEN1);
    }

    for (j = 0; j < PART_LEN1; j++) {
      yf[0][j] += MulRe(aec->xfBuf[0][xPos + j],
                        aec->xfBuf[1][xPos + j],
                        aec->wfBuf[0][pos + j],
                        aec->wfBuf[1][pos + j]);
      yf[1][j] += MulIm(aec->xfBuf[0][xPos + j],
                        aec->xfBuf[1][xPos + j],
                        aec->wfBuf[0][pos + j],
                        aec->wfBuf[1][pos + j]);
    }
  }
}
// Scale the error signal
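// Normalized LMS preprocessing of the error spectrum: each bin is divided by
// the smoothed far-end power xPow (plus a small regularizer), limited in
// magnitude to error_threshold for stability, and scaled by the step size mu.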
static void ScaleErrorSignal(AecCore* aec, float ef[2][PART_LEN1]) {
  const float mu = aec->extended_filter_enabled ? kExtendedMu : aec->normal_mu;
  // error_threshold: limit on the per-bin error magnitude
  const float error_threshold = aec->extended_filter_enabled
                                    ? kExtendedErrorThreshold
                                    : aec->normal_error_threshold;
  int i;
  float abs_ef;
  for (i = 0; i < (PART_LEN1); i++) {
    ef[0][i] /= (aec->xPow[i] + 1e-10f);
    ef[1][i] /= (aec->xPow[i] + 1e-10f);
    abs_ef = sqrtf(ef[0][i] * ef[0][i] + ef[1][i] * ef[1][i]);

    if (abs_ef > error_threshold) {
      abs_ef = error_threshold / (abs_ef + 1e-10f);
      ef[0][i] *= abs_ef;
      ef[1][i] *= abs_ef;
    }

    // Step size factor
    //const float mu = aec->extended_filter_enabled ? kExtendedMu : aec->normal_mu;
    ef[0][i] *= mu;
    ef[1][i] *= mu;
  }
}

// Time-unconstrained filter adaptation.
// TODO(andrew): consider for a low-complexity mode.
// (Reference implementation, kept commented out below.)
// static void FilterAdaptationUnconstrained(AecCore* aec, float *fft,
//                                          float ef[2][PART_LEN1]) {
//  int i, j;
//  for (i = 0; i < aec->num_partitions; i++) {
//    int xPos = (i + aec->xfBufBlockPos)*(PART_LEN1);
//    int pos;
//    // Check for wrap
//    if (i + aec->xfBufBlockPos >= aec->num_partitions) {
//      xPos -= aec->num_partitions * PART_LEN1;
//    }
//
//    pos = i * PART_LEN1;
//
//    for (j = 0; j < PART_LEN1; j++) {
//      aec->wfBuf[0][pos + j] += MulRe(aec->xfBuf[0][xPos + j],
//                                      -aec->xfBuf[1][xPos + j],
//                                      ef[0][j], ef[1][j]);
//      aec->wfBuf[1][pos + j] += MulIm(aec->xfBuf[0][xPos + j],
//                                      -aec->xfBuf[1][xPos + j],
//                                      ef[0][j], ef[1][j]);
//    }
//  }
//}
// Filter adaptation (with time-domain constraint)
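// Constrained filter update. For each partition, the gradient conj(X_i) * E
// is taken to the time domain, its second half is zeroed (the overlap-save
// gradient constraint that keeps the adaptation a linear rather than a
// circular convolution), and the result is transformed back and accumulated
// into the filter weights wfBuf.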
static void FilterAdaptation(AecCore* aec, float* fft, float ef[2][PART_LEN1]) {
  int i, j;
  for (i = 0; i < aec->num_partitions; i++) {
    int xPos = (i + aec->xfBufBlockPos) * (PART_LEN1);
    int pos;
    // Check for wrap
    if (i + aec->xfBufBlockPos >= aec->num_partitions) {
      xPos -= aec->num_partitions * PART_LEN1;
    }

    pos = i * PART_LEN1;

    for (j = 0; j < PART_LEN; j++) {

      fft[2 * j] = MulRe(aec->xfBuf[0][xPos + j],
                         -aec->xfBuf[1][xPos + j],
                         ef[0][j],
                         ef[1][j]);
      fft[2 * j + 1] = MulIm(aec->xfBuf[0][xPos + j],
                             -aec->xfBuf[1][xPos + j],
                             ef[0][j],
                             ef[1][j]);
    }
    fft[1] = MulRe(aec->xfBuf[0][xPos + PART_LEN],
                   -aec->xfBuf[1][xPos + PART_LEN],
                   ef[0][PART_LEN],
                   ef[1][PART_LEN]);
    // Inverse FFT of the gradient
    aec_rdft_inverse_128(fft);
    memset(fft + PART_LEN, 0, sizeof(float) * PART_LEN);

    // FFT scaling
    {
      float scale = 2.0f / PART_LEN2;
      for (j = 0; j < PART_LEN; j++) {
        fft[j] *= scale;
      }
    }
    aec_rdft_forward_128(fft);

    aec->wfBuf[0][pos] += fft[0];
    aec->wfBuf[0][pos + PART_LEN] += fft[1];

    for (j = 1; j < PART_LEN; j++) {
      aec->wfBuf[0][pos + j] += fft[2 * j];
      aec->wfBuf[1][pos + j] += fft[2 * j + 1];
    }
  }
}
// Overdrive and suppression
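// Applies the NLP suppression gain. Bins whose gain exceeds the full-band
// feedback level hNlFb are pulled towards it using WebRtcAec_weightCurve, and
// every bin is then compressed as hNl(k)^(overDriveSm * overDriveCurve(k)),
// so higher frequencies are suppressed more aggressively. The resulting gain
// is applied to the error spectrum efw.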
static void OverdriveAndSuppress(AecCore* aec,
                                 float hNl[PART_LEN1],
                                 const float hNlFb,
                                 float efw[2][PART_LEN1]) {
  int i;
  for (i = 0; i < PART_LEN1; i++) {
    //  Weight subbands
    if (hNl[i] > hNlFb) {
      hNl[i] = WebRtcAec_weightCurve[i] * hNlFb +
               (1 - WebRtcAec_weightCurve[i]) * hNl[i];
    }
    hNl[i] = powf(hNl[i], aec->overDriveSm * WebRtcAec_overDriveCurve[i]);

    // Suppress error signal
    efw[0][i] *= hNl[i];
    efw[1][i] *= hNl[i];

    // Ooura fft returns incorrect sign on the imaginary component. It matters
    // here because we are making an additive change with comfort noise.
    efw[1][i] *= -1;
  }
}
// Returns the partition (delay) with the highest filter energy.
static int PartitionDelay(const AecCore* aec) {
  // Measures the energy in each filter partition and returns the partition
  // with the highest energy.
  // TODO(bjornv): Spread the computational cost by computing one partition
  // per block?
  float wfEnMax = 0;
  int i;
  int delay = 0;

  for (i = 0; i < aec->num_partitions; i++) {
    int j;
    int pos = i * PART_LEN1;
    float wfEn = 0;
    for (j = 0; j < PART_LEN1; j++) {
      wfEn += aec->wfBuf[0][pos + j] * aec->wfBuf[0][pos + j] +
          aec->wfBuf[1][pos + j] * aec->wfBuf[1][pos + j];
    }

    if (wfEn > wfEnMax) {
      wfEnMax = wfEn;
      delay = i;
    }
  }
  return delay;
}

// Threshold to protect against the ill-effects of a zero far-end.
const float WebRtcAec_kMinFarendPSD = 15;

// Updates the following smoothed Power Spectral Densities (PSD):
//  - sd  : near-end
//  - se  : residual echo
//  - sx  : far-end
//  - sde : cross-PSD of near-end and residual echo
//  - sxd : cross-PSD of near-end and far-end
//
// In addition to updating the PSDs, the filter diverge state is determined
// and acted upon.
static void SmoothedPSD(AecCore* aec,
                        float efw[2][PART_LEN1],
                        float dfw[2][PART_LEN1],
                        float xfw[2][PART_LEN1]) {
  // Power estimate smoothing coefficients.
  const float* ptrGCoh = aec->extended_filter_enabled
      ? WebRtcAec_kExtendedSmoothingCoefficients[aec->mult - 1]
      : WebRtcAec_kNormalSmoothingCoefficients[aec->mult - 1];
  int i;
  float sdSum = 0, seSum = 0;

  for (i = 0; i < PART_LEN1; i++) {
    aec->sd[i] = ptrGCoh[0] * aec->sd[i] +
                 ptrGCoh[1] * (dfw[0][i] * dfw[0][i] + dfw[1][i] * dfw[1][i]);
    aec->se[i] = ptrGCoh[0] * aec->se[i] +
                 ptrGCoh[1] * (efw[0][i] * efw[0][i] + efw[1][i] * efw[1][i]);
    // We threshold here to protect against the ill-effects of a zero far-end.
    // The threshold is not arbitrarily chosen, but balances protection and
    // adverse interaction with the algorithm's tuning.
    // TODO(bjornv): investigate further why this is so sensitive.
    aec->sx[i] =
        ptrGCoh[0] * aec->sx[i] +
        ptrGCoh[1] * WEBRTC_SPL_MAX(
            xfw[0][i] * xfw[0][i] + xfw[1][i] * xfw[1][i],
            WebRtcAec_kMinFarendPSD);

    aec->sde[i][0] =
        ptrGCoh[0] * aec->sde[i][0] +
        ptrGCoh[1] * (dfw[0][i] * efw[0][i] + dfw[1][i] * efw[1][i]);
    aec->sde[i][1] =
        ptrGCoh[0] * aec->sde[i][1] +
        ptrGCoh[1] * (dfw[0][i] * efw[1][i] - dfw[1][i] * efw[0][i]);

    aec->sxd[i][0] =
        ptrGCoh[0] * aec->sxd[i][0] +
        ptrGCoh[1] * (dfw[0][i] * xfw[0][i] + dfw[1][i] * xfw[1][i]);
    aec->sxd[i][1] =
        ptrGCoh[0] * aec->sxd[i][1] +
        ptrGCoh[1] * (dfw[0][i] * xfw[1][i] - dfw[1][i] * xfw[0][i]);

    sdSum += aec->sd[i];
    seSum += aec->se[i];
  }

  // Divergent filter safeguard.
  aec->divergeState = (aec->divergeState ? 1.05f : 1.0f) * seSum > sdSum;

  if (aec->divergeState)
    memcpy(efw, dfw, sizeof(efw[0][0]) * 2 * PART_LEN1);

  // Reset if error is significantly larger than near-end (13 dB).
  if (!aec->extended_filter_enabled && seSum > (19.95f * sdSum))
    memset(aec->wfBuf, 0, sizeof(aec->wfBuf));
}

// Windows the time-domain data before the fft.
__inline static void WindowData(float* x_windowed, const float* x) {
  int i;
  for (i = 0; i < PART_LEN; i++) {
    x_windowed[i] = x[i] * WebRtcAec_sqrtHanning[i];
    x_windowed[PART_LEN + i] =
        x[PART_LEN + i] * WebRtcAec_sqrtHanning[PART_LEN - i];
  }
}

// Puts the fft output data into a complex valued array.
__inline static void StoreAsComplex(const float* data,
                                    float data_complex[2][PART_LEN1]) {
  int i;
  data_complex[0][0] = data[0];
  data_complex[1][0] = 0;
  for (i = 1; i < PART_LEN; i++) {
    data_complex[0][i] = data[2 * i];
    data_complex[1][i] = data[2 * i + 1];
  }
  data_complex[0][PART_LEN] = data[1];
  data_complex[1][PART_LEN] = 0;
}
// Subband coherence
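// Computes, per frequency bin, the magnitude-squared coherences from the
// smoothed (cross-)PSDs maintained by SmoothedPSD():
//
//   cohde(k) = |S_de(k)|^2 / (S_d(k) * S_e(k))   (near-end vs. error)
//   cohxd(k) = |S_xd(k)|^2 / (S_x(k) * S_d(k))   (far-end vs. near-end)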
static void SubbandCoherence(AecCore* aec,
                             float efw[2][PART_LEN1],
                             float xfw[2][PART_LEN1],
                             float* fft,
                             float* cohde,
                             float* cohxd) {
  float dfw[2][PART_LEN1];
  int i;

  if (aec->delayEstCtr == 0)
    aec->delayIdx = PartitionDelay(aec);

  // Use the delayed far-end spectrum
  memcpy(xfw,
         aec->xfwBuf + aec->delayIdx * PART_LEN1,
         sizeof(xfw[0][0]) * 2 * PART_LEN1);

  // Windowed near-end fft
  WindowData(fft, aec->dBuf);
  aec_rdft_forward_128(fft);
  StoreAsComplex(fft, dfw);

  // Windowed error fft
  WindowData(fft, aec->eBuf);
  aec_rdft_forward_128(fft);
  StoreAsComplex(fft, efw);

  SmoothedPSD(aec, efw, dfw, xfw);

  // Subband coherence
  for (i = 0; i < PART_LEN1; i++) {
    cohde[i] =
        (aec->sde[i][0] * aec->sde[i][0] + aec->sde[i][1] * aec->sde[i][1]) /
        (aec->sd[i] * aec->se[i] + 1e-10f);
    cohxd[i] =
        (aec->sxd[i][0] * aec->sxd[i][0] + aec->sxd[i][1] * aec->sxd[i][1]) /
        (aec->sx[i] * aec->sd[i] + 1e-10f);
  }
}
// Compute the high-band NLP gain
static void GetHighbandGain(const float* lambda, float* nlpGainHband) {
  int i;

  nlpGainHband[0] = (float)0.0;
  for (i = freqAvgIc; i < PART_LEN1 - 1; i++) {
    nlpGainHband[0] += lambda[i];
  }
  nlpGainHband[0] /= (float)(PART_LEN1 - 1 - freqAvgIc);
}
// Comfort noise generation
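// Fills the spectral holes left by suppression with noise shaped like the
// estimated background noise: every bin gets magnitude sqrt(noisePow(k)) and
// a random phase, weighted by sqrt(1 - lambda(k)^2) so that the passed signal
// plus the injected noise roughly matches the background noise power.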
static void ComfortNoise(AecCore* aec,
                         float efw[2][PART_LEN1],
                         complex_t* comfortNoiseHband,
                         const float* noisePow,
                         const float* lambda) {
  int i, num;
  float rand[PART_LEN];
  float noise, noiseAvg, tmp, tmpAvg;
  int16_t randW16[PART_LEN];
  complex_t u[PART_LEN1];

  const float pi2 = 6.28318530717959f;

  // Generate a uniform random array on [0 1]
  WebRtcSpl_RandUArray(randW16, PART_LEN, &aec->seed);
  for (i = 0; i < PART_LEN; i++) {
    rand[i] = ((float)randW16[i]) / 32768;
  }

  // Reject LF noise
  u[0][0] = 0;
  u[0][1] = 0;
  for (i = 1; i < PART_LEN1; i++) {
    tmp = pi2 * rand[i - 1];

    noise = sqrtf(noisePow[i]);
    u[i][0] = noise * cosf(tmp);
    u[i][1] = -noise * sinf(tmp);
  }
  u[PART_LEN][1] = 0;

  for (i = 0; i < PART_LEN1; i++) {
    // This is the proper weighting to match the background noise power
    tmp = sqrtf(WEBRTC_SPL_MAX(1 - lambda[i] * lambda[i], 0));
    // tmp = 1 - lambda[i];
    efw[0][i] += tmp * u[i][0];
    efw[1][i] += tmp * u[i][1];
  }

  // For H band comfort noise
  // TODO: don't compute noise and "tmp" twice. Use the previous results.
  noiseAvg = 0.0;
  tmpAvg = 0.0;
  num = 0;
  if (aec->num_bands > 1 && flagHbandCn == 1) {

    // Average noise level
    // averaged over the second half of the frequency spectrum (i.e. 4->8 kHz)
    // TODO: we shouldn't need num. We know how many elements we're summing.
    for (i = PART_LEN1 >> 1; i < PART_LEN1; i++) {
      num++;
      noiseAvg += sqrtf(noisePow[i]);
    }
    noiseAvg /= (float)num;

    // Average NLP scale
    // averaged over the second half of the frequency spectrum (i.e. 4->8 kHz)
    // TODO: we shouldn't need num. We know how many elements we're summing.
    num = 0;
    for (i = PART_LEN1 >> 1; i < PART_LEN1; i++) {
      num++;
      tmpAvg += sqrtf(WEBRTC_SPL_MAX(1 - lambda[i] * lambda[i], 0));
    }
    tmpAvg /= (float)num;

    // Use average noise for the H band
    // TODO: we should probably have a new random vector here.
    // Reject LF noise.
    u[0][0] = 0;
    u[0][1] = 0;
    for (i = 1; i < PART_LEN1; i++) {
      tmp = pi2 * rand[i - 1];

      // Use average noise for the H band
      u[i][0] = noiseAvg * (float)cos(tmp);
      u[i][1] = -noiseAvg * (float)sin(tmp);
    }
    u[PART_LEN][1] = 0;

    for (i = 0; i < PART_LEN1; i++) {
      // Use average NLP weight for H band
      comfortNoiseHband[i][0] = tmpAvg * u[i][0];
      comfortNoiseHband[i][1] = tmpAvg * u[i][1];
    }
  }
}
// Initialize a power level struct
static void InitLevel(PowerLevel* level) {
  const float kBigFloat = 1E17f;

  level->averagelevel = 0;
  level->framelevel = 0;
  level->minlevel = kBigFloat;
  level->frsum = 0;
  level->sfrsum = 0;
  level->frcounter = 0;
  level->sfrcounter = 0;
}
// Initialize a stats struct
static void InitStats(Stats* stats) {
  stats->instant = kOffsetLevel;
  stats->average = kOffsetLevel;
  stats->max = kOffsetLevel;
  stats->min = kOffsetLevel * (-1);
  stats->sum = 0;
  stats->hisum = 0;
  stats->himean = kOffsetLevel;
  stats->counter = 0;
  stats->hicounter = 0;
}

static void InitMetrics(AecCore* self) {
  self->stateCounter = 0;
  InitLevel(&self->farlevel);
  InitLevel(&self->nearlevel);
  InitLevel(&self->linoutlevel);
  InitLevel(&self->nlpoutlevel);

  InitStats(&self->erl);
  InitStats(&self->erle);
  InitStats(&self->aNlp);
  InitStats(&self->rerl);
}

static void UpdateLevel(PowerLevel* level, float in[2][PART_LEN1]) {
  // Do the energy calculation in the frequency domain. The FFT is performed
  // on a segment of PART_LEN2 samples due to overlap, but we only need the
  // energy of half that data (the last PART_LEN samples). Parseval's relation
  // states that the energy is preserved according to
  //
  // \sum_{n=0}^{N-1} |x(n)|^2 = 1/N * \sum_{n=0}^{N-1} |X(n)|^2
  //                           = ENERGY,
  //
  // where N = PART_LEN2. Since we are only interested in the energy of the
  // last PART_LEN samples, we approximate by calculating ENERGY and dividing
  // by 2,
  //
  // \sum_{n=N/2}^{N-1} |x(n)|^2 ~= ENERGY / 2
  //
  // Since we deal with real-valued time-domain signals, we only store the
  // frequency bins [0, PART_LEN], which is what |in| consists of. To
  // calculate ENERGY we need to add the contribution from the missing part
  // in [PART_LEN+1, PART_LEN2-1]. These values are, up to a phase shift,
  // identical to the values in [1, PART_LEN-1], hence multiply those values
  // by 2. That is what the for loop below would do, but the multiplication
  // by 2 and the division by 2 cancel.

  // TODO(bjornv): Investigate reusing the energy calculations performed at
  // other places in the code.
  int k = 1;
  // Imaginary parts are zero at end points and left out of the calculation.
  float energy = (in[0][0] * in[0][0]) / 2;
  energy += (in[0][PART_LEN] * in[0][PART_LEN]) / 2;

  for (k = 1; k < PART_LEN; k++) {
    energy += (in[0][k] * in[0][k] + in[1][k] * in[1][k]);
  }
  energy /= PART_LEN2;

  level->sfrsum += energy;
  level->sfrcounter++;

  if (level->sfrcounter > subCountLen) {
    level->framelevel = level->sfrsum / (subCountLen * PART_LEN);
    level->sfrsum = 0;
    level->sfrcounter = 0;
    if (level->framelevel > 0) {
      if (level->framelevel < level->minlevel) {
        level->minlevel = level->framelevel;  // New minimum.
      } else {
        level->minlevel *= (1 + 0.001f);  // Small increase.
      }
    }
    level->frcounter++;
    level->frsum += level->framelevel;
    if (level->frcounter > countLen) {
      level->averagelevel = level->frsum / countLen;
      level->frsum = 0;
      level->frcounter = 0;
    }
  }
}

static void UpdateMetrics(AecCore* aec) {
  float dtmp, dtmp2;

  const float actThresholdNoisy = 8.0f;
  const float actThresholdClean = 40.0f;
  const float safety = 0.99995f;
  const float noisyPower = 300000.0f;

  float actThreshold;
  float echo, suppressedEcho;

  if (aec->echoState) {  // Check if echo is likely present
    aec->stateCounter++;
  }

  if (aec->farlevel.frcounter == 0) {

    if (aec->farlevel.minlevel < noisyPower) {
      actThreshold = actThresholdClean;
    } else {
      actThreshold = actThresholdNoisy;
    }

    if ((aec->stateCounter > (0.5f * countLen * subCountLen)) &&
        (aec->farlevel.sfrcounter == 0)

        // Estimate only during active far-end segments
        &&
        (aec->farlevel.averagelevel >
         (actThreshold * aec->farlevel.minlevel))) {

      // Subtract noise power
      echo = aec->nearlevel.averagelevel - safety * aec->nearlevel.minlevel;

      // ERL
      dtmp = 10 * (float)log10(aec->farlevel.averagelevel /
                                   aec->nearlevel.averagelevel +
                               1e-10f);
      dtmp2 = 10 * (float)log10(aec->farlevel.averagelevel / echo + 1e-10f);

      aec->erl.instant = dtmp;
      if (dtmp > aec->erl.max) {
        aec->erl.max = dtmp;
      }

      if (dtmp < aec->erl.min) {
        aec->erl.min = dtmp;
      }

      aec->erl.counter++;
      aec->erl.sum += dtmp;
      aec->erl.average = aec->erl.sum / aec->erl.counter;

      // Upper mean
      if (dtmp > aec->erl.average) {
        aec->erl.hicounter++;
        aec->erl.hisum += dtmp;
        aec->erl.himean = aec->erl.hisum / aec->erl.hicounter;
      }

      // A_NLP
      dtmp = 10 * (float)log10(aec->nearlevel.averagelevel /
                                   (2 * aec->linoutlevel.averagelevel) +
                               1e-10f);

      // subtract noise power
      suppressedEcho = 2 * (aec->linoutlevel.averagelevel -
                            safety * aec->linoutlevel.minlevel);

      dtmp2 = 10 * (float)log10(echo / suppressedEcho + 1e-10f);

      aec->aNlp.instant = dtmp2;
      if (dtmp > aec->aNlp.max) {
        aec->aNlp.max = dtmp;
      }

      if (dtmp < aec->aNlp.min) {
        aec->aNlp.min = dtmp;
      }

      aec->aNlp.counter++;
      aec->aNlp.sum += dtmp;
      aec->aNlp.average = aec->aNlp.sum / aec->aNlp.counter;

      // Upper mean
      if (dtmp > aec->aNlp.average) {
        aec->aNlp.hicounter++;
        aec->aNlp.hisum += dtmp;
        aec->aNlp.himean = aec->aNlp.hisum / aec->aNlp.hicounter;
      }

      // ERLE

      // subtract noise power
      suppressedEcho = 2 * (aec->nlpoutlevel.averagelevel -
                            safety * aec->nlpoutlevel.minlevel);

      dtmp = 10 * (float)log10(aec->nearlevel.averagelevel /
                                   (2 * aec->nlpoutlevel.averagelevel) +
                               1e-10f);
      dtmp2 = 10 * (float)log10(echo / suppressedEcho + 1e-10f);

      dtmp = dtmp2;
      aec->erle.instant = dtmp;
      if (dtmp > aec->erle.max) {
        aec->erle.max = dtmp;
      }

      if (dtmp < aec->erle.min) {
        aec->erle.min = dtmp;
      }

      aec->erle.counter++;
      aec->erle.sum += dtmp;
      aec->erle.average = aec->erle.sum / aec->erle.counter;

      // Upper mean
      if (dtmp > aec->erle.average) {
        aec->erle.hicounter++;
        aec->erle.hisum += dtmp;
        aec->erle.himean = aec->erle.hisum / aec->erle.hicounter;
      }
    }

    aec->stateCounter = 0;
  }
}
// Update the delay metrics
static void UpdateDelayMetrics(AecCore* self) {
  int i = 0;
  int delay_values = 0;
  int median = 0;
  int lookahead = WebRtc_lookahead(self->delay_estimator);
  const int kMsPerBlock = PART_LEN / (self->mult * 8);
  int64_t l1_norm = 0;

  if (self->num_delay_values == 0) {
    // We have no new delay value data. Even though -1 is a valid |median| in
    // the sense that we allow negative values, it will practically never be
    // used since multiples of |kMsPerBlock| will always be returned. We
    // therefore use -1 to indicate in the logs that the delay estimator was
    // not able to estimate the delay.
    self->delay_median = -1;
    self->delay_std = -1;
    self->fraction_poor_delays = -1;
    return;
  }

  // Start value for the median count-down.
  delay_values = self->num_delay_values >> 1;
  // Get the median of the delay values since the last update.
  for (i = 0; i < kHistorySizeBlocks; i++) {
    delay_values -= self->delay_histogram[i];
    if (delay_values < 0) {
      median = i;
      break;
    }
  }
  // Account for lookahead.
  self->delay_median = (median - lookahead) * kMsPerBlock;

  // Calculate the L1 norm, with the median value as the center of the
  // distribution.
  for (i = 0; i < kHistorySizeBlocks; i++) {
    l1_norm += abs(i - median) * self->delay_histogram[i];
  }
  self->delay_std = (int)((l1_norm + self->num_delay_values / 2) /
      self->num_delay_values) * kMsPerBlock;

  // Determine the fraction of delays that are out of bounds, that is, either
  // negative (anti-causal system) or larger than the AEC filter length.
  {
    int num_delays_out_of_bounds = self->num_delay_values;
    const int histogram_length = sizeof(self->delay_histogram) /
      sizeof(self->delay_histogram[0]);
    for (i = lookahead; i < lookahead + self->num_partitions; ++i) {
      if (i < histogram_length)
        num_delays_out_of_bounds -= self->delay_histogram[i];
    }
    self->fraction_poor_delays = (float)num_delays_out_of_bounds /
        self->num_delay_values;
  }

  // Reset the histogram.
  memset(self->delay_histogram, 0, sizeof(self->delay_histogram));
  self->num_delay_values = 0;

  return;
}
// Transform time-domain data to the frequency domain
static void TimeToFrequency(float time_data[PART_LEN2],
                            float freq_data[2][PART_LEN1],
                            int window) {
  int i = 0;

  // TODO(bjornv): Should we have a different function/wrapper for windowed FFT?
  if (window) {
    for (i = 0; i < PART_LEN; i++) {
      time_data[i] *= WebRtcAec_sqrtHanning[i];
      time_data[PART_LEN + i] *= WebRtcAec_sqrtHanning[PART_LEN - i];
    }
  }

  aec_rdft_forward_128(time_data);
  // Reorder fft output into (re, im) arrays.
  freq_data[1][0] = 0;
  freq_data[1][PART_LEN] = 0;
  freq_data[0][0] = time_data[0];
  freq_data[0][PART_LEN] = time_data[1];
  for (i = 1; i < PART_LEN; i++) {
    freq_data[0][i] = time_data[2 * i];
    freq_data[1][i] = time_data[2 * i + 1];
  }
}
// Move the far-end read pointer without updating the system delay.
static int MoveFarReadPtrWithoutSystemDelayUpdate(AecCore* self, int elements) {
  WebRtc_MoveReadPtr(self->far_buf_windowed, elements);
#ifdef WEBRTC_AEC_DEBUG_DUMP
  WebRtc_MoveReadPtr(self->far_time_buf, elements);
#endif
  return WebRtc_MoveReadPtr(self->far_buf, elements);
}
// Signal-based delay correction
static int SignalBasedDelayCorrection(AecCore* self) {
  int delay_correction = 0;
  int last_delay = -2;
  assert(self != NULL);
#if !defined(WEBRTC_ANDROID)
  // On desktops, turn on the correction after |kDelayCorrectionStart| frames.
  // This is to let the delay estimation get a chance to converge. Also, if
  // the playout audio volume is low (or even muted) the delay estimation can
  // return a very large delay, which will break the AEC if it is applied.
  if (self->frame_count < kDelayCorrectionStart) {
    return 0;
  }
#endif

  // 1. Check for a non-negative delay estimate. Note that the estimates we
  //    get from the delay estimation are not compensated for lookahead.
  //    Hence, a negative |last_delay| is an invalid one.
  // 2. Verify that there is a delay change. In addition, only allow a change
  //    if the delay is outside a certain region, taking the AEC filter length
  //    into account.
  // TODO(bjornv): Investigate if we can remove the non-zero delay change
  //    check.
  // 3. Only allow delay correction if the delay estimation quality exceeds
  //    |delay_quality_threshold|.
  // 4. Finally, verify that the proposed |delay_correction| is feasible by
  //    comparing it with the size of the far-end buffer.
  last_delay = WebRtc_last_delay(self->delay_estimator);
  if ((last_delay >= 0) &&
      (last_delay != self->previous_delay) &&
      (WebRtc_last_delay_quality(self->delay_estimator) >
           self->delay_quality_threshold)) {
    int delay = last_delay - WebRtc_lookahead(self->delay_estimator);
    // Allow for a slack in the actual delay, defined by |lower_bound| and
    // |upper_bound|. The adaptive echo cancellation filter is currently
    // |num_partitions| (of 64 samples each) long. If the delay estimate is
    // negative or at least 3/4 of the filter length, we open up for
    // correction.
    const int lower_bound = 0;
    const int upper_bound = self->num_partitions * 3 / 4;
    const int do_correction = delay <= lower_bound || delay > upper_bound;
    if (do_correction == 1) {
      int available_read = (int)WebRtc_available_read(self->far_buf);
      // With |shift_offset| we gradually rely on the delay estimates. For
      // positive delays we reduce the correction by |shift_offset| to lower
      // the risk of pushing the AEC into a non-causal state. For negative
      // delays we rely on the values up to a rounding error, hence compensate
      // by 1 element to make sure the delay is pushed into the causal region.
      delay_correction = -delay;
      delay_correction += delay > self->shift_offset ? self->shift_offset : 1;
      self->shift_offset--;
      self->shift_offset = (self->shift_offset <= 1 ? 1 : self->shift_offset);
      if (delay_correction > available_read - self->mult - 1) {
        // We don't have enough data in the buffer to perform this shift.
        // Hence, we do not rely on the delay estimate and do nothing.
        delay_correction = 0;
      } else {
        self->previous_delay = last_delay;
        ++self->delay_correction_count;
      }
    }
  }
  // Update the |delay_quality_threshold| once we have our first delay
  // correction.
  if (self->delay_correction_count > 0) {
    float delay_quality = WebRtc_last_delay_quality(self->delay_estimator);
    delay_quality = (delay_quality > kDelayQualityThresholdMax ?
        kDelayQualityThresholdMax : delay_quality);
    self->delay_quality_threshold =
        (delay_quality > self->delay_quality_threshold ? delay_quality :
            self->delay_quality_threshold);
  }
  return delay_correction;
}
// NLP (non-linear processing) stage:
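// Outline of the NLP stage:
// 1) Compute the subband coherences cohde (near-end vs. error) and cohxd
//    (far-end vs. near-end).
// 2) Average them over the preferred band to classify the frame as near-end
//    only (stNearState = 1) or as containing (residual) echo.
// 3) Derive the per-bin suppression gain hNl from the coherences, track its
//    minima and map them to an overdrive (suppression aggressiveness).
// 4) Apply OverdriveAndSuppress(), add comfort noise, and return to the time
//    domain with a windowed overlap-add.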
static void NonLinearProcessing(AecCore* aec,
                                float* output,
                                float* const* outputH) {
  float efw[2][PART_LEN1], xfw[2][PART_LEN1];
  complex_t comfortNoiseHband[PART_LEN1];
  float fft[PART_LEN2];
  float scale, dtmp;
  float nlpGainHband;  // NLP gain for the high band
  int i;
  size_t j;
  // Coherence and the non-linear filter.
  // cohde: coherence between the near-end and the error signal; the larger
  //        cohde, the less residual echo.
  // cohxd: coherence between the far-end and the near-end signal; the larger
  //        cohxd, the more echo.
  float cohde[PART_LEN1], cohxd[PART_LEN1];
  // hNlDeAvg: average coherence between the mic signal and the AEC (linear)
  // output. hNlXdAvg: average non-coherence between the far-end reference
  // and the mic signal.
  float hNlDeAvg, hNlXdAvg;
  float hNl[PART_LEN1];
  // Preferred-band working array
  float hNlPref[kPrefBandSize];
  float hNlFb = 0, hNlFbLow = 0;
  // prefBandQuant / prefBandQuantLow: quantiles used when selecting the
  // suppression level from the preferred band.
  const float prefBandQuant = 0.75f, prefBandQuantLow = 0.5f;
  const int prefBandSize = kPrefBandSize / aec->mult;
  const int minPrefBand = 4 / aec->mult;
  // Mode-dependent minimum overdrive.
  const float* min_overdrive = aec->extended_filter_enabled
                                   ? kExtendedMinOverDrive
                                   : kNormalMinOverDrive;

  // Filter energy
  const int delayEstInterval = 10 * aec->mult;

  float* xfw_ptr = NULL;

  aec->delayEstCtr++;
  if (aec->delayEstCtr == delayEstInterval) {
    aec->delayEstCtr = 0;
  }

  // Initialize comfort noise for the H band
  memset(comfortNoiseHband, 0, sizeof(comfortNoiseHband));
  nlpGainHband = (float)0.0;
  dtmp = (float)0.0;

  // We should always have at least one element stored in |far_buf_windowed|.
  assert(WebRtc_available_read(aec->far_buf_windowed) > 0);
  // NLP
  WebRtc_ReadBuffer(aec->far_buf_windowed, (void**)&xfw_ptr, &xfw[0][0], 1);
  // TODO(bjornv): Investigate if we can reuse |far_buf_windowed| instead of
  // |xfwBuf|.
  // Buffer the windowed far-end spectrum.
  memcpy(aec->xfwBuf, xfw_ptr, sizeof(float) * 2 * PART_LEN1);
  // Subband coherence
  WebRtcAec_SubbandCoherence(aec, efw, xfw, fft, cohde, cohxd);

  hNlXdAvg = 0;
  for (i = minPrefBand; i < prefBandSize + minPrefBand; i++) {
    hNlXdAvg += cohxd[i];
  }
  hNlXdAvg /= prefBandSize;
  hNlXdAvg = 1 - hNlXdAvg;

  hNlDeAvg = 0;
  for (i = minPrefBand; i < prefBandSize + minPrefBand; i++) {
    hNlDeAvg += cohde[i];
  }
  hNlDeAvg /= prefBandSize;
  // Update the minimum of hNlXdAvg (hNlXdAvgMin). The constant 0.75 controls
  // the update rate: the larger it is, the more often hNlXdAvgMin updates and
  // the more sensitive the AEC becomes to residual echo.
  if (hNlXdAvg < 0.75f && hNlXdAvg < aec->hNlXdAvgMin) {
    aec->hNlXdAvgMin = hNlXdAvg;
  }

  if (hNlDeAvg > 0.98f && hNlXdAvg > 0.9f) {
    // The AEC output is highly coherent with the mic signal while the far-end
    // reference is largely uncorrelated with it: only near-end speech is
    // present (or the residual echo is very weak).
    aec->stNearState = 1;
  } else if (hNlDeAvg < 0.95f || hNlXdAvg < 0.8f) {
    // The AEC output is less coherent with the mic signal, or the far-end
    // reference correlates with it: residual echo is present and needs to be
    // suppressed.
    aec->stNearState = 0;
  }

  if (aec->hNlXdAvgMin == 1) {
    aec->echoState = 0;
    aec->overDrive = min_overdrive[aec->nlp_mode];

    if (aec->stNearState == 1) {
      memcpy(hNl, cohde, sizeof(hNl));
      hNlFb = hNlDeAvg;
      hNlFbLow = hNlDeAvg;
    } else {
      for (i = 0; i < PART_LEN1; i++) {
        hNl[i] = 1 - cohxd[i];
      }
      hNlFb = hNlXdAvg;
      hNlFbLow = hNlXdAvg;
    }
  } else {

    if (aec->stNearState == 1) {
      aec->echoState = 0;
      memcpy(hNl, cohde, sizeof(hNl));
      hNlFb = hNlDeAvg;
      hNlFbLow = hNlDeAvg;
    } else {
      aec->echoState = 1;
      for (i = 0; i < PART_LEN1; i++) {
        hNl[i] = WEBRTC_SPL_MIN(cohde[i], 1 - cohxd[i]);
      }

      // Select an order statistic from the preferred bands.
      // TODO: Using quicksort now, but a selection algorithm may be preferred.
      memcpy(hNlPref, &hNl[minPrefBand], sizeof(float) * prefBandSize);
      qsort(hNlPref, prefBandSize, sizeof(float), CmpFloat);
      hNlFb = hNlPref[(int)floor(prefBandQuant * (prefBandSize - 1))];
      hNlFbLow = hNlPref[(int)floor(prefBandQuantLow * (prefBandSize - 1))];
    }
  }
  // Track the local filter minimum to determine suppression overdrive. A
  // smaller hNlFbLow observed over a period lowers hNlFbMin, which in turn
  // raises the overdrive. The constant 0.6 controls the update rate: the
  // larger it is, the more often hNlFbMin updates and the more sensitive the
  // AEC becomes to residual echo.
  if (hNlFbLow < 0.6f && hNlFbLow < aec->hNlFbLocalMin) {
    aec->hNlFbLocalMin = hNlFbLow;
    aec->hNlFbMin = hNlFbLow;
    aec->hNlNewMin = 1;
    aec->hNlMinCtr = 0;
  }
  // Raise both minima with a small fixed step so that hNlXdAvgMin and
  // hNlFbMin cannot get stuck and can keep being updated. The step size also
  // controls how often the two values update: a larger step means more
  // frequent updates.
  aec->hNlFbLocalMin =
      WEBRTC_SPL_MIN(aec->hNlFbLocalMin + 0.0008f / aec->mult, 1);
  aec->hNlXdAvgMin = WEBRTC_SPL_MIN(aec->hNlXdAvgMin + 0.0006f / aec->mult, 1);

  if (aec->hNlNewMin == 1) {
    aec->hNlMinCtr++;
  }
  // hNlMinCtr == 2 means hNlFbMin was updated on the current frame only, not
  // on the next one. In other words, a new minimum must persist for
  // hNlMinCtr - 1 frames before it takes effect, which prevents spurious
  // triggers.
  if (aec->hNlMinCtr == 2) {
    aec->hNlNewMin = 0;
    aec->hNlMinCtr = 0;
    // kTargetSupp[aec->nlp_mode] sets how many dB to suppress in the current
    // frame.
    aec->overDrive =
        WEBRTC_SPL_MAX(kTargetSupp[aec->nlp_mode] /
                           ((float)log(aec->hNlFbMin + 1e-10f) + 1e-10f),
                       min_overdrive[aec->nlp_mode]);
  }

  // Smooth the overdrive.
  if (aec->overDrive < aec->overDriveSm) {
    aec->overDriveSm = 0.99f * aec->overDriveSm + 0.01f * aec->overDrive;
  } else {
    aec->overDriveSm = 0.9f * aec->overDriveSm + 0.1f * aec->overDrive;
  }

  WebRtcAec_OverdriveAndSuppress(aec, hNl, hNlFb, efw);

  // Add comfort noise.
  WebRtcAec_ComfortNoise(aec, efw, comfortNoiseHband, aec->noisePow, hNl);

  // TODO(bjornv): Investigate how to take the windowing below into account
  // if needed.
  if (aec->metricsMode == 1) {
    // Note that we have a scaling by two in the time domain |eBuf|.
    // In addition the time domain signal is windowed before transformation,
    // losing half the energy on average. We take care of the first scaling
    // only in UpdateMetrics().
    UpdateLevel(&aec->nlpoutlevel, efw);
  }
  // Inverse error fft.
  fft[0] = efw[0][0];
  fft[1] = efw[0][PART_LEN];
  for (i = 1; i < PART_LEN; i++) {
    fft[2 * i] = efw[0][i];
    // Sign change required by Ooura fft.
    fft[2 * i + 1] = -efw[1][i];
  }
  aec_rdft_inverse_128(fft);

  // Overlap and add to obtain the output.
  scale = 2.0f / PART_LEN2;
  for (i = 0; i < PART_LEN; i++) {
    fft[i] *= scale;  // fft scaling
    fft[i] = fft[i] * WebRtcAec_sqrtHanning[i] + aec->outBuf[i];

    fft[PART_LEN + i] *= scale;  // fft scaling
    aec->outBuf[i] = fft[PART_LEN + i] * WebRtcAec_sqrtHanning[PART_LEN - i];

    // Saturate the output to keep it within the allowed range.
    output[i] = WEBRTC_SPL_SAT(
        WEBRTC_SPL_WORD16_MAX, fft[i], WEBRTC_SPL_WORD16_MIN);
  }

  // For H band
  if (aec->num_bands > 1) {

    // H band gain
    // average NLP over the low band: average over the second half of the
    // frequency spectrum (4->8 kHz)
    GetHighbandGain(hNl, &nlpGainHband);

    // Inverse comfort noise
    if (flagHbandCn == 1) {
      fft[0] = comfortNoiseHband[0][0];
      fft[1] = comfortNoiseHband[PART_LEN][0];
      for (i = 1; i < PART_LEN; i++) {
        fft[2 * i] = comfortNoiseHband[i][0];
        fft[2 * i + 1] = comfortNoiseHband[i][1];
      }
      aec_rdft_inverse_128(fft);
      scale = 2.0f / PART_LEN2;
    }

    // Compute gain factor
    for (j = 0; j < aec->num_bands - 1; ++j) {
      for (i = 0; i < PART_LEN; i++) {
        dtmp = aec->dBufH[j][i];
        dtmp = dtmp * nlpGainHband;  // variable gain

        // Add some comfort noise where the H band is attenuated
        if (flagHbandCn == 1 && j == 0) {
          fft[i] *= scale;  // fft scaling
          dtmp += cnScaleHband * fft[i];
        }

        // Saturate the output to keep it within the allowed range.
        outputH[j][i] = WEBRTC_SPL_SAT(
            WEBRTC_SPL_WORD16_MAX, dtmp, WEBRTC_SPL_WORD16_MIN);
      }
    }
  }

  // Copy the current block to the old position.
  memcpy(aec->dBuf, aec->dBuf + PART_LEN, sizeof(float) * PART_LEN);
  memcpy(aec->eBuf, aec->eBuf + PART_LEN, sizeof(float) * PART_LEN);

  // Copy the current block to the old position for the H bands
  for (j = 0; j < aec->num_bands - 1; ++j) {
    memcpy(aec->dBufH[j], aec->dBufH[j] + PART_LEN, sizeof(float) * PART_LEN);
  }

  memmove(aec->xfwBuf + PART_LEN1,
          aec->xfwBuf,
          sizeof(aec->xfwBuf) - sizeof(complex_t) * PART_LEN1);
}

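// Processes one PART_LEN (64-sample) block: reads a near-end block, fetches
// the matching far-end spectrum, updates the smoothed power and noise floor
// estimates, runs the partitioned frequency-domain filter (FilterFar), forms
// the error e = d - y, scales the error and adapts the filter, and finally
// runs NonLinearProcessing() to produce the output block.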
static void ProcessBlock(AecCore* aec) {
  size_t i;
  float y[PART_LEN], e[PART_LEN];
  float scale;

  float fft[PART_LEN2];
  float xf[2][PART_LEN1], yf[2][PART_LEN1], ef[2][PART_LEN1];
  float df[2][PART_LEN1];
  float far_spectrum = 0.0f;
  float near_spectrum = 0.0f;
  float abs_far_spectrum[PART_LEN1];
  float abs_near_spectrum[PART_LEN1];

  const float gPow[2] = {0.9f, 0.1f};

  // Noise estimate constants.
  const int noiseInitBlocks = 500 * aec->mult;
  const float step = 0.1f;
  const float ramp = 1.0002f;
  const float gInitNoise[2] = {0.999f, 0.001f};

  float nearend[PART_LEN];
  float* nearend_ptr = NULL;
  float output[PART_LEN];
  float outputH[NUM_HIGH_BANDS_MAX][PART_LEN];
  float* outputH_ptr[NUM_HIGH_BANDS_MAX];
  for (i = 0; i < NUM_HIGH_BANDS_MAX; ++i) {
    outputH_ptr[i] = outputH[i];
  }

  float* xf_ptr = NULL;

  // Concatenate the old and new near-end blocks.
  for (i = 0; i < aec->num_bands - 1; ++i) {
    WebRtc_ReadBuffer(aec->nearFrBufH[i],
                      (void**)&nearend_ptr,
                      nearend,
                      PART_LEN);
    memcpy(aec->dBufH[i] + PART_LEN, nearend_ptr, sizeof(nearend));
  }
  WebRtc_ReadBuffer(aec->nearFrBuf, (void**)&nearend_ptr, nearend, PART_LEN);
  memcpy(aec->dBuf + PART_LEN, nearend_ptr, sizeof(nearend));

  // ---------- Ooura fft ----------

#ifdef WEBRTC_AEC_DEBUG_DUMP
  {
    float farend[PART_LEN];
    float* farend_ptr = NULL;
    WebRtc_ReadBuffer(aec->far_time_buf, (void**)&farend_ptr, farend, 1);
    RTC_AEC_DEBUG_WAV_WRITE(aec->farFile, farend_ptr, PART_LEN);
    RTC_AEC_DEBUG_WAV_WRITE(aec->nearFile, nearend_ptr, PART_LEN);
  }
#endif

  // We should always have at least one element stored in |far_buf|.
  assert(WebRtc_available_read(aec->far_buf) > 0);
  WebRtc_ReadBuffer(aec->far_buf, (void**)&xf_ptr, &xf[0][0], 1);

  // Near fft
  memcpy(fft, aec->dBuf, sizeof(float) * PART_LEN2);
  TimeToFrequency(fft, df, 0);

  // Power smoothing
  for (i = 0; i < PART_LEN1; i++) {
    far_spectrum = (xf_ptr[i] * xf_ptr[i]) +
                   (xf_ptr[PART_LEN1 + i] * xf_ptr[PART_LEN1 + i]);
    aec->xPow[i] =
        gPow[0] * aec->xPow[i] + gPow[1] * aec->num_partitions * far_spectrum;
    // Calculate the absolute spectrum
    abs_far_spectrum[i] = sqrtf(far_spectrum);

    near_spectrum = df[0][i] * df[0][i] + df[1][i] * df[1][i];
    aec->dPow[i] = gPow[0] * aec->dPow[i] + gPow[1] * near_spectrum;
    // Calculate the absolute spectrum
    abs_near_spectrum[i] = sqrtf(near_spectrum);
  }

  // Estimate the noise power. Wait until dPow is more stable.
  if (aec->noiseEstCtr > 50) {
    for (i = 0; i < PART_LEN1; i++) {
      if (aec->dPow[i] < aec->dMinPow[i]) {
        aec->dMinPow[i] =
            (aec->dPow[i] + step * (aec->dMinPow[i] - aec->dPow[i])) * ramp;
      } else {
        aec->dMinPow[i] *= ramp;
      }
    }
  }

  // Smoothly increase the noise power from zero at the beginning to avoid a
  // sudden burst of comfort noise.
  if (aec->noiseEstCtr < noiseInitBlocks) {
    aec->noiseEstCtr++;
    for (i = 0; i < PART_LEN1; i++) {
      if (aec->dMinPow[i] > aec->dInitMinPow[i]) {
        aec->dInitMinPow[i] = gInitNoise[0] * aec->dInitMinPow[i] +
                              gInitNoise[1] * aec->dMinPow[i];
      } else {
        aec->dInitMinPow[i] = aec->dMinPow[i];
      }
    }
    aec->noisePow = aec->dInitMinPow;
  } else {
    aec->noisePow = aec->dMinPow;
  }

  // Block-wise delay estimation used for logging
  if (aec->delay_logging_enabled) {
    if (WebRtc_AddFarSpectrumFloat(
            aec->delay_estimator_farend, abs_far_spectrum, PART_LEN1) == 0) {
      int delay_estimate = WebRtc_DelayEstimatorProcessFloat(
          aec->delay_estimator, abs_near_spectrum, PART_LEN1);
      if (delay_estimate >= 0) {
        // Update the delay estimate buffer.
        aec->delay_histogram[delay_estimate]++;
        aec->num_delay_values++;
      }
      if (aec->delay_metrics_delivered == 1 &&
          aec->num_delay_values >= kDelayMetricsAggregationWindow) {
        UpdateDelayMetrics(aec);
      }
    }
  }

  // Update the xfBuf block position.
  aec->xfBufBlockPos--;
  if (aec->xfBufBlockPos == -1) {
    aec->xfBufBlockPos = aec->num_partitions - 1;
  }

  // Buffer xf
  memcpy(aec->xfBuf[0] + aec->xfBufBlockPos * PART_LEN1,
         xf_ptr,
         sizeof(float) * PART_LEN1);
  memcpy(aec->xfBuf[1] + aec->xfBufBlockPos * PART_LEN1,
         &xf_ptr[PART_LEN1],
         sizeof(float) * PART_LEN1);

  memset(yf, 0, sizeof(yf));

  // Filter far
  WebRtcAec_FilterFar(aec, yf);

  // Inverse fft to obtain the echo estimate and error.
  fft[0] = yf[0][0];
  fft[1] = yf[0][PART_LEN];
  for (i = 1; i < PART_LEN; i++) {
    fft[2 * i] = yf[0][i];
    fft[2 * i + 1] = yf[1][i];
  }
  aec_rdft_inverse_128(fft);

  scale = 2.0f / PART_LEN2;
  for (i = 0; i < PART_LEN; i++) {
    y[i] = fft[PART_LEN + i] * scale;  // fft scaling
  }

  for (i = 0; i < PART_LEN; i++) {
    e[i] = nearend_ptr[i] - y[i];
  }

  // Error fft
  memcpy(aec->eBuf + PART_LEN, e, sizeof(float) * PART_LEN);
  memset(fft, 0, sizeof(float) * PART_LEN);
  memcpy(fft + PART_LEN, e, sizeof(float) * PART_LEN);
  // TODO(bjornv): Change to use TimeToFrequency().
  aec_rdft_forward_128(fft);

  ef[1][0] = 0;
  ef[1][PART_LEN] = 0;
  ef[0][0] = fft[0];
  ef[0][PART_LEN] = fft[1];
  for (i = 1; i < PART_LEN; i++) {
    ef[0][i] = fft[2 * i];
    ef[1][i] = fft[2 * i + 1];
  }

  RTC_AEC_DEBUG_RAW_WRITE(aec->e_fft_file,
                          &ef[0][0],
                          sizeof(ef[0][0]) * PART_LEN1 * 2);

  if (aec->metricsMode == 1) {
    // Note that the first PART_LEN samples in fft (before transformation) are
    // zero. Hence, the scaling by two in UpdateLevel() should not be
    // performed. That scaling is taken care of in UpdateMetrics() instead.
    UpdateLevel(&aec->linoutlevel, ef);
  }

  // Scale the error signal inversely with the far-end power.
  WebRtcAec_ScaleErrorSignal(aec, ef);
  WebRtcAec_FilterAdaptation(aec, fft, ef);
  NonLinearProcessing(aec, output, outputH_ptr);

  if (aec->metricsMode == 1) {
    // Update power levels and echo metrics
    UpdateLevel(&aec->farlevel, (float(*)[PART_LEN1])xf_ptr);
    UpdateLevel(&aec->nearlevel, df);
    UpdateMetrics(aec);
  }

  // Store the output block.
  WebRtc_WriteBuffer(aec->outFrBuf, output, PART_LEN);
  // For the H bands
  for (i = 0; i < aec->num_bands - 1; ++i) {
    WebRtc_WriteBuffer(aec->outFrBufH[i], outputH[i], PART_LEN);
  }

  RTC_AEC_DEBUG_WAV_WRITE(aec->outLinearFile, e, PART_LEN);
  RTC_AEC_DEBUG_WAV_WRITE(aec->outFile, output, PART_LEN);
}

AecCore* WebRtcAec_CreateAec() {
  int i;
  AecCore* aec = malloc(sizeof(AecCore));
  if (!aec) {
    return NULL;
  }

  aec->nearFrBuf = WebRtc_CreateBuffer(FRAME_LEN + PART_LEN, sizeof(float));
  if (!aec->nearFrBuf) {
    WebRtcAec_FreeAec(aec);
    return NULL;
  }

  aec->outFrBuf = WebRtc_CreateBuffer(FRAME_LEN + PART_LEN, sizeof(float));
  if (!aec->outFrBuf) {
    WebRtcAec_FreeAec(aec);
    return NULL;
  }

  for (i = 0; i < NUM_HIGH_BANDS_MAX; ++i) {
    aec->nearFrBufH[i] = WebRtc_CreateBuffer(FRAME_LEN + PART_LEN,
                                             sizeof(float));
    if (!aec->nearFrBufH[i]) {
      WebRtcAec_FreeAec(aec);
      return NULL;
    }
    aec->outFrBufH[i] = WebRtc_CreateBuffer(FRAME_LEN + PART_LEN,
                                            sizeof(float));
    if (!aec->outFrBufH[i]) {
      WebRtcAec_FreeAec(aec);
      return NULL;
    }
  }

  // Create far-end buffers.
  aec->far_buf =
      WebRtc_CreateBuffer(kBufSizePartitions, sizeof(float) * 2 * PART_LEN1);
  if (!aec->far_buf) {
    WebRtcAec_FreeAec(aec);
    return NULL;
  }
  aec->far_buf_windowed =
      WebRtc_CreateBuffer(kBufSizePartitions, sizeof(float) * 2 * PART_LEN1);
  if (!aec->far_buf_windowed) {
    WebRtcAec_FreeAec(aec);
    return NULL;
  }
#ifdef WEBRTC_AEC_DEBUG_DUMP
  aec->instance_index = webrtc_aec_instance_count;
  aec->far_time_buf =
      WebRtc_CreateBuffer(kBufSizePartitions, sizeof(float) * PART_LEN);
  if (!aec->far_time_buf) {
    WebRtcAec_FreeAec(aec);
    return NULL;
  }
  aec->farFile = aec->nearFile = aec->outFile = aec->outLinearFile = NULL;
  aec->debug_dump_count = 0;
#endif
  aec->delay_estimator_farend =
      WebRtc_CreateDelayEstimatorFarend(PART_LEN1, kHistorySizeBlocks);
  if (aec->delay_estimator_farend == NULL) {
    WebRtcAec_FreeAec(aec);
    return NULL;
  }
  // We create the delay_estimator with the same amount of maximum lookahead
  // as the delay history size (kHistorySizeBlocks), for symmetry reasons.
  aec->delay_estimator = WebRtc_CreateDelayEstimator(
      aec->delay_estimator_farend, kHistorySizeBlocks);
  if (aec->delay_estimator == NULL) {
    WebRtcAec_FreeAec(aec);
    return NULL;
  }
#ifdef WEBRTC_ANDROID
  aec->delay_agnostic_enabled = 1;  // DA-AEC enabled by default.
  // DA-AEC assumes the system is causal from the beginning and will
  // self-adjust the lookahead when shifting is required.
  WebRtc_set_lookahead(aec->delay_estimator, 0);
#else
  aec->delay_agnostic_enabled = 0;
  WebRtc_set_lookahead(aec->delay_estimator, kLookaheadBlocks);
#endif
  aec->extended_filter_enabled = 0;

  // Assembly optimizations
  WebRtcAec_FilterFar = FilterFar;
  WebRtcAec_ScaleErrorSignal = ScaleErrorSignal;
  WebRtcAec_FilterAdaptation = FilterAdaptation;
  WebRtcAec_OverdriveAndSuppress = OverdriveAndSuppress;
  WebRtcAec_ComfortNoise = ComfortNoise;
  WebRtcAec_SubbandCoherence = SubbandCoherence;

#if defined(WEBRTC_ARCH_X86_FAMILY)
  if (WebRtc_GetCPUInfo(kSSE2)) {
    WebRtcAec_InitAec_SSE2();
  }
#endif

#if defined(MIPS_FPU_LE)
  WebRtcAec_InitAec_mips();
#endif

#if defined(WEBRTC_HAS_NEON)
  WebRtcAec_InitAec_neon();
#elif defined(WEBRTC_DETECT_NEON)
  if ((WebRtc_GetCPUFeaturesARM() & kCPUFeatureNEON) != 0) {
    WebRtcAec_InitAec_neon();
  }
#endif

  aec_rdft_init();

  return aec;
}

void WebRtcAec_FreeAec(AecCore* aec) {
  int i;
  if (aec == NULL) {
    return;
  }

  WebRtc_FreeBuffer(aec->nearFrBuf);
  WebRtc_FreeBuffer(aec->outFrBuf);

  for (i = 0; i < NUM_HIGH_BANDS_MAX; ++i) {
    WebRtc_FreeBuffer(aec->nearFrBufH[i]);
    WebRtc_FreeBuffer(aec->outFrBufH[i]);
  }

  WebRtc_FreeBuffer(aec->far_buf);
  WebRtc_FreeBuffer(aec->far_buf_windowed);
#ifdef WEBRTC_AEC_DEBUG_DUMP
  WebRtc_FreeBuffer(aec->far_time_buf);
#endif
  RTC_AEC_DEBUG_WAV_CLOSE(aec->farFile);
  RTC_AEC_DEBUG_WAV_CLOSE(aec->nearFile);
  RTC_AEC_DEBUG_WAV_CLOSE(aec->outFile);
  RTC_AEC_DEBUG_WAV_CLOSE(aec->outLinearFile);
  RTC_AEC_DEBUG_RAW_CLOSE(aec->e_fft_file);

  WebRtc_FreeDelayEstimator(aec->delay_estimator);
  WebRtc_FreeDelayEstimatorFarend(aec->delay_estimator_farend);

  free(aec);
}

int WebRtcAec_InitAec(AecCore* aec, int sampFreq) {
  int i;

  aec->sampFreq = sampFreq;

  if (sampFreq == 8000) {
    aec->normal_mu = 0.6f;
    aec->normal_error_threshold = 2e-6f;
    aec->num_bands = 1;
  } else {
    aec->normal_mu = 0.5f;
    aec->normal_error_threshold = 1.5e-6f;
    aec->num_bands = (size_t)(sampFreq / 16000);
  }

  WebRtc_InitBuffer(aec->nearFrBuf);
  WebRtc_InitBuffer(aec->outFrBuf);
  for (i = 0; i < NUM_HIGH_BANDS_MAX; ++i) {
    WebRtc_InitBuffer(aec->nearFrBufH[i]);
    WebRtc_InitBuffer(aec->outFrBufH[i]);
  }

  // Initialize far-end buffers.
  WebRtc_InitBuffer(aec->far_buf);
  WebRtc_InitBuffer(aec->far_buf_windowed);
#ifdef WEBRTC_AEC_DEBUG_DUMP
  WebRtc_InitBuffer(aec->far_time_buf);
  {
    int process_rate = sampFreq > 16000 ? 16000 : sampFreq;
    RTC_AEC_DEBUG_WAV_REOPEN("aec_far", aec->instance_index,
                             aec->debug_dump_count, process_rate,
                             &aec->farFile );
    RTC_AEC_DEBUG_WAV_REOPEN("aec_near", aec->instance_index,
                             aec->debug_dump_count, process_rate,
                             &aec->nearFile);
    RTC_AEC_DEBUG_WAV_REOPEN("aec_out", aec->instance_index,
                             aec->debug_dump_count, process_rate,
                             &aec->outFile );
    RTC_AEC_DEBUG_WAV_REOPEN("aec_out_linear", aec->instance_index,
                             aec->debug_dump_count, process_rate,
                             &aec->outLinearFile);
  }

  RTC_AEC_DEBUG_RAW_OPEN("aec_e_fft",
                         aec->debug_dump_count,
                         &aec->e_fft_file);

  ++aec->debug_dump_count;
#endif
  aec->system_delay = 0;

  if (WebRtc_InitDelayEstimatorFarend(aec->delay_estimator_farend) != 0) {
    return -1;
  }
  if (WebRtc_InitDelayEstimator(aec->delay_estimator) != 0) {
    return -1;
  }
  aec->delay_logging_enabled = 0;
  aec->delay_metrics_delivered = 0;
  memset(aec->delay_histogram, 0, sizeof(aec->delay_histogram));
  aec->num_delay_values = 0;
  aec->delay_median = -1;
  aec->delay_std = -1;
  aec->fraction_poor_delays = -1.0f;

  aec->signal_delay_correction = 0;
  aec->previous_delay = -2;  // (-2): Uninitialized.
  aec->delay_correction_count = 0;
  aec->shift_offset = kInitialShiftOffset;
  aec->delay_quality_threshold = kDelayQualityThresholdMin;

  aec->num_partitions = kNormalNumPartitions;

  // Update the delay estimator with the filter length. We use half of
  // |num_partitions| to take the echo path into account. In practice we say
  // that the echo has a duration of at most half |num_partitions|, which is
  // not true, but serves as a crude measure.
  WebRtc_set_allowed_offset(aec->delay_estimator, aec->num_partitions / 2);
  // TODO(bjornv): I currently hard-coded the enable. Once we've established
  // that AECM has no performance regression, robust_validation will be
  // enabled all the time and the APIs to turn it on/off will be removed.
  // Hence, remove this line then.
  WebRtc_enable_robust_validation(aec->delay_estimator, 1);
  aec->frame_count = 0;

  // Default target suppression mode.
  aec->nlp_mode = 1;

  // Sampling frequency multiplier w.r.t. 8 kHz.
  // In the case of multiple bands we process the lower band at 16 kHz, hence
  // the multiplier is always 2.
  if (aec->num_bands > 1) {
    aec->mult = 2;
  } else {
    aec->mult = (short)aec->sampFreq / 8000;
  }

  aec->farBufWritePos = 0;
  aec->farBufReadPos = 0;

  aec->inSamples = 0;
  aec->outSamples = 0;
  aec->knownDelay = 0;

  // Initialize buffers
  memset(aec->dBuf, 0, sizeof(aec->dBuf));
  memset(aec->eBuf, 0, sizeof(aec->eBuf));
  // For H bands
  for (i = 0; i < NUM_HIGH_BANDS_MAX; ++i) {
    memset(aec->dBufH[i], 0, sizeof(aec->dBufH[i]));
  }

  memset(aec->xPow, 0, sizeof(aec->xPow));
  memset(aec->dPow, 0, sizeof(aec->dPow));
  memset(aec->dInitMinPow, 0, sizeof(aec->dInitMinPow));
  aec->noisePow = aec->dInitMinPow;
  aec->noiseEstCtr = 0;

  // Initialize the minimum power estimate used for comfort noise
  for (i = 0; i < PART_LEN1; i++) {
    aec->dMinPow[i] = 1.0e6f;
  }

  // Holds the position of the last block written to
  aec->xfBufBlockPos = 0;
  // TODO: Investigate the need for these initializations. Deleting them
  // doesn't change the output at all and yields a 0.4% overall speedup.
  memset(aec->xfBuf, 0, sizeof(complex_t) * kExtendedNumPartitions * PART_LEN1);
  memset(aec->wfBuf, 0, sizeof(complex_t) * kExtendedNumPartitions * PART_LEN1);
  memset(aec->sde, 0, sizeof(complex_t) * PART_LEN1);
  memset(aec->sxd, 0, sizeof(complex_t) * PART_LEN1);
  memset(
      aec->xfwBuf, 0, sizeof(complex_t) * kExtendedNumPartitions * PART_LEN1);
  memset(aec->se, 0, sizeof(float) * PART_LEN1);

  // To prevent numerical instability in the first block.
  for (i = 0; i < PART_LEN1; i++) {
    aec->sd[i] = 1;
  }
  for (i = 0; i < PART_LEN1; i++) {
    aec->sx[i] = 1;
  }

  memset(aec->hNs, 0, sizeof(aec->hNs));
  memset(aec->outBuf, 0, sizeof(float) * PART_LEN);

  aec->hNlFbMin = 1;
  aec->hNlFbLocalMin = 1;
  aec->hNlXdAvgMin = 1;
  aec->hNlNewMin = 0;
  aec->hNlMinCtr = 0;
  aec->overDrive = 2;
  aec->overDriveSm = 2;
  aec->delayIdx = 0;
  aec->stNearState = 0;
  aec->echoState = 0;
  aec->divergeState = 0;

  aec->seed = 777;
  aec->delayEstCtr = 0;

  // Metrics are disabled by default
  aec->metricsMode = 0;
  InitMetrics(aec);

  return 0;
}

void WebRtcAec_BufferFarendPartition(AecCore* aec, const float* farend) {
  float fft[PART_LEN2];
  float xf[2][PART_LEN1];

  // Check if the buffer is full, and in that case flush the oldest data.
  if (WebRtc_available_write(aec->far_buf) < 1) {
    WebRtcAec_MoveFarReadPtr(aec, 1);
  }
  // Convert the far-end partition to the frequency domain without windowing.
  memcpy(fft, farend, sizeof(float) * PART_LEN2);
  TimeToFrequency(fft, xf, 0);
  WebRtc_WriteBuffer(aec->far_buf, &xf[0][0], 1);

  // Convert the far-end partition to the frequency domain with windowing.
  memcpy(fft, farend, sizeof(float) * PART_LEN2);
  TimeToFrequency(fft, xf, 1);
  WebRtc_WriteBuffer(aec->far_buf_windowed, &xf[0][0], 1);
}

int WebRtcAec_MoveFarReadPtr(AecCore* aec, int elements) {
  int elements_moved = MoveFarReadPtrWithoutSystemDelayUpdate(aec, elements);
  aec->system_delay -= elements_moved * PART_LEN;
  return elements_moved;
}

void WebRtcAec_ProcessFrames(AecCore* aec,
                             const float* const* nearend,
                             size_t num_bands,
                             size_t num_samples,
                             int knownDelay,
                             float* const* out) {
  size_t i, j;
  int out_elements = 0;

  aec->frame_count++;
  // For each frame the process is as follows:
  // 1) If the system_delay indicates being too small for processing a frame,
  //    we stuff the buffer with enough data for 10 ms.
  // 2 a) Adjust the buffer to the system delay by moving the read pointer.
  //   b) Apply signal-based delay correction if we have detected poor AEC
  //      performance.
  // 3) TODO(bjornv): Investigate if we need to add this:
  //    If we can't move the read pointer due to buffer size limitations, we
  //    flush/stuff the buffer.
  // 4) Process as many partitions as possible.
  // 5) Update the |system_delay| with respect to a full frame of FRAME_LEN
  //    samples. Even though we will have data left to process (we work with
  //    partitions), we consider updating a whole frame, since that's the
  //    amount of data we input and output in audio_processing.
  // 6) Update the outputs.

  // The AEC has two different delay estimation algorithms built in. The
  // first relies on delay input values from the user and the amount of
  // shifted buffer elements is controlled by |knownDelay|. This delay will
  // give a guess on how much we need to shift the far-end buffers to align
  // with the near-end signal. The other delay estimation algorithm uses the
  // far- and near-end signals to find the offset between them. This one
  // (called "signal delay") is then used to fine-tune the alignment, or
  // simply compensate for errors in the system-based one.
  // Note that the two algorithms operate independently. Currently, we only
  // allow one algorithm to be turned on.

  assert(aec->num_bands == num_bands);

  for (j = 0; j < num_samples; j+= FRAME_LEN) {
    // TODO(bjornv): Change the near-end buffer handling to be the same as
    // for the far-end, that is, with a near_pre_buf.
    // Buffer the near-end frame.
    WebRtc_WriteBuffer(aec->nearFrBuf, &nearend[0][j], FRAME_LEN);
    // For H band
    for (i = 1; i < num_bands; ++i) {
      WebRtc_WriteBuffer(aec->nearFrBufH[i - 1], &nearend[i][j], FRAME_LEN);
    }

    // 1) At most we process |aec->mult| + 1 partitions in 10 ms. Make sure we
    //    have enough far-end data for that by stuffing the buffer if
    //    |system_delay| indicates otherwise.
    if (aec->system_delay < FRAME_LEN) {
      // We don't have enough data so we rewind 10 ms.
      WebRtcAec_MoveFarReadPtr(aec, -(aec->mult + 1));
    }

    if (!aec->delay_agnostic_enabled) {
      // 2 a) Compensate for a possible change in the system delay.

      // TODO(bjornv): Investigate how we should round the delay difference;
      // right now we know that the incoming |knownDelay| is underestimated
      // when it's less than |aec->knownDelay|. We therefore round (-32) in
      // that direction. On the other end, we don't have that situation, but
      // might instead flush one partition too little. This can cause
      // non-causality, which should be investigated. Maybe allow for a
      // non-symmetric rounding, like -16.
      int move_elements = (aec->knownDelay - knownDelay - 32) / PART_LEN;
      int moved_elements =
          MoveFarReadPtrWithoutSystemDelayUpdate(aec, move_elements);
      aec->knownDelay -= moved_elements * PART_LEN;
    } else {
      // 2 b) Apply signal-based delay correction.
      int move_elements = SignalBasedDelayCorrection(aec);
      int moved_elements =
          MoveFarReadPtrWithoutSystemDelayUpdate(aec, move_elements);
      int far_near_buffer_diff = WebRtc_available_read(aec->far_buf) -
          WebRtc_available_read(aec->nearFrBuf) / PART_LEN;
      WebRtc_SoftResetDelayEstimator(aec->delay_estimator, moved_elements);
      WebRtc_SoftResetDelayEstimatorFarend(aec->delay_estimator_farend,
                                           moved_elements);
      aec->signal_delay_correction += moved_elements;
      // A buffer underrun can never occur here if we rely on the reported
      // system delay values only, since we've taken care of that in 1)
      // above. Here we apply signal-based delay correction and can therefore
      // end up with buffer underruns, since the delay estimation can be
      // wrong. We therefore stuff the buffer with enough elements if needed.
      if (far_near_buffer_diff < 0) {
        WebRtcAec_MoveFarReadPtr(aec, far_near_buffer_diff);
      }
    }

    // 4) Process as many blocks as possible.
    while (WebRtc_available_read(aec->nearFrBuf) >= PART_LEN) {
      ProcessBlock(aec);
    }

    // 5) Update the system delay with respect to the entire frame.
    aec->system_delay -= FRAME_LEN;

    // 6) Update the output frame.
    // Stuff the out buffer if we have less than a frame to output.
    // This should only happen for the first frame.
    out_elements = (int)WebRtc_available_read(aec->outFrBuf);
    if (out_elements < FRAME_LEN) {
      WebRtc_MoveReadPtr(aec->outFrBuf, out_elements - FRAME_LEN);
      for (i = 0; i < num_bands - 1; ++i) {
        WebRtc_MoveReadPtr(aec->outFrBufH[i], out_elements - FRAME_LEN);
      }
    }
    // Obtain an output frame.
    WebRtc_ReadBuffer(aec->outFrBuf, NULL, &out[0][j], FRAME_LEN);
    // For the H bands.
    for (i = 1; i < num_bands; ++i) {
      WebRtc_ReadBuffer(aec->outFrBufH[i - 1], NULL, &out[i][j], FRAME_LEN);
    }
  }
}

int WebRtcAec_GetDelayMetricsCore(AecCore* self, int* median, int* std,
                                  float* fraction_poor_delays) {
  assert(self != NULL);
  assert(median != NULL);
  assert(std != NULL);

  if (self->delay_logging_enabled == 0) {
    // Logging disabled.
    return -1;
  }

  if (self->delay_metrics_delivered == 0) {
    UpdateDelayMetrics(self);
    self->delay_metrics_delivered = 1;
  }
  *median = self->delay_median;
  *std = self->delay_std;
  *fraction_poor_delays = self->fraction_poor_delays;

  return 0;
}

int WebRtcAec_echo_state(AecCore* self) { return self->echoState; }

void WebRtcAec_GetEchoStats(AecCore* self,
                            Stats* erl,
                            Stats* erle,
                            Stats* a_nlp) {
  assert(erl != NULL);
  assert(erle != NULL);
  assert(a_nlp != NULL);
  *erl = self->erl;
  *erle = self->erle;
  *a_nlp = self->aNlp;
}

#ifdef WEBRTC_AEC_DEBUG_DUMP
void* WebRtcAec_far_time_buf(AecCore* self) { return self->far_time_buf; }
#endif

void WebRtcAec_SetConfigCore(AecCore* self,
                             int nlp_mode,
                             int metrics_mode,
                             int delay_logging) {
  assert(nlp_mode >= 0 && nlp_mode < 3);
  self->nlp_mode = nlp_mode;
  self->metricsMode = metrics_mode;
  if (self->metricsMode) {
    InitMetrics(self);
  }
  // Turn on delay logging if it is either set explicitly or if the
  // delay-agnostic AEC is enabled (which requires delay estimates).
  self->delay_logging_enabled = delay_logging || self->delay_agnostic_enabled;
  if (self->delay_logging_enabled) {
    memset(self->delay_histogram, 0, sizeof(self->delay_histogram));
  }
}

void WebRtcAec_enable_delay_agnostic(AecCore* self, int enable) {
  self->delay_agnostic_enabled = enable;
}

int WebRtcAec_delay_agnostic_enabled(AecCore* self) {
  return self->delay_agnostic_enabled;
}

void WebRtcAec_enable_extended_filter(AecCore* self, int enable) {
  self->extended_filter_enabled = enable;
  self->num_partitions = enable ? kExtendedNumPartitions : kNormalNumPartitions;
  // Update the delay estimator with the filter length. See InitAEC() for
  // details.
  WebRtc_set_allowed_offset(self->delay_estimator, self->num_partitions / 2);
}

int WebRtcAec_extended_filter_enabled(AecCore* self) {
  return self->extended_filter_enabled;
}

int WebRtcAec_system_delay(AecCore* self) { return self->system_delay; }

void WebRtcAec_SetSystemDelay(AecCore* self, int delay) {
  assert(delay >= 0);
  self->system_delay = delay;
}

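To see how the pieces above fit together, here is a minimal, hypothetical driver sketch. In WebRTC proper this sequencing lives in echo_cancellation.c (WebRtcAec_BufferFarend / WebRtcAec_Process), which additionally handles resampling, clock skew and delay reporting; the helper names below (AecSketch*) are made up for illustration, and PART_LEN = 64 / FRAME_LEN = 80 are assumed from aec_core.h:

#include <string.h>

#include "webrtc/modules/audio_processing/aec/aec_core.h"

static AecCore* aec;                 // one AEC instance
static float far_window[PART_LEN2];  // sliding 128-sample far-end window

void AecSketchInit(void) {
  aec = WebRtcAec_CreateAec();
  WebRtcAec_InitAec(aec, 16000);  // 16 kHz: mult = 2, num_bands = 1
}

// Call once per PART_LEN new far-end (render) samples.
// WebRtcAec_BufferFarendPartition() expects a PART_LEN2 window: the previous
// partition followed by the new one, so a sliding window is kept here
// (echo_cancellation.c uses far_pre_buf for the same purpose).
void AecSketchBufferFar(const float* far_part) {
  memmove(far_window, far_window + PART_LEN, sizeof(float) * PART_LEN);
  memcpy(far_window + PART_LEN, far_part, sizeof(float) * PART_LEN);
  WebRtcAec_BufferFarendPartition(aec, far_window);
  // The wrapper credits buffered far-end samples to the system delay.
  WebRtcAec_SetSystemDelay(aec, WebRtcAec_system_delay(aec) + PART_LEN);
}

// Call once per FRAME_LEN captured (near-end) samples; knownDelay = 0 here.
void AecSketchProcessNear(const float* near_frame, float* out_frame) {
  const float* nearend[1] = {near_frame};
  float* out[1] = {out_frame};
  WebRtcAec_ProcessFrames(aec, nearend, 1, FRAME_LEN, 0, out);
}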
WebRTC's echo cancellation algorithm has since been updated to AEC3, so I wanted to understand this (legacy) AEC part first. The complete code of the WebRTC modules can be found at the link below:
GitHub link
