Android Audio Playback Flow

All of the code discussed in this article comes from the AOSP sources.

1. Key Classes Involved

Class Name | Description
AudioTrack | The API class the Audio subsystem exposes to applications for playing audio data.
AudioPolicyService | The policy hub of the Audio subsystem; it handles output device selection and switching, as well as volume control.
AudioFlinger | The engine of the Audio subsystem; it manages the input and output audio streams, mixes audio data, and reads from and writes to the audio hardware to move data in and out.

2. AudioTrack

AudioTrack plays audio data in PCM stream format. A player creates the appropriate decoder in the framework layer; the decoder turns MP3, WAV, and other audio files into a PCM stream and hands that stream to AudioTrack.
AudioTrack supports two playback modes (see the sketch after this list):

  • MODE_STREAM: data is written into the AudioTrack continuously via write(). Suited to large amounts of data with loose latency requirements, e.g. music playback. Do not copy too much data in a single write, otherwise the system cannot allocate enough memory.
  • MODE_STATIC: all of the data is handed to the AudioTrack in one shot. Used for small amounts of data with strict latency requirements, e.g. UI and game sound effects.
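
To make the two modes concrete, here is a minimal MODE_STREAM sketch of my own (not part of the AOSP sources); the 440 Hz tone, chunk size, and attribute choices are arbitrary illustrations:

    import android.media.AudioAttributes;
    import android.media.AudioFormat;
    import android.media.AudioTrack;

    public final class StreamModeSketch {
        // Plays one second of a 440 Hz sine tone through a MODE_STREAM track.
        public static void playTone() {
            final int sampleRate = 44100;
            int minBuf = AudioTrack.getMinBufferSize(sampleRate,
                    AudioFormat.CHANNEL_OUT_MONO, AudioFormat.ENCODING_PCM_16BIT);
            AudioTrack track = new AudioTrack.Builder()
                    .setAudioAttributes(new AudioAttributes.Builder()
                            .setUsage(AudioAttributes.USAGE_MEDIA)
                            .setContentType(AudioAttributes.CONTENT_TYPE_MUSIC)
                            .build())
                    .setAudioFormat(new AudioFormat.Builder()
                            .setSampleRate(sampleRate)
                            .setChannelMask(AudioFormat.CHANNEL_OUT_MONO)
                            .setEncoding(AudioFormat.ENCODING_PCM_16BIT)
                            .build())
                    .setTransferMode(AudioTrack.MODE_STREAM)
                    .setBufferSizeInBytes(2 * minBuf)
                    .build();
            track.play();
            short[] chunk = new short[sampleRate / 10]; // 100 ms per write()
            for (int n = 0; n < 10; n++) {
                for (int i = 0; i < chunk.length; i++) {
                    double t = (n * chunk.length + i) / (double) sampleRate;
                    chunk[i] = (short) (Math.sin(2 * Math.PI * 440 * t) * Short.MAX_VALUE);
                }
                track.write(chunk, 0, chunk.length); // may block until space frees up
            }
            track.stop();
            track.release();
        }
    }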

3. Creating the AudioTrack Object

  1. A detailed walk through the AudioTrack constructor in AudioTrack.java
  • Check the AudioAttributes
  • Check the sample rate
  • Check the channel configuration
  • Check the data encoding
  • Compute the buffer size
  • Check the sessionId
  • Call native_setup into the JNI layer
      /**
       * AudioAttributes attributes: the set of attributes describing the audio stream
       * AudioFormat format: the audio format, i.e. the collection of sample rate, encoding, channel layout, etc.
       * int bufferSizeInBytes: the size of the AudioTrack's internal buffer
       * int mode: MODE_STATIC or MODE_STREAM
       * int sessionId: the session id this AudioTrack must be attached to
       * boolean offload: whether this is offload playback, a format handed directly to the hardware
       * int encapsulationMode: the encapsulation mode
       * TunerConfiguration tunerConfiguration: may be null
       */
      private AudioTrack(AudioAttributes attributes, AudioFormat format, int bufferSizeInBytes,
              int mode, int sessionId, boolean offload, int encapsulationMode,
              @Nullable TunerConfiguration tunerConfiguration)
                      throws IllegalArgumentException {
          super(attributes, AudioPlaybackConfiguration.PLAYER_TYPE_JAM_AUDIOTRACK);
          // mState already == STATE_UNINITIALIZED
  
          //record the current audio attributes; returned later by getAudioAttributes()
          mConfiguredAudioAttributes = attributes; // object copy not needed, immutable.
  
          //if the audio format is null, throw an IllegalArgumentException
          if (format == null) {
              throw new IllegalArgumentException("Illegal null AudioFormat");
          }
  
          // Check if we should enable deep buffer mode
          //shouldEnablePowerSaving(): should deep buffer / power saving be used? If so, rebuild
          //mAttributes with FLAG_DEEP_BUFFER set and FLAG_LOW_LATENCY cleared
          if (shouldEnablePowerSaving(mAttributes, format, bufferSizeInBytes, mode)) {
              mAttributes = new AudioAttributes.Builder(mAttributes)
                  .replaceFlags((mAttributes.getAllFlags()
                          | AudioAttributes.FLAG_DEEP_BUFFER)
                          & ~AudioAttributes.FLAG_LOW_LATENCY)
                  .build();
          }
  
          // remember which looper is associated with the AudioTrack instantiation
          //record the Looper of the thread that created this AudioTrack
          Looper looper;
          if ((looper = Looper.myLooper()) == null) {
              looper = Looper.getMainLooper();
          }
  
          //if no sample rate was specified, fall back to 0
          int rate = format.getSampleRate();
          if (rate == AudioFormat.SAMPLE_RATE_UNSPECIFIED) {
              rate = 0;
          }
  
          //channelIndexMask and channelMask are mutually exclusive: a channelIndexMask takes
          //precedence; otherwise the channelMask is used
          int channelIndexMask = 0;
          if ((format.getPropertySetMask()
                  & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_CHANNEL_INDEX_MASK) != 0) {
              channelIndexMask = format.getChannelIndexMask();
          }
          int channelMask = 0;
          if ((format.getPropertySetMask()
                  & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_CHANNEL_MASK) != 0) {
              channelMask = format.getChannelMask();
          } else if (channelIndexMask == 0) { // if no masks at all, use stereo
              //if neither channelMask nor channelIndexMask was specified, default to front left/right (stereo)
              channelMask = AudioFormat.CHANNEL_OUT_FRONT_LEFT
                      | AudioFormat.CHANNEL_OUT_FRONT_RIGHT;
          }
          //fetch the encoding
          int encoding = AudioFormat.ENCODING_DEFAULT;
          if ((format.getPropertySetMask() & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_ENCODING) != 0) {
              encoding = format.getEncoding();
          }
          //audioParamCheck() validates the parameters:
          //sample rate: if specified, it must lie within the legal range
          //  (SAMPLE_RATE_HZ_MIN to SAMPLE_RATE_HZ_MAX; 4000 Hz to 192000 Hz in recent AOSP)
          //channels: with ENCODING_IEC61937 the channel count must be 2 or 8
          //encoding: must be usable with the chosen channel count; defaults to 16-bit PCM if unspecified
          //mode: must be MODE_STREAM or MODE_STATIC; MODE_STATIC additionally requires a linear PCM encoding
          audioParamCheck(rate, channelMask, channelIndexMask, encoding, mode);
          mOffloaded = offload;
          mStreamType = AudioSystem.STREAM_DEFAULT;
          //audioBuffSizeCheck() validates the buffer size; only meaningful for PCM and ENCODING_IEC61937
          //bufferSizeInBytes must be at least 1 and a multiple of the frame size
          audioBuffSizeCheck(bufferSizeInBytes);
          mInitializationLooper = looper;
  
          if (sessionId < 0) {
              throw new IllegalArgumentException("Invalid audio session ID: "+sessionId);
          }
  
          int[] sampleRate = new int[] {mSampleRate};
          int[] session = new int[1];
          session[0] = sessionId;
          // native initialization
          //with the parameters validated, drop into native code
          //native_setup() maps to android_media_AudioTrack_setup() in android_media_AudioTrack.cpp
          //a WeakReference is passed so the Java AudioTrack can still be garbage collected
          int initResult = native_setup(new WeakReference<AudioTrack>(this), mAttributes,
                  sampleRate, mChannelMask, mChannelIndexMask, mAudioFormat,
                  mNativeBufferSizeInBytes, mDataLoadMode, session, 0 /*nativeTrackInJavaObj*/,
                  offload, encapsulationMode, tunerConfiguration,
                  getCurrentOpPackageName());
          if (initResult != SUCCESS) {
              loge("Error code "+initResult+" when initializing AudioTrack.");
              return; // with mState == STATE_UNINITIALIZED
          }
  
          mSampleRate = sampleRate[0];
          mSessionId = session[0];
  
          // TODO: consider caching encapsulationMode and tunerConfiguration in the Java object.
  
          if ((mAttributes.getFlags() & AudioAttributes.FLAG_HW_AV_SYNC) != 0) {
              int frameSizeInBytes;
              if (AudioFormat.isEncodingLinearFrames(mAudioFormat)) {
                  frameSizeInBytes = mChannelCount * AudioFormat.getBytesPerSample(mAudioFormat);
              } else {
                  frameSizeInBytes = 1;
              }
              mOffset = ((int) Math.ceil(HEADER_V2_SIZE_BYTES / frameSizeInBytes)) * frameSizeInBytes;
          }
  
          if (mDataLoadMode == MODE_STATIC) {
              mState = STATE_NO_STATIC_DATA;
          } else {
              mState = STATE_INITIALIZED;
          }
  
          baseRegisterPlayer(mSessionId);
          native_setPlayerIId(mPlayerIId); // mPlayerIId now ready to send to native AudioTrack.
      }
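
An aside of my own, not part of the AOSP listing: applications usually derive bufferSizeInBytes from AudioTrack.getMinBufferSize(), whose result is frame-aligned in practice, so audioBuffSizeCheck() is satisfied:

    import android.media.AudioFormat;
    import android.media.AudioTrack;

    final class BufferSizeSketch {
        // Illustrative: derive a frame-aligned bufferSizeInBytes for the constructor.
        static int pick(int sampleRate) {
            int minBuf = AudioTrack.getMinBufferSize(sampleRate,
                    AudioFormat.CHANNEL_OUT_STEREO, AudioFormat.ENCODING_PCM_16BIT);
            // One frame = channelCount * bytesPerSample = 2 * 2 = 4 bytes here;
            // doubling a frame-aligned value keeps it a multiple of the frame size.
            return 2 * minBuf;
        }
    }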
  2. A detailed look at android_media_AudioTrack_setup() in android_media_AudioTrack.cpp. This method sits on the JNI (Java Native Interface) boundary; for the relevant JNI syntax, refer to a JNI primer.
  • Check the sessionID
  • Check the sample rate
  • Compute the frameCount
  • Set up the callbacks
  • Initialize the newly created native AudioTrack by calling its set() method
  • Branch on the transfer mode, MODE_STREAM vs. MODE_STATIC
  /**
   * JNIEnv *env and jobject thiz are the fixed parameters every JNI method receives
   * jobject weak_this: a weak reference back to the Java AudioTrack
   * jobject jaa: the AudioAttributes
   */
  static jint
  android_media_AudioTrack_setup(JNIEnv *env, jobject thiz, jobject weak_this, jobject jaa,
                                 jintArray jSampleRate,
                                 jint channelPositionMask, jint channelIndexMask,
                                 jint audioFormat, jint buffSizeInBytes, jint memoryMode,
                                 jintArray jSession, jlong nativeAudioTrack,
                                 jboolean offload, jint encapsulationMode,
                                 jobject tunerConfiguration, jstring opPackageName) {
      ALOGV("sampleRates=%p, channel mask=%x, index mask=%x, audioFormat(Java)=%d, buffSize=%d,"
            " nativeAudioTrack=0x%" PRIX64 ", offload=%d encapsulationMode=%d tuner=%p",
            jSampleRate, channelPositionMask, channelIndexMask, audioFormat, buffSizeInBytes,
            nativeAudioTrack, offload, encapsulationMode, tunerConfiguration);
  
      if (jSession == NULL) {
          ALOGE("Error creating AudioTrack: invalid session ID pointer");
          return (jint) AUDIO_JAVA_ERROR;
      }
  
      const TunerConfigurationHelper tunerHelper(env, tunerConfiguration);
    //read the session id out of the Java array
    //GetPrimitiveArrayCritical() returns a pointer to the raw contents of a primitive array, here jSession
      jint* nSession = (jint *) env->GetPrimitiveArrayCritical(jSession, NULL);
      if (nSession == NULL) {
          ALOGE("Error creating AudioTrack: Error retrieving session id pointer");
          return (jint) AUDIO_JAVA_ERROR;
      }
      audio_session_t sessionId = (audio_session_t) nSession[0];
    //ReleasePrimitiveArrayCritical() releases nSession; while the critical pointer is held, garbage collection may be stalled
      env->ReleasePrimitiveArrayCritical(jSession, nSession, 0);
      nSession = NULL;
  
      AudioTrackJniStorage* lpJniStorage = NULL;
  
      jclass clazz = env->GetObjectClass(thiz);
      if (clazz == NULL) {
          ALOGE("Can't find %s when setting up callback.", kClassPathName);
          return (jint) AUDIOTRACK_ERROR_SETUP_NATIVEINITFAILED;
      }
  
      // if we pass in an existing *Native* AudioTrack, we don't need to create/initialize one.
      sp<AudioTrack> lpTrack;
      if (nativeAudioTrack == 0) {
          if (jaa == 0) {
              ALOGE("Error creating AudioTrack: invalid audio attributes");
              return (jint) AUDIO_JAVA_ERROR;
          }
  
          if (jSampleRate == 0) {
              ALOGE("Error creating AudioTrack: invalid sample rates");
              return (jint) AUDIO_JAVA_ERROR;
          }
        //GetIntArrayElements() pins the Java int[] and returns its contents
          int* sampleRates = env->GetIntArrayElements(jSampleRate, NULL);
          int sampleRateInHertz = sampleRates[0];
        //ReleaseIntArrayElements() releases the pinned int[]
          env->ReleaseIntArrayElements(jSampleRate, sampleRates, JNI_ABORT);
  
          // Invalid channel representations are caught by !audio_is_output_channel() below.
        //convert the Java-side channel representation to the native one
          audio_channel_mask_t nativeChannelMask = nativeChannelMaskFromJavaChannelMasks(
                  channelPositionMask, channelIndexMask);
        //audio_is_output_channel(): is this a valid output channel configuration?
          if (!audio_is_output_channel(nativeChannelMask)) {
              ALOGE("Error creating AudioTrack: invalid native channel mask %#x.", nativeChannelMask);
              return (jint) AUDIOTRACK_ERROR_SETUP_INVALIDCHANNELMASK;
          }
        //convert the channel mask into a channel count
          uint32_t channelCount = audio_channel_count_from_out_mask(nativeChannelMask);
  
          // check the format.
          // This function was called from Java, so we compare the format against the Java constants
          audio_format_t format = audioFormatToNative(audioFormat);
          if (format == AUDIO_FORMAT_INVALID) {
              ALOGE("Error creating AudioTrack: unsupported audio format %d.", audioFormat);
              return (jint) AUDIOTRACK_ERROR_SETUP_INVALIDFORMAT;
          }
  
          // compute the frame count
          size_t frameCount;
        //if the format uses proportional frames (e.g. linear PCM)
          if (audio_has_proportional_frames(format)) {
            //bytesPerSample: the size of one sample in a single channel
              const size_t bytesPerSample = audio_bytes_per_sample(format);
            //total buffer size divided by the frame size gives the maximum number of frames the buffer can hold
              frameCount = buffSizeInBytes / (channelCount * bytesPerSample);
          } else {
            //for non-proportional formats, the frame count is simply the buffer size in bytes
              frameCount = buffSizeInBytes;
          }
  
          // create the native AudioTrack object
          ScopedUtfChars opPackageNameStr(env, opPackageName);
          // TODO b/182469354: make consistent with AudioRecord
        //AttributionSourceState is an AIDL object
          AttributionSourceState attributionSource;
        //records who created this AudioTrack
          attributionSource.packageName = std::string(opPackageNameStr.c_str());
          attributionSource.token = sp<BBinder>::make();
        //create the native AudioTrack
          lpTrack = new AudioTrack(attributionSource);
  
          // read the AudioAttributes values
          auto paa = JNIAudioAttributeHelper::makeUnique();
          jint jStatus = JNIAudioAttributeHelper::nativeFromJava(env, jaa, paa.get());
          if (jStatus != (jint)AUDIO_JAVA_SUCCESS) {
              return jStatus;
          }
          ALOGV("AudioTrack_setup for usage=%d content=%d flags=0x%#x tags=%s",
                  paa->usage, paa->content_type, paa->flags, paa->tags);
  
          // initialize the callback information:
          // this data will be passed with every AudioTrack callback
        //this data lets native-side events be delivered back to Java
          lpJniStorage = new AudioTrackJniStorage();
        //NewGlobalRef() creates a new global reference to clazz
          lpJniStorage->mCallbackData.audioTrack_class = (jclass)env->NewGlobalRef(clazz);
          // we use a weak reference so the AudioTrack object can be garbage collected.
          lpJniStorage->mCallbackData.audioTrack_ref = env->NewGlobalRef(weak_this);
          lpJniStorage->mCallbackData.isOffload = offload;
          lpJniStorage->mCallbackData.busy = false;
        //audio_offload_info_t: offload-related configuration
          audio_offload_info_t offloadInfo;
        //for offload playback, offloadInfo must be initialized
          if (offload == JNI_TRUE) {
              offloadInfo = AUDIO_INFO_INITIALIZER;
              offloadInfo.format = format;
              offloadInfo.sample_rate = sampleRateInHertz;
              offloadInfo.channel_mask = nativeChannelMask;
              offloadInfo.has_video = false;
              offloadInfo.stream_type = AUDIO_STREAM_MUSIC; //required for offload
          }
  
          if (encapsulationMode != 0) {
              offloadInfo = AUDIO_INFO_INITIALIZER;
              offloadInfo.format = format;
              offloadInfo.sample_rate = sampleRateInHertz;
              offloadInfo.channel_mask = nativeChannelMask;
              offloadInfo.stream_type = AUDIO_STREAM_MUSIC;
              offloadInfo.encapsulation_mode =
                      static_cast<audio_encapsulation_mode_t>(encapsulationMode);
              offloadInfo.content_id = tunerHelper.getContentId();
              offloadInfo.sync_id = tunerHelper.getSyncId();
          }
  
          // initialize the native AudioTrack object
          status_t status = NO_ERROR;
        //pick the initialization path according to the playback (memory) mode
          switch (memoryMode) {
          case MODE_STREAM:
              status = lpTrack->set(AUDIO_STREAM_DEFAULT, // stream type, but more info conveyed
                                                          // in paa (last argument)
                                    sampleRateInHertz,
                                    format, // word length, PCM
                                    nativeChannelMask, offload ? 0 : frameCount,
                                    offload ? AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD
                                            : AUDIO_OUTPUT_FLAG_NONE,
                                    audioCallback,
                                    &(lpJniStorage->mCallbackData), // callback, callback data (user)
                                    0,    // notificationFrames == 0 since not using EVENT_MORE_DATA
                                          // to feed the AudioTrack
                                    0,    // shared mem
                                    true, // thread can call Java
                                    sessionId, // audio session ID
                                    offload ? AudioTrack::TRANSFER_SYNC_NOTIF_CALLBACK
                                            : AudioTrack::TRANSFER_SYNC,
                                    (offload || encapsulationMode) ? &offloadInfo : NULL,
                                    AttributionSourceState(), // default uid, pid values
                                    paa.get());
              break;
          case MODE_STATIC:
              // AudioTrack is using shared memory
              if (!lpJniStorage->allocSharedMem(buffSizeInBytes)) {
                  ALOGE("Error creating AudioTrack in static mode: error creating mem heap base");
                  goto native_init_failure;
              }
              status = lpTrack->set(AUDIO_STREAM_DEFAULT, // stream type, but more info conveyed
                                                          // in paa (last argument)
                                    sampleRateInHertz,
                                    format, // word length, PCM
                                    nativeChannelMask, frameCount, AUDIO_OUTPUT_FLAG_NONE,
                                    audioCallback,
                                    &(lpJniStorage->mCallbackData), // callback, callback data (user)
                                    0, // notificationFrames == 0 since not using EVENT_MORE_DATA
                                       // to feed the AudioTrack
                                    lpJniStorage->mMemBase, // shared mem
                                    true,                   // thread can call Java
                                    sessionId,              // audio session ID
                                    AudioTrack::TRANSFER_SHARED,
                                    NULL,       // default offloadInfo
                                    AttributionSourceState(), // default uid, pid values
                                    paa.get());
              break;
          default:
              ALOGE("Unknown mode %d", memoryMode);
              goto native_init_failure;
          }
  
          if (status != NO_ERROR) {
              ALOGE("Error %d initializing AudioTrack", status);
              goto native_init_failure;
          }
          // Set caller name so it can be logged in destructor.
          // MediaMetricsConstants.h: AMEDIAMETRICS_PROP_CALLERNAME_VALUE_JAVA
          lpTrack->setCallerName("java");
      } else {  // end if (nativeAudioTrack == 0)
          lpTrack = (AudioTrack*)nativeAudioTrack;
          // TODO: We need to find out which members of the Java AudioTrack might
          // need to be initialized from the Native AudioTrack
          // these are directly returned from getters:
          //  mSampleRate
          //  mAudioFormat
          //  mStreamType
          //  mChannelConfiguration
          //  mChannelCount
          //  mState (?)
          //  mPlayState (?)
          // these may be used internally (Java AudioTrack.audioParamCheck():
          //  mChannelMask
          //  mChannelIndexMask
          //  mDataLoadMode
  
          // initialize the callback information:
          // this data will be passed with every AudioTrack callback
          lpJniStorage = new AudioTrackJniStorage();
          lpJniStorage->mCallbackData.audioTrack_class = (jclass)env->NewGlobalRef(clazz);
          // we use a weak reference so the AudioTrack object can be garbage collected.
          lpJniStorage->mCallbackData.audioTrack_ref = env->NewGlobalRef(weak_this);
          lpJniStorage->mCallbackData.busy = false;
      }
      lpJniStorage->mAudioTrackCallback =
              new JNIAudioTrackCallback(env, thiz, lpJniStorage->mCallbackData.audioTrack_ref,
                                        javaAudioTrackFields.postNativeEventInJava);
      lpTrack->setAudioTrackCallback(lpJniStorage->mAudioTrackCallback);
  
      nSession = (jint *) env->GetPrimitiveArrayCritical(jSession, NULL);
      if (nSession == NULL) {
          ALOGE("Error creating AudioTrack: Error retrieving session id pointer");
          goto native_init_failure;
      }
      // read the audio session ID back from AudioTrack in case we create a new session
      nSession[0] = lpTrack->getSessionId();
      env->ReleasePrimitiveArrayCritical(jSession, nSession, 0);
      nSession = NULL;
      {
          const jint elements[1] = { (jint) lpTrack->getSampleRate() };
          env->SetIntArrayRegion(jSampleRate, 0, 1, elements);
      }
      {   // scope for the lock
          Mutex::Autolock l(sLock);
          sAudioTrackCallBackCookies.add(&lpJniStorage->mCallbackData);
      }
      // save our newly created C++ AudioTrack in the "nativeTrackInJavaObj" field
      // of the Java object (in mNativeTrackInJavaObj)
      setAudioTrack(env, thiz, lpTrack);
      // save the JNI resources so we can free them later
      //ALOGV("storing lpJniStorage: %x\n", (long)lpJniStorage);
      env->SetLongField(thiz, javaAudioTrackFields.jniData, (jlong)lpJniStorage);
      // since we had audio attributes, the stream type was derived from them during the
      // creation of the native AudioTrack: push the same value to the Java object
      env->SetIntField(thiz, javaAudioTrackFields.fieldStreamType, (jint) lpTrack->streamType());
      return (jint) AUDIO_JAVA_SUCCESS;
      // failures:
  native_init_failure:
      if (nSession != NULL) {
          env->ReleasePrimitiveArrayCritical(jSession, nSession, 0);
      }
      env->DeleteGlobalRef(lpJniStorage->mCallbackData.audioTrack_class);
      env->DeleteGlobalRef(lpJniStorage->mCallbackData.audioTrack_ref);
      delete lpJniStorage;
      env->SetLongField(thiz, javaAudioTrackFields.jniData, 0);
  
      // lpTrack goes out of scope, so reference count drops to zero
      return (jint) AUDIOTRACK_ERROR_SETUP_NATIVEINITFAILED;
  }
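To make the frameCount computation above concrete, here is a worked example of my own with hypothetical numbers (16-bit stereo PCM):

    final class FrameCountSketch {
        // Hypothetical numbers for the proportional-frames branch of the JNI code:
        // bytesPerSample = 2 (16-bit), channelCount = 2, so one frame = 4 bytes.
        static int frames() {
            int buffSizeInBytes = 16384;
            int channelCount = 2;
            int bytesPerSample = 2;
            return buffSizeInBytes / (channelCount * bytesPerSample); // 4096 frames
        }
    }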
  3. Configuring the native AudioTrack object (AudioTrack.cpp).
  • Determine the data transfer mode
  • Call createTrack_l()
status_t AudioTrack::set(
        audio_stream_type_t streamType,//e.g. AUDIO_STREAM_MUSIC, AUDIO_STREAM_SYSTEM
        uint32_t sampleRate,//sample rate
        audio_format_t format,//encoding format
        audio_channel_mask_t channelMask,//channel configuration
        size_t frameCount,//buffer size in frames
        audio_output_flags_t flags,//e.g. AUDIO_OUTPUT_FLAG_FAST, AUDIO_OUTPUT_FLAG_DEEP_BUFFER
        //callback_t: invoked when a new buffer becomes available or another event occurs;
        //this is the JNI audioCallback function, passed in as a function pointer
        callback_t cbf,
        //callback cookie; corresponds to lpJniStorage->mCallbackData in the JNI layer
        void* user,
        int32_t notificationFrames,//always 0 when coming from JNI, since EVENT_MORE_DATA is not used
        //for MODE_STREAM the JNI passes 0; for MODE_STATIC the JNI has already allocated
        //a block of shared memory for carrying the data
        const sp<IMemory>& sharedBuffer,
        bool threadCanCallJava,//whether callbacks may call into Java; normally true
        audio_session_t sessionId,
        //transfer_type specifies how data is transferred into the AudioTrack:
        //TRANSFER_DEFAULT: not specified; inferred from the other parameters
        //TRANSFER_CALLBACK: the AudioTrackThread actively pulls data from the app through the audioCallback; ToneGenerator uses this mode
        //TRANSFER_OBTAIN: the app must call obtainBuffer()/releaseBuffer() to fill in data
        //TRANSFER_SYNC: the app keeps calling write() to push data into the FIFO; writes may block
        //(waiting for AudioFlinger::PlaybackThread to consume earlier data); works in all cases and corresponds to MODE_STREAM
        //TRANSFER_SHARED: the app hands all data to the AudioTrack at once; suits small amounts of data with strict latency needs and corresponds to MODE_STATIC
        //TRANSFER_SYNC_NOTIF_CALLBACK: like TRANSFER_SYNC, with an additional notification callback
        transfer_type transferType,
        const audio_offload_info_t *offloadInfo,
        const AttributionSourceState& attributionSource,
        const audio_attributes_t* pAttributes,
        bool doNotReconnect,
        float maxRequiredSpeed,
        audio_port_handle_t selectedDeviceId)//every input/output port carries an identity ID
{
    status_t status;
    uint32_t channelCount;
    pid_t callingPid;
    pid_t myPid;
    uid_t uid = VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(attributionSource.uid));
    pid_t pid = VALUE_OR_FATAL(aidl2legacy_int32_t_pid_t(attributionSource.pid));

    // Note mPortId is not valid until the track is created, so omit mPortId in ALOG for set.
    ALOGV("%s(): streamType %d, sampleRate %u, format %#x, channelMask %#x, frameCount %zu, "
          "flags #%x, notificationFrames %d, sessionId %d, transferType %d, uid %d, pid %d",
          __func__,
          streamType, sampleRate, format, channelMask, frameCount, flags, notificationFrames,
          sessionId, transferType, attributionSource.uid, attributionSource.pid);

    mThreadCanCallJava = threadCanCallJava;
    //when the native AudioTrack object is first initialized this is AUDIO_PORT_HANDLE_NONE
    mSelectedDeviceId = selectedDeviceId;
    mSessionId = sessionId;

    switch (transferType) {
    case TRANSFER_DEFAULT:
        if (sharedBuffer != 0) {
            //transferType unspecified and sharedBuffer non-null: use TRANSFER_SHARED
            transferType = TRANSFER_SHARED;
        } else if (cbf == NULL || threadCanCallJava) {
            //no callback, or callbacks may call into Java: keep writing synchronously
            transferType = TRANSFER_SYNC;
        } else {
            //pull data from the app through the callback
            transferType = TRANSFER_CALLBACK;
        }
        break;
    case TRANSFER_CALLBACK:
    case TRANSFER_SYNC_NOTIF_CALLBACK://this mode requires a callback and must not be MODE_STATIC
        if (cbf == NULL || sharedBuffer != 0) {
            ALOGE("%s(): Transfer type %s but cbf == NULL || sharedBuffer != 0",
                    convertTransferToText(transferType), __func__);
            status = BAD_VALUE;
            goto exit;
        }
        break;
    case TRANSFER_OBTAIN:
    case TRANSFER_SYNC://these modes require MODE_STREAM (no shared buffer)
        if (sharedBuffer != 0) {
            ALOGE("%s(): Transfer type TRANSFER_OBTAIN but sharedBuffer != 0", __func__);
            status = BAD_VALUE;
            goto exit;
        }
        break;
    case TRANSFER_SHARED://this mode requires MODE_STATIC
        if (sharedBuffer == 0) {
            ALOGE("%s(): Transfer type TRANSFER_SHARED but sharedBuffer == 0", __func__);
            status = BAD_VALUE;
            goto exit;
        }
        break;
    default:
        ALOGE("%s(): Invalid transfer type %d",
                __func__, transferType);
        status = BAD_VALUE;
        goto exit;
    }
    mSharedBuffer = sharedBuffer;
    mTransfer = transferType;
    mDoNotReconnect = doNotReconnect;

    ALOGV_IF(sharedBuffer != 0, "%s(): sharedBuffer: %p, size: %zu",
            __func__, sharedBuffer->unsecurePointer(), sharedBuffer->size());

    ALOGV("%s(): streamType %d frameCount %zu flags %04x",
            __func__, streamType, frameCount, flags);

    // invariant that mAudioTrack != 0 is true only after set() returns successfully
    //if mAudioTrack != 0, set() has already been called successfully on this track
    if (mAudioTrack != 0) {
        ALOGE("%s(): Track already in use", __func__);
        status = INVALID_OPERATION;
        goto exit;
    }

    // handle default values first.
    if (streamType == AUDIO_STREAM_DEFAULT) {
        streamType = AUDIO_STREAM_MUSIC;
    }
    if (pAttributes == NULL) {
        if (uint32_t(streamType) >= AUDIO_STREAM_PUBLIC_CNT) {
            ALOGE("%s(): Invalid stream type %d", __func__, streamType);
            status = BAD_VALUE;
            goto exit;
        }
        mOriginalStreamType = streamType;

    } else {
        // stream type shouldn't be looked at, this track has audio attributes
        memcpy(&mAttributes, pAttributes, sizeof(audio_attributes_t));
        ALOGV("%s(): Building AudioTrack with attributes:"
                " usage=%d content=%d flags=0x%x tags=[%s]",
                __func__,
                 mAttributes.usage, mAttributes.content_type, mAttributes.flags, mAttributes.tags);
        mOriginalStreamType = AUDIO_STREAM_DEFAULT;
        audio_flags_to_audio_output_flags(mAttributes.flags, &flags);
    }

    // these below should probably come from the audioFlinger too...
    if (format == AUDIO_FORMAT_DEFAULT) {
        format = AUDIO_FORMAT_PCM_16_BIT;
    } else if (format == AUDIO_FORMAT_IEC61937) { // HDMI pass-through?
        flags = static_cast<audio_output_flags_t>(flags | AUDIO_OUTPUT_FLAG_IEC958_NONAUDIO);
    }

    // validate parameters
    if (!audio_is_valid_format(format)) {
        ALOGE("%s(): Invalid format %#x", __func__, format);
        status = BAD_VALUE;
        goto exit;
    }
    mFormat = format;

    if (!audio_is_output_channel(channelMask)) {
        ALOGE("%s(): Invalid channel mask %#x",  __func__, channelMask);
        status = BAD_VALUE;
        goto exit;
    }
    mChannelMask = channelMask;
    channelCount = audio_channel_count_from_out_mask(channelMask);
    mChannelCount = channelCount;

    // force direct flag if format is not linear PCM
    // or offload was requested
    if ((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
            || !audio_is_linear_pcm(format)) {
        ALOGV( (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
                    ? "%s(): Offload request, forcing to Direct Output"
                    : "%s(): Not linear PCM, forcing to Direct Output",
                    __func__);
        flags = (audio_output_flags_t)
                // FIXME why can't we allow direct AND fast?
                ((flags | AUDIO_OUTPUT_FLAG_DIRECT) & ~AUDIO_OUTPUT_FLAG_FAST);
    }

    // force direct flag if HW A/V sync requested
    if ((flags & AUDIO_OUTPUT_FLAG_HW_AV_SYNC) != 0) {
        flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_DIRECT);
    }

    if (flags & AUDIO_OUTPUT_FLAG_DIRECT) {
        if (audio_has_proportional_frames(format)) {
            mFrameSize = channelCount * audio_bytes_per_sample(format);
        } else {
            mFrameSize = sizeof(uint8_t);
        }
    } else {
        ALOG_ASSERT(audio_has_proportional_frames(format));
        mFrameSize = channelCount * audio_bytes_per_sample(format);
        // createTrack will return an error if PCM format is not supported by server,
        // so no need to check for specific PCM formats here
    }

    // sampling rate must be specified for direct outputs
    if (sampleRate == 0 && (flags & AUDIO_OUTPUT_FLAG_DIRECT) != 0) {
        status = BAD_VALUE;
        goto exit;
    }
    mSampleRate = sampleRate;
    mOriginalSampleRate = sampleRate;
    mPlaybackRate = AUDIO_PLAYBACK_RATE_DEFAULT;
    // 1.0 <= mMaxRequiredSpeed <= AUDIO_TIMESTRETCH_SPEED_MAX
    mMaxRequiredSpeed = min(max(maxRequiredSpeed, 1.0f), AUDIO_TIMESTRETCH_SPEED_MAX);

    // Make copy of input parameter offloadInfo so that in the future:
    //  (a) createTrack_l doesn't need it as an input parameter
    //  (b) we can support re-creation of offloaded tracks
    if (offloadInfo != NULL) {
        mOffloadInfoCopy = *offloadInfo;
        mOffloadInfo = &mOffloadInfoCopy;
    } else {
        mOffloadInfo = NULL;
        memset(&mOffloadInfoCopy, 0, sizeof(audio_offload_info_t));
        mOffloadInfoCopy = AUDIO_INFO_INITIALIZER;
    }

    mVolume[AUDIO_INTERLEAVE_LEFT] = 1.0f;
    mVolume[AUDIO_INTERLEAVE_RIGHT] = 1.0f;
    mSendLevel = 0.0f;
    // mFrameCount is initialized in createTrack_l
    mReqFrameCount = frameCount;
    if (notificationFrames >= 0) {
        mNotificationFramesReq = notificationFrames;
        mNotificationsPerBufferReq = 0;
    } else {
        if (!(flags & AUDIO_OUTPUT_FLAG_FAST)) {
            ALOGE("%s(): notificationFrames=%d not permitted for non-fast track",
                    __func__, notificationFrames);
            status = BAD_VALUE;
            goto exit;
        }
        if (frameCount > 0) {
            ALOGE("%s(): notificationFrames=%d not permitted with non-zero frameCount=%zu",
                    __func__, notificationFrames, frameCount);
            status = BAD_VALUE;
            goto exit;
        }
        mNotificationFramesReq = 0;
        const uint32_t minNotificationsPerBuffer = 1;
        const uint32_t maxNotificationsPerBuffer = 8;
        mNotificationsPerBufferReq = min(maxNotificationsPerBuffer,
                max((uint32_t) -notificationFrames, minNotificationsPerBuffer));
        ALOGW_IF(mNotificationsPerBufferReq != (uint32_t) -notificationFrames,
                "%s(): notificationFrames=%d clamped to the range -%u to -%u",
                __func__,
                notificationFrames, minNotificationsPerBuffer, maxNotificationsPerBuffer);
    }
    mNotificationFramesAct = 0;
    // TODO b/182392553: refactor or remove
    mClientAttributionSource = AttributionSourceState(attributionSource);
    callingPid = IPCThreadState::self()->getCallingPid();
    myPid = getpid();
    if (uid == -1 || (callingPid != myPid)) {
        mClientAttributionSource.uid = VALUE_OR_FATAL(legacy2aidl_uid_t_int32_t(
            IPCThreadState::self()->getCallingUid()));
    }
    if (pid == (pid_t)-1 || (callingPid != myPid)) {
        mClientAttributionSource.pid = VALUE_OR_FATAL(legacy2aidl_uid_t_int32_t(callingPid));
    }
    mAuxEffectId = 0;
    mOrigFlags = mFlags = flags;
    mCbf = cbf;

    if (cbf != NULL) {
        mAudioTrackThread = new AudioTrackThread(*this);
        mAudioTrackThread->run("AudioTrack", ANDROID_PRIORITY_AUDIO, 0 /*stack*/);
        // thread begins in paused state, and will not reference us until start()
    }

    // create the IAudioTrack
    {
        AutoMutex lock(mLock);
        status = createTrack_l();
    }
    if (status != NO_ERROR) {
        if (mAudioTrackThread != 0) {
            mAudioTrackThread->requestExit();   // see comment in AudioTrack.h
            mAudioTrackThread->requestExitAndWait();
            mAudioTrackThread.clear();
        }
        goto exit;
    }

    mUserData = user;
    mLoopCount = 0;
    mLoopStart = 0;
    mLoopEnd = 0;
    mLoopCountNotified = 0;
    mMarkerPosition = 0;
    mMarkerReached = false;
    mNewPosition = 0;
    mUpdatePeriod = 0;
    mPosition = 0;
    mReleased = 0;
    mStartNs = 0;
    mStartFromZeroUs = 0;
    AudioSystem::acquireAudioSessionId(mSessionId, pid, uid);
    mSequence = 1;
    mObservedSequence = mSequence;
    mInUnderrun = false;
    mPreviousTimestampValid = false;
    mTimestampStartupGlitchReported = false;
    mTimestampRetrogradePositionReported = false;
    mTimestampRetrogradeTimeReported = false;
    mTimestampStallReported = false;
    mTimestampStaleTimeReported = false;
    mPreviousLocation = ExtendedTimestamp::LOCATION_INVALID;
    mStartTs.mPosition = 0;
    mUnderrunCountOffset = 0;
    mFramesWritten = 0;
    mFramesWrittenServerOffset = 0;
    mFramesWrittenAtRestore = -1; // -1 is a unique initializer.
    mVolumeHandler = new media::VolumeHandler();

exit:
    mStatus = status;
    return status;
}
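
For contrast with the MODE_STREAM sketch earlier, here is a minimal MODE_STATIC sketch of my own (not from AOSP); this is the path that reaches the native track as TRANSFER_SHARED, with the JNI layer allocating the shared memory:

    import android.media.AudioAttributes;
    import android.media.AudioFormat;
    import android.media.AudioTrack;

    public final class StaticModeSketch {
        // Builds a MODE_STATIC track holding an entire PCM clip in shared memory.
        public static AudioTrack buildClip(short[] pcm, int sampleRate) {
            AudioTrack track = new AudioTrack.Builder()
                    .setAudioAttributes(new AudioAttributes.Builder()
                            .setUsage(AudioAttributes.USAGE_GAME)
                            .setContentType(AudioAttributes.CONTENT_TYPE_SONIFICATION)
                            .build())
                    .setAudioFormat(new AudioFormat.Builder()
                            .setSampleRate(sampleRate)
                            .setChannelMask(AudioFormat.CHANNEL_OUT_MONO)
                            .setEncoding(AudioFormat.ENCODING_PCM_16BIT)
                            .build())
                    .setTransferMode(AudioTrack.MODE_STATIC)
                    .setBufferSizeInBytes(pcm.length * 2) // the whole clip must fit
                    .build();
            track.write(pcm, 0, pcm.length); // one-shot copy into the shared buffer
            return track; // caller invokes play(); reloadStaticData() allows replaying
        }
    }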
  4. Once the native AudioTrack is configured, createTrack_l() is called.
status_t AudioTrack::createTrack_l()
{
    status_t status;
    bool callbackAdded = false;
    //obtain a handle on AudioFlinger
    const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();
    if (audioFlinger == 0) {
        ALOGE("%s(%d): Could not get audioflinger",
                __func__, mPortId);
        status = NO_INIT;
        goto exit;
    }

    {
    // mFlags (not mOrigFlags) is modified depending on whether fast request is accepted.
    // After fast request is denied, we will request again if IAudioTrack is re-created.
    // Client can only express a preference for FAST.  Server will perform additional tests.
    //check whether a FAST track was requested
    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
        // either of these use cases:
        // use case 1: shared buffer
        bool sharedBuffer = mSharedBuffer != 0;
        bool transferAllowed =
            // use case 2: callback transfer mode
            (mTransfer == TRANSFER_CALLBACK) ||
            // use case 3: obtain/release mode
            (mTransfer == TRANSFER_OBTAIN) ||
            // use case 4: synchronous write
            ((mTransfer == TRANSFER_SYNC || mTransfer == TRANSFER_SYNC_NOTIF_CALLBACK)
                    && mThreadCanCallJava);

        bool fastAllowed = sharedBuffer || transferAllowed;
        if (!fastAllowed) {
            ALOGW("%s(%d): AUDIO_OUTPUT_FLAG_FAST denied by client,"
                  " not shared buffer and transfer = %s",
                  __func__, mPortId,
                  convertTransferToText(mTransfer));
            mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_FAST);
        }
    }
    // CreateTrackInput carries every parameter that AudioTrack passes to AudioFlinger; despite the name it is not a recording input
    IAudioFlinger::CreateTrackInput input;
    if (mOriginalStreamType != AUDIO_STREAM_DEFAULT) {
        // Legacy: This is based on original parameters even if the track is recreated.
        input.attr = AudioSystem::streamTypeToAttributes(mOriginalStreamType);
    } else {
        input.attr = mAttributes;
    }
    input.config = AUDIO_CONFIG_INITIALIZER;
    input.config.sample_rate = mSampleRate;
    input.config.channel_mask = mChannelMask;
    input.config.format = mFormat;
    input.config.offload_info = mOffloadInfoCopy;
    input.clientInfo.attributionSource = mClientAttributionSource;
    input.clientInfo.clientTid = -1;
    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
        // It is currently meaningless to request SCHED_FIFO for a Java thread.  Even if the
        // application-level code follows all non-blocking design rules, the language runtime
        // doesn't also follow those rules, so the thread will not benefit overall.
        if (mAudioTrackThread != 0 && !mThreadCanCallJava) {
            input.clientInfo.clientTid = mAudioTrackThread->getTid();
        }
    }
    input.sharedBuffer = mSharedBuffer;
    input.notificationsPerBuffer = mNotificationsPerBufferReq;
    input.speed = 1.0;
    if (audio_has_proportional_frames(mFormat) && mSharedBuffer == 0 &&
            (mFlags & AUDIO_OUTPUT_FLAG_FAST) == 0) {
        input.speed  = !isPurePcmData_l() || isOffloadedOrDirect_l() ? 1.0f :
                        max(mMaxRequiredSpeed, mPlaybackRate.mSpeed);
    }
    input.flags = mFlags;
    input.frameCount = mReqFrameCount;
    input.notificationFrameCount = mNotificationFramesReq;
    input.selectedDeviceId = mSelectedDeviceId;
    input.sessionId = mSessionId;
    input.audioTrackCallback = mAudioTrackCallback;

    media::CreateTrackResponse response;
    status = audioFlinger->createTrack(VALUE_OR_FATAL(input.toAidl()), response);
    //CreateTrackOutput carries all values AudioFlinger returns to AudioTrack from createTrack()
    IAudioFlinger::CreateTrackOutput output{};
    if (status == NO_ERROR) {
        output = VALUE_OR_FATAL(IAudioFlinger::CreateTrackOutput::fromAidl(response));
    }

    if (status != NO_ERROR || output.outputId == AUDIO_IO_HANDLE_NONE) {
        ALOGE("%s(%d): AudioFlinger could not create track, status: %d output %d",
                __func__, mPortId, status, output.outputId);
        if (status == NO_ERROR) {
            status = NO_INIT;
        }
        goto exit;
    }
    ALOG_ASSERT(output.audioTrack != 0);

    mFrameCount = output.frameCount;
    mNotificationFramesAct = (uint32_t)output.notificationFrameCount;
    mRoutedDeviceId = output.selectedDeviceId;
    mSessionId = output.sessionId;
    mStreamType = output.streamType;

    mSampleRate = output.sampleRate;
    if (mOriginalSampleRate == 0) {
        mOriginalSampleRate = mSampleRate;
    }

    mAfFrameCount = output.afFrameCount;
    mAfSampleRate = output.afSampleRate;
    mAfLatency = output.afLatencyMs;

    mLatency = mAfLatency + (1000LL * mFrameCount) / mSampleRate;

    // AudioFlinger now owns the reference to the I/O handle,
    // so we are no longer responsible for releasing it.

    // FIXME compare to AudioRecord
    std::optional<media::SharedFileRegion> sfr;
    output.audioTrack->getCblk(&sfr);
    sp<IMemory> iMem = VALUE_OR_FATAL(aidl2legacy_NullableSharedFileRegion_IMemory(sfr));
    if (iMem == 0) {
        ALOGE("%s(%d): Could not get control block", __func__, mPortId);
        status = NO_INIT;
        goto exit;
    }
    // TODO: Using unsecurePointer() has some associated security pitfalls
    //       (see declaration for details).
    //       Either document why it is safe in this case or address the
    //       issue (e.g. by copying).
    void *iMemPointer = iMem->unsecurePointer();
    if (iMemPointer == NULL) {
        ALOGE("%s(%d): Could not get control block pointer", __func__, mPortId);
        status = NO_INIT;
        goto exit;
    }
    // invariant that mAudioTrack != 0 is true only after set() returns successfully
    if (mAudioTrack != 0) {
        IInterface::asBinder(mAudioTrack)->unlinkToDeath(mDeathNotifier, this);
        mDeathNotifier.clear();
    }
    mAudioTrack = output.audioTrack;
    mCblkMemory = iMem;
    IPCThreadState::self()->flushCommands();

    audio_track_cblk_t* cblk = static_cast<audio_track_cblk_t*>(iMemPointer);
    mCblk = cblk;

    mAwaitBoost = false;
    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
        if (output.flags & AUDIO_OUTPUT_FLAG_FAST) {
            ALOGI("%s(%d): AUDIO_OUTPUT_FLAG_FAST successful; frameCount %zu -> %zu",
                  __func__, mPortId, mReqFrameCount, mFrameCount);
            if (!mThreadCanCallJava) {
                mAwaitBoost = true;
            }
        } else {
            ALOGD("%s(%d): AUDIO_OUTPUT_FLAG_FAST denied by server; frameCount %zu -> %zu",
                  __func__, mPortId, mReqFrameCount, mFrameCount);
        }
    }
    mFlags = output.flags;

    //mOutput != output includes the case where mOutput == AUDIO_IO_HANDLE_NONE for first creation
    if (mDeviceCallback != 0) {
        if (mOutput != AUDIO_IO_HANDLE_NONE) {
            AudioSystem::removeAudioDeviceCallback(this, mOutput, mPortId);
        }
        AudioSystem::addAudioDeviceCallback(this, output.outputId, output.portId);
        callbackAdded = true;
    }

    mPortId = output.portId;
    // We retain a copy of the I/O handle, but don't own the reference
    mOutput = output.outputId;
    mRefreshRemaining = true;

    // Starting address of buffers in shared memory.  If there is a shared buffer, buffers
    // is the value of pointer() for the shared buffer, otherwise buffers points
    // immediately after the control block.  This address is for the mapping within client
    // address space.  AudioFlinger::TrackBase::mBuffer is for the server address space.
    void* buffers;
    if (mSharedBuffer == 0) {
        buffers = cblk + 1;
    } else {
        // TODO: Using unsecurePointer() has some associated security pitfalls
        //       (see declaration for details).
        //       Either document why it is safe in this case or address the
        //       issue (e.g. by copying).
        buffers = mSharedBuffer->unsecurePointer();
        if (buffers == NULL) {
            ALOGE("%s(%d): Could not get buffer pointer", __func__, mPortId);
            status = NO_INIT;
            goto exit;
        }
    }

    mAudioTrack->attachAuxEffect(mAuxEffectId, &status);

    // If IAudioTrack is re-created, don't let the requested frameCount
    // decrease.  This can confuse clients that cache frameCount().
    if (mFrameCount > mReqFrameCount) {
        mReqFrameCount = mFrameCount;
    }

    // reset server position to 0 as we have new cblk.
    mServer = 0;

    // update proxy
    if (mSharedBuffer == 0) {
        mStaticProxy.clear();
        mProxy = new AudioTrackClientProxy(cblk, buffers, mFrameCount, mFrameSize);
    } else {
        mStaticProxy = new StaticAudioTrackClientProxy(cblk, buffers, mFrameCount, mFrameSize);
        mProxy = mStaticProxy;
    }

    mProxy->setVolumeLR(gain_minifloat_pack(
            gain_from_float(mVolume[AUDIO_INTERLEAVE_LEFT]),
            gain_from_float(mVolume[AUDIO_INTERLEAVE_RIGHT])));

    mProxy->setSendLevel(mSendLevel);
    const uint32_t effectiveSampleRate = adjustSampleRate(mSampleRate, mPlaybackRate.mPitch);
    const float effectiveSpeed = adjustSpeed(mPlaybackRate.mSpeed, mPlaybackRate.mPitch);
    const float effectivePitch = adjustPitch(mPlaybackRate.mPitch);
    mProxy->setSampleRate(effectiveSampleRate);

    AudioPlaybackRate playbackRateTemp = mPlaybackRate;
    playbackRateTemp.mSpeed = effectiveSpeed;
    playbackRateTemp.mPitch = effectivePitch;
    mProxy->setPlaybackRate(playbackRateTemp);
    mProxy->setMinimum(mNotificationFramesAct);

    if (mDualMonoMode != AUDIO_DUAL_MONO_MODE_OFF) {
        setDualMonoMode_l(mDualMonoMode);
    }
    if (mAudioDescriptionMixLeveldB != -std::numeric_limits<float>::infinity()) {
        setAudioDescriptionMixLevel_l(mAudioDescriptionMixLeveldB);
    }

    mDeathNotifier = new DeathNotifier(this);
    IInterface::asBinder(mAudioTrack)->linkToDeath(mDeathNotifier, this);

    // This is the first log sent from the AudioTrack client.
    // The creation of the audio track by AudioFlinger (in the code above)
    // is the first log of the AudioTrack and must be present before
    // any AudioTrack client logs will be accepted.

    mMetricsId = std::string(AMEDIAMETRICS_KEY_PREFIX_AUDIO_TRACK) + std::to_string(mPortId);
    mediametrics::LogItem(mMetricsId)
        .set(AMEDIAMETRICS_PROP_EVENT, AMEDIAMETRICS_PROP_EVENT_VALUE_CREATE)
        // the following are immutable
        .set(AMEDIAMETRICS_PROP_FLAGS, toString(mFlags).c_str())
        .set(AMEDIAMETRICS_PROP_ORIGINALFLAGS, toString(mOrigFlags).c_str())
        .set(AMEDIAMETRICS_PROP_SESSIONID, (int32_t)mSessionId)
        .set(AMEDIAMETRICS_PROP_LOGSESSIONID, mLogSessionId)
        .set(AMEDIAMETRICS_PROP_PLAYERIID, mPlayerIId)
        .set(AMEDIAMETRICS_PROP_TRACKID, mPortId) // dup from key
        .set(AMEDIAMETRICS_PROP_CONTENTTYPE, toString(mAttributes.content_type).c_str())
        .set(AMEDIAMETRICS_PROP_USAGE, toString(mAttributes.usage).c_str())
        .set(AMEDIAMETRICS_PROP_THREADID, (int32_t)output.outputId)
        .set(AMEDIAMETRICS_PROP_SELECTEDDEVICEID, (int32_t)mSelectedDeviceId)
        .set(AMEDIAMETRICS_PROP_ROUTEDDEVICEID, (int32_t)mRoutedDeviceId)
        .set(AMEDIAMETRICS_PROP_ENCODING, toString(mFormat).c_str())
        .set(AMEDIAMETRICS_PROP_CHANNELMASK, (int32_t)mChannelMask)
        .set(AMEDIAMETRICS_PROP_FRAMECOUNT, (int32_t)mFrameCount)
        // the following are NOT immutable
        .set(AMEDIAMETRICS_PROP_VOLUME_LEFT, (double)mVolume[AUDIO_INTERLEAVE_LEFT])
        .set(AMEDIAMETRICS_PROP_VOLUME_RIGHT, (double)mVolume[AUDIO_INTERLEAVE_RIGHT])
        .set(AMEDIAMETRICS_PROP_STATE, stateToString(mState))
        .set(AMEDIAMETRICS_PROP_AUXEFFECTID, (int32_t)mAuxEffectId)
        .set(AMEDIAMETRICS_PROP_SAMPLERATE, (int32_t)mSampleRate)
        .set(AMEDIAMETRICS_PROP_PLAYBACK_SPEED, (double)mPlaybackRate.mSpeed)
        .set(AMEDIAMETRICS_PROP_PLAYBACK_PITCH, (double)mPlaybackRate.mPitch)
        .set(AMEDIAMETRICS_PROP_PREFIX_EFFECTIVE
                AMEDIAMETRICS_PROP_SAMPLERATE, (int32_t)effectiveSampleRate)
        .set(AMEDIAMETRICS_PROP_PREFIX_EFFECTIVE
                AMEDIAMETRICS_PROP_PLAYBACK_SPEED, (double)effectiveSpeed)
        .set(AMEDIAMETRICS_PROP_PREFIX_EFFECTIVE
                AMEDIAMETRICS_PROP_PLAYBACK_PITCH, (double)effectivePitch)
        .record();

    // mSendLevel
    // mReqFrameCount?
    // mNotificationFramesAct, mNotificationFramesReq, mNotificationsPerBufferReq
    // mLatency, mAfLatency, mAfFrameCount, mAfSampleRate

    }

exit:
    if (status != NO_ERROR && callbackAdded) {
        // note: mOutput is always valid if callbackAdded is true
        AudioSystem::removeAudioDeviceCallback(this, mOutput, mPortId);
    }

    mStatus = status;

    // sp<IAudioTrack> track destructor will cause releaseOutput() to be called by AudioFlinger
    return status;
}
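
After createTrack_l() succeeds, frameCount, sampleRate, flags, and sessionId reflect what AudioFlinger actually granted rather than what was requested. A small SDK-side sketch of my own makes this observable (getBufferCapacityInFrames() assumes API 24+):

    import android.media.AudioTrack;
    import android.util.Log;

    final class TrackInfoSketch {
        // Logs the values AudioFlinger actually granted after track creation.
        static void dump(AudioTrack track) {
            int frames = track.getBufferCapacityInFrames(); // server-side frameCount (API 24+)
            int rate = track.getSampleRate();               // possibly adjusted by AudioFlinger
            int session = track.getAudioSessionId();        // read back via nSession[0] in JNI
            Log.d("TrackInfoSketch", "frames=" + frames + " rate=" + rate + " session=" + session);
        }
    }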

