Android 13 CreateTrack Flow Analysis

Before any AudioTrack-related operation can be performed, a Track must first be created. The CreateTrack flow is as follows.
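
For orientation, here is a minimal sketch (with illustrative parameter values; any real app would pick its own attributes, format, and buffer size) of the kind of application code that triggers this flow. It calls the same five-argument constructor quoted below:

//Illustrative app-level code, not part of the framework sources below
import android.media.AudioAttributes;
import android.media.AudioFormat;
import android.media.AudioManager;
import android.media.AudioTrack;

    static AudioTrack createPlaybackTrack() {
        AudioAttributes attrs = new AudioAttributes.Builder()
                .setUsage(AudioAttributes.USAGE_MEDIA)
                .setContentType(AudioAttributes.CONTENT_TYPE_MUSIC)
                .build();
        AudioFormat format = new AudioFormat.Builder()
                .setEncoding(AudioFormat.ENCODING_PCM_16BIT)
                .setSampleRate(48000)
                .setChannelMask(AudioFormat.CHANNEL_OUT_STEREO)
                .build();
        int bufferSize = AudioTrack.getMinBufferSize(48000,
                AudioFormat.CHANNEL_OUT_STEREO, AudioFormat.ENCODING_PCM_16BIT);
        // This five-argument constructor is the entry point analyzed below.
        return new AudioTrack(attrs, format, bufferSize,
                AudioTrack.MODE_STREAM, AudioManager.AUDIO_SESSION_ID_GENERATE);
    }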

First, the AudioTrack constructor calls the native_setup method:

//frameworks/base/media/java/android/media/AudioTrack.java
    public AudioTrack(AudioAttributes attributes, AudioFormat format, int bufferSizeInBytes,
            int mode, int sessionId)
                    throws IllegalArgumentException {
        this(attributes, format, bufferSizeInBytes, mode, sessionId, false /*offload*/,
                ENCAPSULATION_MODE_NONE, null /* tunerConfiguration */);
    }


    private AudioTrack(AudioAttributes attributes, AudioFormat format, int bufferSizeInBytes,
            int mode, int sessionId, boolean offload, int encapsulationMode,
            @Nullable TunerConfiguration tunerConfiguration)
                    throws IllegalArgumentException {
        super(attributes, AudioPlaybackConfiguration.PLAYER_TYPE_JAM_AUDIOTRACK);
        // mState already == STATE_UNINITIALIZED


        mConfiguredAudioAttributes = attributes; // object copy not needed, immutable.


        if (format == null) {
            throw new IllegalArgumentException("Illegal null AudioFormat");
        }


        // Check if we should enable deep buffer mode
        if (shouldEnablePowerSaving(mAttributes, format, bufferSizeInBytes, mode)) {
            mAttributes = new AudioAttributes.Builder(mAttributes)
                .replaceFlags((mAttributes.getAllFlags()
                        | AudioAttributes.FLAG_DEEP_BUFFER)
                        & ~AudioAttributes.FLAG_LOW_LATENCY)
                .build();
        }


        // remember which looper is associated with the AudioTrack instantiation
        Looper looper;
        if ((looper = Looper.myLooper()) == null) {
            looper = Looper.getMainLooper();
        }


        int rate = format.getSampleRate();
        if (rate == AudioFormat.SAMPLE_RATE_UNSPECIFIED) {
            rate = 0;
        }


        int channelIndexMask = 0;
        if ((format.getPropertySetMask()
                & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_CHANNEL_INDEX_MASK) != 0) {
            channelIndexMask = format.getChannelIndexMask();
        }
        int channelMask = 0;
        if ((format.getPropertySetMask()
                & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_CHANNEL_MASK) != 0) {
            channelMask = format.getChannelMask();
        } else if (channelIndexMask == 0) { // if no masks at all, use stereo
            channelMask = AudioFormat.CHANNEL_OUT_FRONT_LEFT
                    | AudioFormat.CHANNEL_OUT_FRONT_RIGHT;
        }
        int encoding = AudioFormat.ENCODING_DEFAULT;
        if ((format.getPropertySetMask() & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_ENCODING) != 0) {
            encoding = format.getEncoding();
        }
        audioParamCheck(rate, channelMask, channelIndexMask, encoding, mode);
        mOffloaded = offload;
        mStreamType = AudioSystem.STREAM_DEFAULT;


        audioBuffSizeCheck(bufferSizeInBytes);


        mInitializationLooper = looper;


        if (sessionId < 0) {
            throw new IllegalArgumentException("Invalid audio session ID: "+sessionId);
        }


        int[] sampleRate = new int[] {mSampleRate};
        int[] session = new int[1];
        session[0] = sessionId;
        // native initialization
        int initResult = native_setup(new WeakReference<AudioTrack>(this), mAttributes,
                sampleRate, mChannelMask, mChannelIndexMask, mAudioFormat,
                mNativeBufferSizeInBytes, mDataLoadMode, session, 0 /*nativeTrackInJavaObj*/,
                offload, encapsulationMode, tunerConfiguration,
                getCurrentOpPackageName()); // call into native_setup
        if (initResult != SUCCESS) {
            loge("Error code "+initResult+" when initializing AudioTrack.");
            return; // with mState == STATE_UNINITIALIZED
        }


        mSampleRate = sampleRate[0];
        mSessionId = session[0];


        // TODO: consider caching encapsulationMode and tunerConfiguration in the Java object.


        if ((mAttributes.getFlags() & AudioAttributes.FLAG_HW_AV_SYNC) != 0) {
            int frameSizeInBytes;
            if (AudioFormat.isEncodingLinearFrames(mAudioFormat)) {
                frameSizeInBytes = mChannelCount * AudioFormat.getBytesPerSample(mAudioFormat);
            } else {
                frameSizeInBytes = 1;
            }
            mOffset = ((int) Math.ceil(HEADER_V2_SIZE_BYTES / frameSizeInBytes)) * frameSizeInBytes;
        }


        if (mDataLoadMode == MODE_STATIC) {
            mState = STATE_NO_STATIC_DATA;
        } else {
            mState = STATE_INITIALIZED;
        }


        baseRegisterPlayer(mSessionId);
        native_setPlayerIId(mPlayerIId); // mPlayerIId now ready to send to native AudioTrack.
    }
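
One detail in the constructor above worth expanding: for HW_AV_SYNC tracks, mOffset rounds the AV sync header size up to a whole number of audio frames so that payload writes stay frame-aligned. A hypothetical standalone version of that arithmetic (the helper name is mine, not the framework's):

// Hypothetical helper mirroring the mOffset computation above.
static int avSyncHeaderOffset(float headerSizeBytes, int frameSizeInBytes) {
    return ((int) Math.ceil(headerSizeBytes / frameSizeInBytes)) * frameSizeInBytes;
}
// e.g. a 20-byte header with 4-byte frames (16-bit stereo):
//   ceil(20 / 4) * 4 = 20 -> data starts 20 bytes into the buffer;
// with 16-byte frames the result rounds up: ceil(20 / 16) * 16 = 32.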

Looking up the JNI bindings in android_media_AudioTrack.cpp shows that native_setup maps to android_media_AudioTrack_setup:

//frameworks/base/core/jni/android_media_AudioTrack.cpp
static jint android_media_AudioTrack_setup(JNIEnv *env, jobject thiz, jobject weak_this,
                                           jobject jaa, jintArray jSampleRate,
                                           jint channelPositionMask, jint channelIndexMask,
                                           jint audioFormat, jint buffSizeInBytes, jint memoryMode,
                                           jintArray jSession, jlong nativeAudioTrack,
                                           jboolean offload, jint encapsulationMode,
                                           jobject tunerConfiguration, jstring opPackageName) {
    ALOGV("sampleRates=%p, channel mask=%x, index mask=%x, audioFormat(Java)=%d, buffSize=%d,"
          " nativeAudioTrack=0x%" PRIX64 ", offload=%d encapsulationMode=%d tuner=%p",
          jSampleRate, channelPositionMask, channelIndexMask, audioFormat, buffSizeInBytes,
          nativeAudioTrack, offload, encapsulationMode, tunerConfiguration);


    if (jSession == NULL) {
        ALOGE("Error creating AudioTrack: invalid session ID pointer");
        return (jint) AUDIO_JAVA_ERROR;
    }


    const TunerConfigurationHelper tunerHelper(env, tunerConfiguration);


    jint* nSession = (jint *) env->GetPrimitiveArrayCritical(jSession, NULL);
    if (nSession == NULL) {
        ALOGE("Error creating AudioTrack: Error retrieving session id pointer");
        return (jint) AUDIO_JAVA_ERROR;
    }
    audio_session_t sessionId = (audio_session_t) nSession[0];
    env->ReleasePrimitiveArrayCritical(jSession, nSession, 0);
    nSession = NULL;


    jclass clazz = env->GetObjectClass(thiz);
    if (clazz == NULL) {
        ALOGE("Can't find %s when setting up callback.", kClassPathName);
        return (jint) AUDIOTRACK_ERROR_SETUP_NATIVEINITFAILED;
    }


    // if we pass in an existing *Native* AudioTrack, we don't need to create/initialize one.
    sp<AudioTrack> lpTrack;
    const auto lpJniStorage = sp<AudioTrackJniStorage>::make(clazz, weak_this, offload);
    if (nativeAudioTrack == 0) {
        if (jaa == 0) {
            ALOGE("Error creating AudioTrack: invalid audio attributes");
            return (jint) AUDIO_JAVA_ERROR;
        }


        if (jSampleRate == 0) {
            ALOGE("Error creating AudioTrack: invalid sample rates");
            return (jint) AUDIO_JAVA_ERROR;
        }


        int* sampleRates = env->GetIntArrayElements(jSampleRate, NULL);
        int sampleRateInHertz = sampleRates[0];
        env->ReleaseIntArrayElements(jSampleRate, sampleRates, JNI_ABORT);


        // Invalid channel representations are caught by !audio_is_output_channel() below.
        audio_channel_mask_t nativeChannelMask = nativeChannelMaskFromJavaChannelMasks(
                channelPositionMask, channelIndexMask);
        if (!audio_is_output_channel(nativeChannelMask)) {
            ALOGE("Error creating AudioTrack: invalid native channel mask %#x.", nativeChannelMask);
            return (jint) AUDIOTRACK_ERROR_SETUP_INVALIDCHANNELMASK;
        }


        uint32_t channelCount = audio_channel_count_from_out_mask(nativeChannelMask);


        // check the format.
        // This function was called from Java, so we compare the format against the Java constants
        audio_format_t format = audioFormatToNative(audioFormat);
        if (format == AUDIO_FORMAT_INVALID) {
            ALOGE("Error creating AudioTrack: unsupported audio format %d.", audioFormat);
            return (jint) AUDIOTRACK_ERROR_SETUP_INVALIDFORMAT;
        }


        // compute the frame count
        size_t frameCount;
        if (audio_has_proportional_frames(format)) {
            const size_t bytesPerSample = audio_bytes_per_sample(format);
            frameCount = buffSizeInBytes / (channelCount * bytesPerSample);
        } else {
            frameCount = buffSizeInBytes;
        }


        // create the native AudioTrack object
        ScopedUtfChars opPackageNameStr(env, opPackageName);
        // TODO b/182469354: make consistent with AudioRecord
        AttributionSourceState attributionSource;
        attributionSource.packageName = std::string(opPackageNameStr.c_str());
        attributionSource.token = sp<BBinder>::make();
        lpTrack = sp<AudioTrack>::make(attributionSource); // construct the native AudioTrack


        // read the AudioAttributes values
        auto paa = JNIAudioAttributeHelper::makeUnique();
        jint jStatus = JNIAudioAttributeHelper::nativeFromJava(env, jaa, paa.get());
        if (jStatus != (jint)AUDIO_JAVA_SUCCESS) {
            return jStatus;
        }
        ALOGV("AudioTrack_setup for usage=%d content=%d flags=0x%#x tags=%s",
                paa->usage, paa->content_type, paa->flags, paa->tags);


        // initialize the callback information:
        // this data will be passed with every AudioTrack callback
        audio_offload_info_t offloadInfo;
        if (offload == JNI_TRUE) {
            offloadInfo = AUDIO_INFO_INITIALIZER;
            offloadInfo.format = format;
            offloadInfo.sample_rate = sampleRateInHertz;
            offloadInfo.channel_mask = nativeChannelMask;
            offloadInfo.has_video = false;
            offloadInfo.stream_type = AUDIO_STREAM_MUSIC; //required for offload
        }


        if (encapsulationMode != 0) {
            offloadInfo = AUDIO_INFO_INITIALIZER;
            offloadInfo.format = format;
            offloadInfo.sample_rate = sampleRateInHertz;
            offloadInfo.channel_mask = nativeChannelMask;
            offloadInfo.stream_type = AUDIO_STREAM_MUSIC;
            offloadInfo.encapsulation_mode =
                    static_cast<audio_encapsulation_mode_t>(encapsulationMode);
            offloadInfo.content_id = tunerHelper.getContentId();
            offloadInfo.sync_id = tunerHelper.getSyncId();
        }


        // initialize the native AudioTrack object
        status_t status = NO_ERROR;
        switch (memoryMode) {
        case MODE_STREAM:
            // call AudioTrack::set()
            status = lpTrack->set(AUDIO_STREAM_DEFAULT, // stream type, but more info conveyed
                                                        // in paa (last argument)
                                  sampleRateInHertz,
                                  format, // word length, PCM
                                  nativeChannelMask, offload ? 0 : frameCount,
                                  offload ? AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD
                                          : AUDIO_OUTPUT_FLAG_NONE,
                                  lpJniStorage,
                                  0,    // notificationFrames == 0 since not using EVENT_MORE_DATA
                                        // to feed the AudioTrack
                                  0,    // shared mem
                                  true, // thread can call Java
                                  sessionId, // audio session ID
                                  offload ? AudioTrack::TRANSFER_SYNC_NOTIF_CALLBACK
                                          : AudioTrack::TRANSFER_SYNC,
                                  (offload || encapsulationMode) ? &offloadInfo : NULL,
                                  AttributionSourceState(), // default uid, pid values
                                  paa.get());
            break;


        case MODE_STATIC:
        {
            // AudioTrack is using shared memory
            const auto iMem = allocSharedMem(buffSizeInBytes);
            if (iMem == nullptr) {
                ALOGE("Error creating AudioTrack in static mode: error creating mem heap base");
                goto native_init_failure;
            }


            status = lpTrack->set(AUDIO_STREAM_DEFAULT, // stream type, but more info conveyed
                                                        // in paa (last argument)
                                  sampleRateInHertz,
                                  format, // word length, PCM
                                  nativeChannelMask, frameCount, AUDIO_OUTPUT_FLAG_NONE,
                                  lpJniStorage,
                                  0, // notificationFrames == 0 since not using EVENT_MORE_DATA
                                     // to feed the AudioTrack
                                  iMem,                   // shared mem
                                  true,                   // thread can call Java
                                  sessionId,              // audio session ID
                                  AudioTrack::TRANSFER_SHARED,
                                  nullptr ,               // default offloadInfo
                                  AttributionSourceState(), // default uid, pid values
                                  paa.get());
            break;
        }
        default:
            ALOGE("Unknown mode %d", memoryMode);
            goto native_init_failure;
        }


        if (status != NO_ERROR) {
            ALOGE("Error %d initializing AudioTrack", status);
            goto native_init_failure;
        }
        // Set caller name so it can be logged in destructor.
        // MediaMetricsConstants.h: AMEDIAMETRICS_PROP_CALLERNAME_VALUE_JAVA
        lpTrack->setCallerName("java");
    } else {  // end if (nativeAudioTrack == 0)
        lpTrack = sp<AudioTrack>::fromExisting(reinterpret_cast<AudioTrack*>(nativeAudioTrack));
        // TODO: We need to find out which members of the Java AudioTrack might
        // need to be initialized from the Native AudioTrack
        // these are directly returned from getters:
        //  mSampleRate
        //  mAudioFormat
        //  mStreamType
        //  mChannelConfiguration
        //  mChannelCount
        //  mState (?)
        //  mPlayState (?)
        // these may be used internally (Java AudioTrack.audioParamCheck():
        //  mChannelMask
        //  mChannelIndexMask
        //  mDataLoadMode


        // initialize the callback information:
        // this data will be passed with every AudioTrack callback


        // TODO this callback information is useless, it isn't passed to the
        // native AudioTrack object
        /*
        lpJniStorage->mCallbackData.audioTrack_class = (jclass)env->NewGlobalRef(clazz);
        // we use a weak reference so the AudioTrack object can be garbage collected.
        lpJniStorage->mCallbackData.audioTrack_ref = env->NewGlobalRef(weak_this);
        lpJniStorage->mCallbackData.busy = false;
        */
    }
    lpJniStorage->mAudioTrackCallback =
            sp<JNIAudioTrackCallback>::make(env, thiz, lpJniStorage->getAudioTrackWeakRef(),
                                            javaAudioTrackFields.postNativeEventInJava);
    lpTrack->setAudioTrackCallback(lpJniStorage->mAudioTrackCallback); // register the callback


    nSession = (jint *) env->GetPrimitiveArrayCritical(jSession, NULL);
    if (nSession == NULL) {
        ALOGE("Error creating AudioTrack: Error retrieving session id pointer");
        goto native_init_failure;
    }
    // read the audio session ID back from AudioTrack in case we create a new session
    nSession[0] = lpTrack->getSessionId();
    env->ReleasePrimitiveArrayCritical(jSession, nSession, 0);
    nSession = NULL;


    {
        const jint elements[1] = { (jint) lpTrack->getSampleRate() };
        env->SetIntArrayRegion(jSampleRate, 0, 1, elements);
    }


    // save our newly created C++ AudioTrack in the "nativeTrackInJavaObj" field
    // of the Java object (in mNativeTrackInJavaObj)
    setFieldSp(env, thiz, lpTrack, javaAudioTrackFields.nativeTrackInJavaObj);


    // save the JNI resources so we can free them later
    //ALOGV("storing lpJniStorage: %x\n", (long)lpJniStorage);
    setFieldSp(env, thiz, lpJniStorage, javaAudioTrackFields.jniData);


    // since we had audio attributes, the stream type was derived from them during the
    // creation of the native AudioTrack: push the same value to the Java object
    env->SetIntField(thiz, javaAudioTrackFields.fieldStreamType, (jint) lpTrack->streamType());


    return (jint) AUDIO_JAVA_SUCCESS;


    // failures:
native_init_failure:
    if (nSession != NULL) {
        env->ReleasePrimitiveArrayCritical(jSession, nSession, 0);
    }


    setFieldSp(env, thiz, sp<AudioTrack>{}, javaAudioTrackFields.nativeTrackInJavaObj);
    setFieldSp(env, thiz, sp<AudioTrackJniStorage>{}, javaAudioTrackFields.jniData);
    // lpTrack goes out of scope, so reference count drops to zero
    return (jint) AUDIOTRACK_ERROR_SETUP_NATIVEINITFAILED;
}
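
A detail worth noting in the JNI function above: for linear PCM ("proportional frames") the byte-based buffer size passed from Java is converted into a frame count before set() is called, while for compressed formats the byte size is passed through unchanged. A hypothetical helper showing the arithmetic:

// Hypothetical helper mirroring the frameCount computation above.
static int frameCountFromBytes(int buffSizeInBytes, int channelCount, int bytesPerSample) {
    return buffSizeInBytes / (channelCount * bytesPerSample);
}
// e.g. 9600 bytes of 16-bit (2 bytes/sample) stereo PCM:
//   9600 / (2 * 2) = 2400 frames.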

In android_media_AudioTrack_setup, the native AudioTrack is constructed and its set() function is called:

//frameworks/av/media/libaudioclient/AudioTrack.cpp
status_t AudioTrack::set(
        audio_stream_type_t streamType,
        uint32_t sampleRate,
        audio_format_t format,
        audio_channel_mask_t channelMask,
        size_t frameCount,
        audio_output_flags_t flags,
        const wp<IAudioTrackCallback>& callback,
        int32_t notificationFrames,
        const sp<IMemory>& sharedBuffer,
        bool threadCanCallJava,
        audio_session_t sessionId,
        transfer_type transferType,
        const audio_offload_info_t *offloadInfo,
        const AttributionSourceState& attributionSource,
        const audio_attributes_t* pAttributes,
        bool doNotReconnect,
        float maxRequiredSpeed,
        audio_port_handle_t selectedDeviceId)
{
    LOG_ALWAYS_FATAL_IF(mInitialized, "%s: should not be called twice", __func__);
    mInitialized = true;
    status_t status;
    uint32_t channelCount;
    pid_t callingPid;
    pid_t myPid;
    uid_t uid = VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(attributionSource.uid));
    pid_t pid = VALUE_OR_FATAL(aidl2legacy_int32_t_pid_t(attributionSource.pid));
    std::string errorMessage;
    // Note mPortId is not valid until the track is created, so omit mPortId in ALOG for set.
    ALOGV("%s(): streamType %d, sampleRate %u, format %#x, channelMask %#x, frameCount %zu, "
          "flags #%x, notificationFrames %d, sessionId %d, transferType %d, uid %d, pid %d",
          __func__,
          streamType, sampleRate, format, channelMask, frameCount, flags, notificationFrames,
          sessionId, transferType, attributionSource.uid, attributionSource.pid);


    mThreadCanCallJava = threadCanCallJava;


    // These variables are pulled in an error report, so we initialize them early.
    mSelectedDeviceId = selectedDeviceId;
    mSessionId = sessionId;
    mChannelMask = channelMask;
    mReqFrameCount = mFrameCount = frameCount;
    mSampleRate = sampleRate;
    mOriginalSampleRate = sampleRate;
    mAttributes = pAttributes != nullptr ? *pAttributes : AUDIO_ATTRIBUTES_INITIALIZER;
    mPlaybackRate = AUDIO_PLAYBACK_RATE_DEFAULT;


    // update format and flags before storing them in mFormat, mOrigFlags and mFlags
    if (pAttributes != NULL) {
        // stream type shouldn't be looked at, this track has audio attributes
        ALOGV("%s(): Building AudioTrack with attributes:"
                " usage=%d content=%d flags=0x%x tags=[%s]",
                __func__,
                 mAttributes.usage, mAttributes.content_type, mAttributes.flags, mAttributes.tags);
        audio_flags_to_audio_output_flags(mAttributes.flags, &flags);
    }


    // these below should probably come from the audioFlinger too...
    if (format == AUDIO_FORMAT_DEFAULT) {
        format = AUDIO_FORMAT_PCM_16_BIT;
    } else if (format == AUDIO_FORMAT_IEC61937) { // HDMI pass-through?
        flags = static_cast<audio_output_flags_t>(flags | AUDIO_OUTPUT_FLAG_IEC958_NONAUDIO);
    }


    // force direct flag if format is not linear PCM
    // or offload was requested
    if ((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
            || !audio_is_linear_pcm(format)) {
        ALOGV( (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
                    ? "%s(): Offload request, forcing to Direct Output"
                    : "%s(): Not linear PCM, forcing to Direct Output",
                    __func__);
        flags = (audio_output_flags_t)
                // FIXME why can't we allow direct AND fast?
                ((flags | AUDIO_OUTPUT_FLAG_DIRECT) & ~AUDIO_OUTPUT_FLAG_FAST);
    }


    // force direct flag if HW A/V sync requested
    if ((flags & AUDIO_OUTPUT_FLAG_HW_AV_SYNC) != 0) {
        flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_DIRECT);
    }


    mFormat = format;
    mOrigFlags = mFlags = flags;


    switch (transferType) {
    case TRANSFER_DEFAULT:
        if (sharedBuffer != 0) {
            transferType = TRANSFER_SHARED;
        } else if (callback == nullptr|| threadCanCallJava) {
            transferType = TRANSFER_SYNC;
        } else {
            transferType = TRANSFER_CALLBACK;
        }
        break;
    case TRANSFER_CALLBACK:
    case TRANSFER_SYNC_NOTIF_CALLBACK:
        if (callback == nullptr || sharedBuffer != 0) {
            errorMessage = StringPrintf(
                    "%s: Transfer type %s but callback == nullptr || sharedBuffer != 0",
                    convertTransferToText(transferType), __func__);
            status = BAD_VALUE;
            goto error;
        }
        break;
    case TRANSFER_OBTAIN:
    case TRANSFER_SYNC:
        if (sharedBuffer != 0) {
            errorMessage = StringPrintf(
                    "%s: Transfer type TRANSFER_OBTAIN but sharedBuffer != 0", __func__);
            status = BAD_VALUE;
            goto error;
        }
        break;
    case TRANSFER_SHARED:
        if (sharedBuffer == 0) {
            errorMessage = StringPrintf(
                    "%s: Transfer type TRANSFER_SHARED but sharedBuffer == 0", __func__);
            status = BAD_VALUE;
            goto error;
        }
        break;
    default:
        errorMessage = StringPrintf("%s: Invalid transfer type %d", __func__, transferType);
        status = BAD_VALUE;
        goto error;
    }
    mSharedBuffer = sharedBuffer;
    mTransfer = transferType;
    mDoNotReconnect = doNotReconnect;


    ALOGV_IF(sharedBuffer != 0, "%s(): sharedBuffer: %p, size: %zu",
            __func__, sharedBuffer->unsecurePointer(), sharedBuffer->size());


    // invariant that mAudioTrack != 0 is true only after set() returns successfully
    if (mAudioTrack != 0) {
        errorMessage = StringPrintf("%s: Track already in use", __func__);
        status = INVALID_OPERATION;
        goto error;
    }


    // handle default values first.
    if (streamType == AUDIO_STREAM_DEFAULT) {
        streamType = AUDIO_STREAM_MUSIC;
    }
    if (pAttributes == NULL) {
        if (uint32_t(streamType) >= AUDIO_STREAM_PUBLIC_CNT) {
            errorMessage = StringPrintf("%s: Invalid stream type %d", __func__, streamType);
            status = BAD_VALUE;
            goto error;
        }
        mOriginalStreamType = streamType;
    } else {
        mOriginalStreamType = AUDIO_STREAM_DEFAULT;
    }


    // validate parameters
    if (!audio_is_valid_format(format)) {
        errorMessage = StringPrintf("%s: Invalid format %#x", __func__, format);
        status = BAD_VALUE;
        goto error;
    }


    if (!audio_is_output_channel(channelMask)) {
        errorMessage = StringPrintf("%s: Invalid channel mask %#x",  __func__, channelMask);
        status = BAD_VALUE;
        goto error;
    }
    channelCount = audio_channel_count_from_out_mask(channelMask);
    mChannelCount = channelCount;


    if (mFlags & AUDIO_OUTPUT_FLAG_DIRECT) {
        if (audio_has_proportional_frames(format)) {
            mFrameSize = channelCount * audio_bytes_per_sample(format);
        } else {
            mFrameSize = sizeof(uint8_t);
        }
    } else {
        ALOG_ASSERT(audio_has_proportional_frames(format));
        mFrameSize = channelCount * audio_bytes_per_sample(format);
        // createTrack will return an error if PCM format is not supported by server,
        // so no need to check for specific PCM formats here
    }


    // sampling rate must be specified for direct outputs
    if (sampleRate == 0 && (mFlags & AUDIO_OUTPUT_FLAG_DIRECT) != 0) {
        errorMessage = StringPrintf(
                "%s: sample rate must be specified for direct outputs", __func__);
        status = BAD_VALUE;
        goto error;
    }
    // 1.0 <= mMaxRequiredSpeed <= AUDIO_TIMESTRETCH_SPEED_MAX
    mMaxRequiredSpeed = min(max(maxRequiredSpeed, 1.0f), AUDIO_TIMESTRETCH_SPEED_MAX);


    // Make copy of input parameter offloadInfo so that in the future:
    //  (a) createTrack_l doesn't need it as an input parameter
    //  (b) we can support re-creation of offloaded tracks
    if (offloadInfo != NULL) {
        mOffloadInfoCopy = *offloadInfo;
    } else {
        memset(&mOffloadInfoCopy, 0, sizeof(audio_offload_info_t));
        mOffloadInfoCopy = AUDIO_INFO_INITIALIZER;
        mOffloadInfoCopy.format = format;
        mOffloadInfoCopy.sample_rate = sampleRate;
        mOffloadInfoCopy.channel_mask = channelMask;
        mOffloadInfoCopy.stream_type = streamType;
    }


    mVolume[AUDIO_INTERLEAVE_LEFT] = 1.0f;
    mVolume[AUDIO_INTERLEAVE_RIGHT] = 1.0f;
    mSendLevel = 0.0f;
    // mFrameCount is initialized in createTrack_l
    if (notificationFrames >= 0) {
        mNotificationFramesReq = notificationFrames;
        mNotificationsPerBufferReq = 0;
    } else {
        if (!(mFlags & AUDIO_OUTPUT_FLAG_FAST)) {
            errorMessage = StringPrintf(
                    "%s: notificationFrames=%d not permitted for non-fast track",
                    __func__, notificationFrames);
            status = BAD_VALUE;
            goto error;
        }
        if (frameCount > 0) {
            ALOGE("%s(): notificationFrames=%d not permitted with non-zero frameCount=%zu",
                    __func__, notificationFrames, frameCount);
            status = BAD_VALUE;
            goto error;
        }
        mNotificationFramesReq = 0;
        const uint32_t minNotificationsPerBuffer = 1;
        const uint32_t maxNotificationsPerBuffer = 8;
        mNotificationsPerBufferReq = min(maxNotificationsPerBuffer,
                max((uint32_t) -notificationFrames, minNotificationsPerBuffer));
        ALOGW_IF(mNotificationsPerBufferReq != (uint32_t) -notificationFrames,
                "%s(): notificationFrames=%d clamped to the range -%u to -%u",
                __func__,
                notificationFrames, minNotificationsPerBuffer, maxNotificationsPerBuffer);
    }
    mNotificationFramesAct = 0;
    // TODO b/182392553: refactor or remove
    mClientAttributionSource = AttributionSourceState(attributionSource);
    callingPid = IPCThreadState::self()->getCallingPid();
    myPid = getpid();
    if (uid == -1 || (callingPid != myPid)) {
        mClientAttributionSource.uid = VALUE_OR_FATAL(legacy2aidl_uid_t_int32_t(
            IPCThreadState::self()->getCallingUid()));
    }
    if (pid == (pid_t)-1 || (callingPid != myPid)) {
        mClientAttributionSource.pid = VALUE_OR_FATAL(legacy2aidl_uid_t_int32_t(callingPid));
    }
    mAuxEffectId = 0;
    mCallback = callback;


    if (callback != nullptr) {
        mAudioTrackThread = sp<AudioTrackThread>::make(*this); // create the AudioTrackThread
        mAudioTrackThread->run("AudioTrack", ANDROID_PRIORITY_AUDIO, 0 /*stack*/); // start the thread
        // thread begins in paused state, and will not reference us until start()
    }


    // create the IAudioTrack
    {
        AutoMutex lock(mLock);
        status = createTrack_l(); // create the IAudioTrack
    }
    if (status != NO_ERROR) {
        if (mAudioTrackThread != 0) {
            mAudioTrackThread->requestExit();   // see comment in AudioTrack.h
            mAudioTrackThread->requestExitAndWait();
            mAudioTrackThread.clear();
        }
        // We do not goto error to prevent double-logging errors.
        goto exit;
    }


    mLoopCount = 0;
    mLoopStart = 0;
    mLoopEnd = 0;
    mLoopCountNotified = 0;
    mMarkerPosition = 0;
    mMarkerReached = false;
    mNewPosition = 0;
    mUpdatePeriod = 0;
    mPosition = 0;
    mReleased = 0;
    mStartNs = 0;
    mStartFromZeroUs = 0;
    AudioSystem::acquireAudioSessionId(mSessionId, pid, uid);
    mSequence = 1;
    mObservedSequence = mSequence;
    mInUnderrun = false;
    mPreviousTimestampValid = false;
    mTimestampStartupGlitchReported = false;
    mTimestampRetrogradePositionReported = false;
    mTimestampRetrogradeTimeReported = false;
    mTimestampStallReported = false;
    mTimestampStaleTimeReported = false;
    mPreviousLocation = ExtendedTimestamp::LOCATION_INVALID;
    mStartTs.mPosition = 0;
    mUnderrunCountOffset = 0;
    mFramesWritten = 0;
    mFramesWrittenServerOffset = 0;
    mFramesWrittenAtRestore = -1; // -1 is a unique initializer.
    mVolumeHandler = new media::VolumeHandler();


error:
    if (status != NO_ERROR) {
        ALOGE_IF(!errorMessage.empty(), "%s", errorMessage.c_str());
        reportError(status, AMEDIAMETRICS_PROP_EVENT_VALUE_CREATE, errorMessage.c_str());
    }
    // fall through
exit:
    mStatus = status;
    return status;
}
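
The notificationFrames parameter handled in set() above encodes two conventions in one integer. A hypothetical sketch of the negative-value case (the helper is mine; the clamp bounds come from the code above):

// notificationFrames >= 0: a callback period in frames (0 lets the server choose);
// notificationFrames <  0: a "notifications per buffer" request, clamped to
// [1, 8], permitted only for FAST tracks created with frameCount == 0.
static int notificationsPerBuffer(int notificationFrames) {
    final int minPerBuffer = 1;
    final int maxPerBuffer = 8;
    return Math.min(maxPerBuffer, Math.max(-notificationFrames, minPerBuffer));
}
// e.g. notificationFrames = -4  -> 4 notifications per buffer;
//      notificationFrames = -20 -> clamped to 8.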

This function creates and starts the AudioTrackThread (when a callback is supplied), then calls createTrack_l to create the IAudioTrack:

//frameworks/av/media/libaudioclient/AudioTrack.cpp
status_t AudioTrack::createTrack_l()
{
    status_t status;
    bool callbackAdded = false;
    std::string errorMessage;


    // The server side of the IAudioFlinger interface is AudioFlinger; the proxy is obtained through AudioSystem.
    const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();
    if (audioFlinger == 0) {
        errorMessage = StringPrintf("%s(%d): Could not get audioflinger",
                __func__, mPortId);
        status = DEAD_OBJECT;
        goto exit;
    }


    {
    // mFlags (not mOrigFlags) is modified depending on whether fast request is accepted.
    // After fast request is denied, we will request again if IAudioTrack is re-created.
    // Client can only express a preference for FAST.  Server will perform additional tests.
    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
        // either of these use cases:
        // use case 1: shared buffer
        bool sharedBuffer = mSharedBuffer != 0;
        bool transferAllowed =
            // use case 2: callback transfer mode
            (mTransfer == TRANSFER_CALLBACK) ||
            // use case 3: obtain/release mode
            (mTransfer == TRANSFER_OBTAIN) ||
            // use case 4: synchronous write
            ((mTransfer == TRANSFER_SYNC || mTransfer == TRANSFER_SYNC_NOTIF_CALLBACK)
                    && mThreadCanCallJava);


        bool fastAllowed = sharedBuffer || transferAllowed;
        if (!fastAllowed) {
            ALOGW("%s(%d): AUDIO_OUTPUT_FLAG_FAST denied by client,"
                  " not shared buffer and transfer = %s",
                  __func__, mPortId,
                  convertTransferToText(mTransfer));
            mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_FAST);
        }
    }


    IAudioFlinger::CreateTrackInput input; // fill in the CreateTrack input
    if (mOriginalStreamType != AUDIO_STREAM_DEFAULT) {
        // Legacy: This is based on original parameters even if the track is recreated.
        input.attr = AudioSystem::streamTypeToAttributes(mOriginalStreamType);
    } else {
        input.attr = mAttributes;
    }
    input.config = AUDIO_CONFIG_INITIALIZER;
    input.config.sample_rate = mSampleRate;
    input.config.channel_mask = mChannelMask;
    input.config.format = mFormat;
    input.config.offload_info = mOffloadInfoCopy;
    input.clientInfo.attributionSource = mClientAttributionSource;
    input.clientInfo.clientTid = -1;
    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
        // It is currently meaningless to request SCHED_FIFO for a Java thread.  Even if the
        // application-level code follows all non-blocking design rules, the language runtime
        // doesn't also follow those rules, so the thread will not benefit overall.
        if (mAudioTrackThread != 0 && !mThreadCanCallJava) {
            input.clientInfo.clientTid = mAudioTrackThread->getTid();
        }
    }
    input.sharedBuffer = mSharedBuffer;
    input.notificationsPerBuffer = mNotificationsPerBufferReq;
    input.speed = 1.0;
    if (audio_has_proportional_frames(mFormat) && mSharedBuffer == 0 &&
            (mFlags & AUDIO_OUTPUT_FLAG_FAST) == 0) {
        input.speed  = !isPurePcmData_l() || isOffloadedOrDirect_l() ? 1.0f :
                        max(mMaxRequiredSpeed, mPlaybackRate.mSpeed);
    }
    input.flags = mFlags;
    input.frameCount = mReqFrameCount;
    input.notificationFrameCount = mNotificationFramesReq;
    input.selectedDeviceId = mSelectedDeviceId;
    input.sessionId = mSessionId;
    input.audioTrackCallback = mAudioTrackCallback;


    media::CreateTrackResponse response;
    // call AudioFlinger::createTrack, which creates the PlaybackTrack on the server side
    status = audioFlinger->createTrack(VALUE_OR_FATAL(input.toAidl()), response);


    IAudioFlinger::CreateTrackOutput output{}; // receives the CreateTrack output
    if (status == NO_ERROR) {
        output = VALUE_OR_FATAL(IAudioFlinger::CreateTrackOutput::fromAidl(response));
    }


    if (status != NO_ERROR || output.outputId == AUDIO_IO_HANDLE_NONE) {
        errorMessage = StringPrintf(
                "%s(%d): AudioFlinger could not create track, status: %d output %d",
                __func__, mPortId, status, output.outputId);
        if (status == NO_ERROR) {
            status = INVALID_OPERATION; // device not ready
        }
        goto exit;
    }
    ALOG_ASSERT(output.audioTrack != 0);


    mFrameCount = output.frameCount;
    mNotificationFramesAct = (uint32_t)output.notificationFrameCount;
    mRoutedDeviceId = output.selectedDeviceId;
    mSessionId = output.sessionId;
    mStreamType = output.streamType;


    mSampleRate = output.sampleRate;
    if (mOriginalSampleRate == 0) {
        mOriginalSampleRate = mSampleRate;
    }


    mAfFrameCount = output.afFrameCount;
    mAfSampleRate = output.afSampleRate;
    mAfLatency = output.afLatencyMs;


    mLatency = mAfLatency + (1000LL * mFrameCount) / mSampleRate;


    // AudioFlinger now owns the reference to the I/O handle,
    // so we are no longer responsible for releasing it.


    // FIXME compare to AudioRecord
    std::optional<media::SharedFileRegion> sfr;
    output.audioTrack->getCblk(&sfr);
    sp<IMemory> iMem = VALUE_OR_FATAL(aidl2legacy_NullableSharedFileRegion_IMemory(sfr));
    if (iMem == 0) {
        errorMessage = StringPrintf("%s(%d): Could not get control block", __func__, mPortId);
        status = FAILED_TRANSACTION;
        goto exit;
    }
    // TODO: Using unsecurePointer() has some associated security pitfalls
    //       (see declaration for details).
    //       Either document why it is safe in this case or address the
    //       issue (e.g. by copying).
    void *iMemPointer = iMem->unsecurePointer();
    if (iMemPointer == NULL) {
        errorMessage = StringPrintf(
                "%s(%d): Could not get control block pointer", __func__, mPortId);
        status = FAILED_TRANSACTION;
        goto exit;
    }
    // invariant that mAudioTrack != 0 is true only after set() returns successfully
    if (mAudioTrack != 0) {
        IInterface::asBinder(mAudioTrack)->unlinkToDeath(mDeathNotifier, this);
        mDeathNotifier.clear();
    }
    mAudioTrack = output.audioTrack;
    mCblkMemory = iMem;
    IPCThreadState::self()->flushCommands();


    audio_track_cblk_t* cblk = static_cast<audio_track_cblk_t*>(iMemPointer);
    mCblk = cblk;


    mAwaitBoost = false;
    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
        if (output.flags & AUDIO_OUTPUT_FLAG_FAST) {
            ALOGI("%s(%d): AUDIO_OUTPUT_FLAG_FAST successful; frameCount %zu -> %zu",
                  __func__, mPortId, mReqFrameCount, mFrameCount);
            if (!mThreadCanCallJava) {
                mAwaitBoost = true;
            }
        } else {
            ALOGV("%s(%d): AUDIO_OUTPUT_FLAG_FAST denied by server; frameCount %zu -> %zu",
                  __func__, mPortId, mReqFrameCount, mFrameCount);
        }
    }
    mFlags = output.flags;


    //mOutput != output includes the case where mOutput == AUDIO_IO_HANDLE_NONE for first creation
    if (mDeviceCallback != 0) {
        if (mOutput != AUDIO_IO_HANDLE_NONE) {
            AudioSystem::removeAudioDeviceCallback(this, mOutput, mPortId);
        }
        AudioSystem::addAudioDeviceCallback(this, output.outputId, output.portId);
        callbackAdded = true;
    }


    mPortId = output.portId;
    // We retain a copy of the I/O handle, but don't own the reference
    mOutput = output.outputId;
    mRefreshRemaining = true;


    // Starting address of buffers in shared memory.  If there is a shared buffer, buffers
    // is the value of pointer() for the shared buffer, otherwise buffers points
    // immediately after the control block.  This address is for the mapping within client
    // address space.  AudioFlinger::TrackBase::mBuffer is for the server address space.
    void* buffers;
    if (mSharedBuffer == 0) {
        buffers = cblk + 1;
    } else {
        // TODO: Using unsecurePointer() has some associated security pitfalls
        //       (see declaration for details).
        //       Either document why it is safe in this case or address the
        //       issue (e.g. by copying).
        buffers = mSharedBuffer->unsecurePointer();
        if (buffers == NULL) {
            errorMessage = StringPrintf(
                    "%s(%d): Could not get buffer pointer", __func__, mPortId);
            ALOGE("%s", errorMessage.c_str());
            status = FAILED_TRANSACTION;
            goto exit;
        }
    }


    mAudioTrack->attachAuxEffect(mAuxEffectId, &status);


    // If IAudioTrack is re-created, don't let the requested frameCount
    // decrease.  This can confuse clients that cache frameCount().
    if (mFrameCount > mReqFrameCount) {
        mReqFrameCount = mFrameCount;
    }


    // reset server position to 0 as we have new cblk.
    mServer = 0;


    // update proxy
    if (mSharedBuffer == 0) {
        mStaticProxy.clear();
        mProxy = new AudioTrackClientProxy(cblk, buffers, mFrameCount, mFrameSize); 
    } else {
        mStaticProxy = new StaticAudioTrackClientProxy(cblk, buffers, mFrameCount, mFrameSize); 
        mProxy = mStaticProxy;
    }


    mProxy->setVolumeLR(gain_minifloat_pack(
            gain_from_float(mVolume[AUDIO_INTERLEAVE_LEFT]),
            gain_from_float(mVolume[AUDIO_INTERLEAVE_RIGHT])));


    mProxy->setSendLevel(mSendLevel);
    const uint32_t effectiveSampleRate = adjustSampleRate(mSampleRate, mPlaybackRate.mPitch);
    const float effectiveSpeed = adjustSpeed(mPlaybackRate.mSpeed, mPlaybackRate.mPitch);
    const float effectivePitch = adjustPitch(mPlaybackRate.mPitch);
    mProxy->setSampleRate(effectiveSampleRate);


    AudioPlaybackRate playbackRateTemp = mPlaybackRate;
    playbackRateTemp.mSpeed = effectiveSpeed;
    playbackRateTemp.mPitch = effectivePitch;
    mProxy->setPlaybackRate(playbackRateTemp);
    mProxy->setMinimum(mNotificationFramesAct);


    if (mDualMonoMode != AUDIO_DUAL_MONO_MODE_OFF) {
        setDualMonoMode_l(mDualMonoMode);
    }
    if (mAudioDescriptionMixLeveldB != -std::numeric_limits<float>::infinity()) {
        setAudioDescriptionMixLevel_l(mAudioDescriptionMixLeveldB);
    }


    mDeathNotifier = new DeathNotifier(this);
    IInterface::asBinder(mAudioTrack)->linkToDeath(mDeathNotifier, this);


    // This is the first log sent from the AudioTrack client.
    // The creation of the audio track by AudioFlinger (in the code above)
    // is the first log of the AudioTrack and must be present before
    // any AudioTrack client logs will be accepted.


    mMetricsId = std::string(AMEDIAMETRICS_KEY_PREFIX_AUDIO_TRACK) + std::to_string(mPortId);
    mediametrics::LogItem(mMetricsId)
        .set(AMEDIAMETRICS_PROP_EVENT, AMEDIAMETRICS_PROP_EVENT_VALUE_CREATE)
        // the following are immutable
        .set(AMEDIAMETRICS_PROP_FLAGS, toString(mFlags).c_str())
        .set(AMEDIAMETRICS_PROP_ORIGINALFLAGS, toString(mOrigFlags).c_str())
        .set(AMEDIAMETRICS_PROP_SESSIONID, (int32_t)mSessionId)
        .set(AMEDIAMETRICS_PROP_LOGSESSIONID, mLogSessionId)
        .set(AMEDIAMETRICS_PROP_PLAYERIID, mPlayerIId)
        .set(AMEDIAMETRICS_PROP_TRACKID, mPortId) // dup from key
        .set(AMEDIAMETRICS_PROP_CONTENTTYPE, toString(mAttributes.content_type).c_str())
        .set(AMEDIAMETRICS_PROP_USAGE, toString(mAttributes.usage).c_str())
        .set(AMEDIAMETRICS_PROP_THREADID, (int32_t)output.outputId)
        .set(AMEDIAMETRICS_PROP_SELECTEDDEVICEID, (int32_t)mSelectedDeviceId)
        .set(AMEDIAMETRICS_PROP_ROUTEDDEVICEID, (int32_t)mRoutedDeviceId)
        .set(AMEDIAMETRICS_PROP_ENCODING, toString(mFormat).c_str())
        .set(AMEDIAMETRICS_PROP_CHANNELMASK, (int32_t)mChannelMask)
        .set(AMEDIAMETRICS_PROP_FRAMECOUNT, (int32_t)mFrameCount)
        // the following are NOT immutable
        .set(AMEDIAMETRICS_PROP_VOLUME_LEFT, (double)mVolume[AUDIO_INTERLEAVE_LEFT])
        .set(AMEDIAMETRICS_PROP_VOLUME_RIGHT, (double)mVolume[AUDIO_INTERLEAVE_RIGHT])
        .set(AMEDIAMETRICS_PROP_STATE, stateToString(mState))
        .set(AMEDIAMETRICS_PROP_STATUS, (int32_t)NO_ERROR)
        .set(AMEDIAMETRICS_PROP_AUXEFFECTID, (int32_t)mAuxEffectId)
        .set(AMEDIAMETRICS_PROP_SAMPLERATE, (int32_t)mSampleRate)
        .set(AMEDIAMETRICS_PROP_PLAYBACK_SPEED, (double)mPlaybackRate.mSpeed)
        .set(AMEDIAMETRICS_PROP_PLAYBACK_PITCH, (double)mPlaybackRate.mPitch)
        .set(AMEDIAMETRICS_PROP_PREFIX_EFFECTIVE
                AMEDIAMETRICS_PROP_SAMPLERATE, (int32_t)effectiveSampleRate)
        .set(AMEDIAMETRICS_PROP_PREFIX_EFFECTIVE
                AMEDIAMETRICS_PROP_PLAYBACK_SPEED, (double)effectiveSpeed)
        .set(AMEDIAMETRICS_PROP_PREFIX_EFFECTIVE
                AMEDIAMETRICS_PROP_PLAYBACK_PITCH, (double)effectivePitch)
        .record();


    // mSendLevel
    // mReqFrameCount?
    // mNotificationFramesAct, mNotificationFramesReq, mNotificationsPerBufferReq
    // mLatency, mAfLatency, mAfFrameCount, mAfSampleRate


    }


exit:
    if (status != NO_ERROR) {
        if (callbackAdded) {
            // note: mOutput is always valid if callbackAdded is true
            AudioSystem::removeAudioDeviceCallback(this, mOutput, mPortId);
        }
        ALOGE_IF(!errorMessage.empty(), "%s", errorMessage.c_str());
        reportError(status, AMEDIAMETRICS_PROP_EVENT_VALUE_CREATE, errorMessage.c_str());
    }
    mStatus = status;


    // sp<IAudioTrack> track destructor will cause releaseOutput() to be called by AudioFlinger
    return status;
}
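
Also note how createTrack_l derives the client-visible latency from the server's response: the AudioFlinger output latency plus the time to play one client buffer. A hypothetical standalone version of that arithmetic:

// Hypothetical helper mirroring mLatency = mAfLatency + (1000 * mFrameCount) / mSampleRate above.
static long trackLatencyMs(long afLatencyMs, long frameCount, long sampleRateHz) {
    return afLatencyMs + (1000L * frameCount) / sampleRateHz;
}
// e.g. afLatencyMs = 40, frameCount = 960, sampleRate = 48000 Hz:
//   40 + (1000 * 960) / 48000 = 40 + 20 = 60 ms.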

The call to audioFlinger->createTrack, which creates the PlaybackTrack, goes through the IAudioFlinger interface:

//frameworks/av/media/libaudioclient/include/media/IAudioFlinger.h
class IAudioFlinger : public virtual RefBase {
    ......
    virtual status_t createTrack(const media::CreateTrackRequest& input,
                                 media::CreateTrackResponse& output) = 0;
    ......
}

AudioFlinger createTrack

From here the flow continues into AudioFlinger's createTrack method, which is analyzed separately in:

Android13 AudioFlinger createTrack流程分析-CSDN博客
