Android Audio Subsystem - AudioTrack

 

 

 

sm(BpServiceManager(BpBinder(handle= 0))) = defaultServiceManager()

audioFlinger = sm->getService(String16(kAudioFlingerName))

audioFlinger =BpAudioFlinger(BpBinder) ;

// audioFlinger实际指向一个BpAudioFlinger(BpBinder) 对象

上述两步是通过get_audio_flinger()实现的。

 

track =audioFlinger->createTrack()

这个 track 实际指向一个 BpAudioTrack(BpBinder) 对象, BpAudioTrack 的 mRemote 成员变量指向 BpBinder.

 

 

1.    AudioTrack::getMinBufferSize()

1.1          AudioTrack::getMinBufferSize()

Frameworks/base/media/java/android/media/AudioTrack.java

static public int getMinBufferSize(intsampleRateInHz, int channelConfig, int audioFormat) {

        int channelCount = 0;

        switch(channelConfig) {

        case AudioFormat.CHANNEL_OUT_MONO:

        caseAudioFormat.CHANNEL_CONFIGURATION_MONO:

            channelCount = 1;

            break;

        case AudioFormat.CHANNEL_OUT_STEREO:

        caseAudioFormat.CHANNEL_CONFIGURATION_STEREO:

            channelCount = 2;

            break;

        default:

            if ((channelConfig &SUPPORTED_OUT_CHANNELS) != channelConfig) {

                // input channel configurationfeatures unsupported channels

                loge("getMinBufferSize():Invalid channel configuration.");

                return ERROR_BAD_VALUE;

            } else {

                channelCount = Integer.bitCount(channelConfig);

            }

        }

 

        if ((audioFormat !=AudioFormat.ENCODING_PCM_16BIT)

            && (audioFormat !=AudioFormat.ENCODING_PCM_8BIT)) {

            loge("getMinBufferSize():Invalid audio format.");

            return ERROR_BAD_VALUE;

        }

 

        // sample rate, note these values aresubject to change

        if ( (sampleRateInHz <SAMPLE_RATE_HZ_MIN) || (sampleRateInHz > SAMPLE_RATE_HZ_MAX) ) {

            loge("getMinBufferSize():" + sampleRateInHz + " Hz is not a supported sample rate.");

            return ERROR_BAD_VALUE;

        }

// 调用JNI native_get_min_buff_size

        int size =native_get_min_buff_size(sampleRateInHz, channelCount, audioFormat);

        if (size <= 0) {

            loge("getMinBufferSize():error querying hardware");

            return ERROR;

        }

        else {

            return size;

        }

   }

1.2          android_media_AudioTrack_get_min_buff_size()

frameworks/base/core/jni/android_media_AudioTrack.cpp

 

//----------------------------------------------------------------------------

//returns the minimum required size for the successful creation of a streamingAudioTrack

//returns -1 if there was an error querying the hardware.

staticjint android_media_AudioTrack_get_min_buff_size(JNIEnv *env,  jobject thiz,

    jint sampleRateInHertz, jint nbChannels,jint audioFormat) {

 

 

// size

    size_t frameCount = 0;

    if(AudioTrack::getMinFrameCount(&frameCount, AUDIO_STREAM_DEFAULT,

            sampleRateInHertz) != NO_ERROR) {

        return -1;

    }

    return frameCount * nbChannels *(audioFormat == javaAudioTrackFields.PCM16 ? 2 : 1);

}

 

1.3          AudioTrack::getMinFrameCount()

Frameworks/av/media/libmedia/AudioTrack.cpp

status_tAudioTrack::getMinFrameCount(

        size_t* frameCount,

        audio_stream_type_t streamType,

        uint32_t sampleRate)

{

    if (frameCount == NULL) {

        return BAD_VALUE;

    }

 

    // default to 0 in case of error

    *frameCount = 0;

 

    // FIXME merge with similar code increateTrack_l(), except we're missing

    //      some information here that is available in createTrack_l():

    //         audio_io_handle_t output

    //         audio_format_t format

    //         audio_channel_mask_t channelMask

    //         audio_output_flags_t flags

    uint32_t afSampleRate;

    if(AudioSystem::getOutputSamplingRate(&afSampleRate, streamType) != NO_ERROR){

        return NO_INIT;

    }

    size_t afFrameCount;   // 最大采样率下一帧的size

    if(AudioSystem::getOutputFrameCount(&afFrameCount, streamType) != NO_ERROR) {

        return NO_INIT;

    }

    uint32_t afLatency;

    if(AudioSystem::getOutputLatency(&afLatency, streamType) != NO_ERROR) {

        return NO_INIT;

    }

 

    // Ensure that buffer depth covers at leastaudio hardware latency

    uint32_t minBufCount = afLatency / ((1000 *afFrameCount) / afSampleRate);

    if (minBufCount < 2) minBufCount = 2;

 

//   如果传入的采样率为0那么取系统最大采样率。

//   那么最小缓冲区size (*framecount) = afFrameCount * minBufCount

//   否则 size = afFrameCount * minBufCount * sampleRate /afSampleRate;

 

    *frameCount =(sampleRate == 0) ? afFrameCount * minBufCount :

           afFrameCount * minBufCount * sampleRate / afSampleRate;

    ALOGV("getMinFrameCount=%d:afFrameCount=%d, minBufCount=%d, afSampleRate=%d, afLatency=%d",

            *frameCount, afFrameCount,minBufCount, afSampleRate, afLatency);

    return NO_ERROR;

}

 

 

 

 

 

 

 

 

 

 

2.    AudioTrack:: AudioTrack()

2.1  AudioTrack::AudioTrack()

Frameworks/base/media/java/android/media/AudioTrack.java

public AudioTrack(int streamType, int sampleRateInHz, intchannelConfig, int audioFormat,

            int bufferSizeInBytes, int mode,int sessionId)

    throws IllegalArgumentException {

        // mState already ==STATE_UNINITIALIZED

 

        // remember which looper is associatedwith the AudioTrack instantiation

        Looper looper;

        if ((looper = Looper.myLooper()) ==null) {

            looper = Looper.getMainLooper();

        }

        mInitializationLooper = looper;

 

// 检查参数的合法性,有效性

        audioParamCheck(streamType,sampleRateInHz, channelConfig, audioFormat, mode);

 

        audioBuffSizeCheck(bufferSizeInBytes);

 

        if (sessionId < 0) {

            throw newIllegalArgumentException("Invalid audio session ID: "+sessionId);

        }

 

        int[] session = new int[1];

        session[0] = sessionId;

        // native initialization  call jni method

        int initResult= native_setup(new WeakReference<AudioTrack>(this),

               mStreamType, mSampleRate, mChannels, mAudioFormat,

               mNativeBufferSizeInBytes, mDataLoadMode, session);

        if (initResult != SUCCESS) {

            loge("Error code"+initResult+" when initializing AudioTrack.");

            return; // with mState == STATE_UNINITIALIZED

        }

 

        mSessionId = session[0];

 

        if (mDataLoadMode == MODE_STATIC) {

            mState = STATE_NO_STATIC_DATA;

        } else {

            mState = STATE_INITIALIZED;

        }

    }

 

2.2   android_media_AudioTrack_native_setup()

frameworks/base/core/jni/android_media_AudioTrack.cpp

//----------------------------------------------------------------------------

staticint

android_media_AudioTrack_native_setup(JNIEnv*env, jobject thiz, jobject weak_this,

        jint streamType, jintsampleRateInHertz, jint javaChannelMask,

        jint audioFormat, jint buffSizeInBytes,jint memoryMode, jintArray jSession)

{

    ALOGV("sampleRate=%d, audioFormat(fromJava)=%d, channel mask=%x, buffSize=%d",

        sampleRateInHertz, audioFormat,javaChannelMask, buffSizeInBytes);

    uint32_t afSampleRate;

    size_t afFrameCount;

 

    if(AudioSystem::getOutputFrameCount(&afFrameCount, (audio_stream_type_t)streamType) != NO_ERROR) {

        ALOGE("Error creating AudioTrack:Could not get AudioSystem frame count.");

        returnAUDIOTRACK_ERROR_SETUP_AUDIOSYSTEM;

    }

    if(AudioSystem::getOutputSamplingRate(&afSampleRate, (audio_stream_type_t)streamType) != NO_ERROR) {

        ALOGE("Error creating AudioTrack:Could not get AudioSystem sampling rate.");

        returnAUDIOTRACK_ERROR_SETUP_AUDIOSYSTEM;

    }

 

    // Java channel masks don't map directly tothe native definition, but it's a simple shift

    // to skip the two deprecated channelconfigurations "default" and "mono".

   uint32_t nativeChannelMask = ((uint32_t)javaChannelMask) >> 2;

 

    if(!audio_is_output_channel(nativeChannelMask)) {

        ALOGE("Error creating AudioTrack:invalid channel mask.");

        returnAUDIOTRACK_ERROR_SETUP_INVALIDCHANNELMASK;

    }

 

    intnbChannels = popcount(nativeChannelMask);

 

    // check the stream type

    audio_stream_type_t atStreamType;

    switch (streamType) {

    case AUDIO_STREAM_VOICE_CALL:

    case AUDIO_STREAM_SYSTEM:

    case AUDIO_STREAM_RING:

    case AUDIO_STREAM_MUSIC:

    case AUDIO_STREAM_ALARM:

    case AUDIO_STREAM_NOTIFICATION:

    case AUDIO_STREAM_BLUETOOTH_SCO:

    case AUDIO_STREAM_DTMF:

        atStreamType = (audio_stream_type_t)streamType;

        break;

    default:

        ALOGE("Error creating AudioTrack:unknown stream type.");

        returnAUDIOTRACK_ERROR_SETUP_INVALIDSTREAMTYPE;

    }

 

    // check the format.

    // This function was called from Java, sowe compare the format against the Java constants

    if ((audioFormat != javaAudioTrackFields.PCM16)&& (audioFormat != javaAudioTrackFields.PCM8)) {

        ALOGE("Error creating AudioTrack:unsupported audio format.");

        returnAUDIOTRACK_ERROR_SETUP_INVALIDFORMAT;

    }

 

    // for the moment 8bitPCM in MODE_STATIC isnot supported natively in the AudioTrack C++ class

    // so we declare everything as 16bitPCM,the 8->16bit conversion for MODE_STATIC will be handled

    // inandroid_media_AudioTrack_native_write_byte()

    if ((audioFormat ==javaAudioTrackFields.PCM8)

        && (memoryMode == MODE_STATIC)){

       ALOGV("android_media_AudioTrack_native_setup(): requestingMODE_STATIC for 8bit \

            buff size of %dbytes, switching to16bit, buff size of %dbytes",

            buffSizeInBytes,2*buffSizeInBytes);

        audioFormat =javaAudioTrackFields.PCM16;

        // we will need twice the memory tostore the data

        buffSizeInBytes *= 2;

    }

 

    // compute the frame count

    int bytesPerSample = audioFormat ==javaAudioTrackFields.PCM16 ? 2 : 1;

    audio_format_t format = audioFormat ==javaAudioTrackFields.PCM16 ?

            AUDIO_FORMAT_PCM_16_BIT :AUDIO_FORMAT_PCM_8_BIT;

 

// 计算一帧的size

    int frameCount =buffSizeInBytes / (nbChannels * bytesPerSample);

 

    jclass clazz =env->GetObjectClass(thiz);

    if (clazz == NULL) {

        ALOGE("Can't find %s when settingup callback.", kClassPathName);

        returnAUDIOTRACK_ERROR_SETUP_NATIVEINITFAILED;

    }

 

    if (jSession == NULL) {

        ALOGE("Error creating AudioTrack:invalid session ID pointer");

        return AUDIOTRACK_ERROR;

    }

 

    jint* nSession = (jint *)env->GetPrimitiveArrayCritical(jSession, NULL);

    if (nSession == NULL) {

        ALOGE("Error creating AudioTrack:Error retrieving session id pointer");

        return AUDIOTRACK_ERROR;

    }

    int sessionId = nSession[0];

   env->ReleasePrimitiveArrayCritical(jSession, nSession, 0);

    nSession = NULL;

 

    // create thenative AudioTrack object

   sp<AudioTrack> lpTrack = new AudioTrack();

 

    // initialize the callback information:

    // this data will be passed with everyAudioTrack callback

   AudioTrackJniStorage* lpJniStorage = new AudioTrackJniStorage();

 

    lpJniStorage->mStreamType =atStreamType;

   lpJniStorage->mCallbackData.audioTrack_class = (jclass)env->NewGlobalRef(clazz);

    // we use a weak reference so theAudioTrack object can be garbage collected.

   lpJniStorage->mCallbackData.audioTrack_ref =env->NewGlobalRef(weak_this);

    lpJniStorage->mCallbackData.busy =false;

 

    // initialize the native AudioTrack object

    switch (memoryMode) {

    case MODE_STREAM:

 

        lpTrack->set(

            atStreamType,// stream type

            sampleRateInHertz,

            format,// word length, PCM

            nativeChannelMask,

            frameCount,

            AUDIO_OUTPUT_FLAG_NONE,

            audioCallback,&(lpJniStorage->mCallbackData),//callback, callback data (user)

            0, // notificationFrames == 0 sincenot using EVENT_MORE_DATA to feed the AudioTrack

            0,

// shared mem, 0 for stream type, 实际共享内存由AudioFlinger 创建

            true,// thread can call Java

            sessionId);// audio session ID

        break;

 

    case MODE_STATIC:

        // AudioTrack is using shared memory

 

        if(!lpJniStorage->allocSharedMem(buffSizeInBytes)) {

            ALOGE("Error creatingAudioTrack in static mode: error creating mem heap base");

            goto native_init_failure;

        }

 

        lpTrack->set(

            atStreamType,// stream type

            sampleRateInHertz,

            format,// word length, PCM

            nativeChannelMask,

            frameCount,

            AUDIO_OUTPUT_FLAG_NONE,

            audioCallback,&(lpJniStorage->mCallbackData),//callback, callback data (user));

            0,// notificationFrames == 0 sincenot using EVENT_MORE_DATA to feed the AudioTrack

            lpJniStorage->mMemBase,// sharedmem

            true,// thread can call Java

            sessionId);// audio session ID

        break;

 

    default:

        ALOGE("Unknown mode %d",memoryMode);

        goto native_init_failure;

    }

 

    if (lpTrack->initCheck() != NO_ERROR) {

        ALOGE("Error initializingAudioTrack");

        goto native_init_failure;

    }

 

    nSession = (jint *)env->GetPrimitiveArrayCritical(jSession, NULL);

    if (nSession == NULL) {

        ALOGE("Error creating AudioTrack:Error retrieving session id pointer");

        goto native_init_failure;

    }

    // read the audio session ID back fromAudioTrack in case we create a new session

    nSession[0] = lpTrack->getSessionId();

   env->ReleasePrimitiveArrayCritical(jSession, nSession, 0);

    nSession = NULL;

    {  // scope for the lock

        Mutex::Autolock l(sLock);

       sAudioTrackCallBackCookies.add(&lpJniStorage->mCallbackData);

    }

    // save our newly created C++ AudioTrack inthe "nativeTrackInJavaObj" field

    // of the Java object (inmNativeTrackInJavaObj)

 

// lpTrack本地AudioTrack对象保存在java env

// 这个AudioTrack对象的成员变量mAudioTrack保存着服务端代理

//  BpAudioTrack(BpBinder)对象

 

    setAudioTrack(env,thiz, lpTrack);

 

    // save the JNI resources so we can freethem later

    //ALOGV("storing lpJniStorage:%x\n", (int)lpJniStorage);

    env->SetIntField(thiz,javaAudioTrackFields.jniData, (int)lpJniStorage);

 

    return AUDIOTRACK_SUCCESS;

    // failures:

native_init_failure:

    if (nSession != NULL) {

       env->ReleasePrimitiveArrayCritical(jSession, nSession, 0);

    }

   env->DeleteGlobalRef(lpJniStorage->mCallbackData.audioTrack_class);

   env->DeleteGlobalRef(lpJniStorage->mCallbackData.audioTrack_ref);

    delete lpJniStorage;

    env->SetIntField(thiz,javaAudioTrackFields.jniData, 0);

 

    return AUDIOTRACK_ERROR_SETUP_NATIVEINITFAILED;

}

 

 

2.3   AudioTrack::set()

status_tAudioTrack::set(

        audio_stream_type_t streamType,

        uint32_t sampleRate,

        audio_format_t format,

        audio_channel_mask_t channelMask,

        int frameCountInt,

        audio_output_flags_t flags,

        callback_t cbf,

        void* user,

        int notificationFrames,

        const sp<IMemory>&sharedBuffer,

        bool threadCanCallJava,

        int sessionId)

{

    // FIXME "int" here is legacy andwill be replaced by size_t later

    if (frameCountInt < 0) {

        ALOGE("Invalid frame count%d", frameCountInt);

        return BAD_VALUE;

    }

    size_t frameCount = frameCountInt;

 

    ALOGV_IF(sharedBuffer != 0,"sharedBuffer: %p, size: %d", sharedBuffer->pointer(),

            sharedBuffer->size());

 

    ALOGV("set() streamType %d frameCount%u flags %04x", streamType, frameCount, flags);

 

    AutoMutex lock(mLock);

    if (mAudioTrack != 0) {

        ALOGE("Track already inuse");

        return INVALID_OPERATION;

    }

 

    // handle default values first.

    if (streamType == AUDIO_STREAM_DEFAULT) {

        streamType = AUDIO_STREAM_MUSIC;

    }

 

    if (sampleRate == 0) {

        uint32_t afSampleRate;

        if (AudioSystem::getOutputSamplingRate(&afSampleRate,streamType) != NO_ERROR) {

            return NO_INIT;

        }

        sampleRate = afSampleRate;

    }

    mSampleRate = sampleRate;

 

    // these below should probably come fromthe audioFlinger too...

    if (format == AUDIO_FORMAT_DEFAULT) {

        format = AUDIO_FORMAT_PCM_16_BIT;

    }

    if (channelMask == 0) {

        channelMask = AUDIO_CHANNEL_OUT_STEREO;

    }

 

    // validate parameters

    if (!audio_is_valid_format(format)) {

        ALOGE("Invalid format");

        return BAD_VALUE;

    }

 

    // AudioFlinger does not currently support8-bit data in shared memory

    if (format == AUDIO_FORMAT_PCM_8_BIT&& sharedBuffer != 0) {

        ALOGE("8-bit data in shared memoryis not supported");

        return BAD_VALUE;

    }

 

    //force direct flag if format is not linear PCM

    if (!audio_is_linear_pcm(format)) {

        flags = (audio_output_flags_t)

                // FIXME why can't we allowdirect AND fast?

                ((flags |AUDIO_OUTPUT_FLAG_DIRECT) & ~AUDIO_OUTPUT_FLAG_FAST);

    }

    // only allow deep buffering for musicstream type

    if (streamType != AUDIO_STREAM_MUSIC) {

        flags = (audio_output_flags_t)(flags&~AUDIO_OUTPUT_FLAG_DEEP_BUFFER);

    }

 

    if (!audio_is_output_channel(channelMask)){

       ALOGE("Invalid channel mask %#x", channelMask);

        return BAD_VALUE;

    }

    mChannelMask = channelMask;

    uint32_t channelCount =popcount(channelMask);

    mChannelCount = channelCount;

 

    if (audio_is_linear_pcm(format)) {

        mFrameSize = channelCount *audio_bytes_per_sample(format);

        mFrameSizeAF = channelCount *sizeof(int16_t);

    } else {

        mFrameSize = sizeof(uint8_t);

        mFrameSizeAF = sizeof(uint8_t);

    }

 

// 选择合适的线程,并返回其在AudioFlinger中的索引

    audio_io_handle_t output =AudioSystem::getOutput(

                                    streamType,

                                    sampleRate,format, channelMask,

                                    flags);

 

    if (output == 0) {

        ALOGE("Could not get audio outputfor stream type %d", streamType);

        return BAD_VALUE;

    }

 

    mVolume[LEFT] = 1.0f;

    mVolume[RIGHT] = 1.0f;

    mSendLevel = 0.0f;

    mFrameCount = frameCount;

    mReqFrameCount = frameCount;

    mNotificationFramesReq = notificationFrames;

    mSessionId = sessionId;

    mAuxEffectId = 0;

    mFlags = flags;

    mCbf = cbf;

 

    if (cbf != NULL) {

        mAudioTrackThread = newAudioTrackThread(*this, threadCanCallJava);

       mAudioTrackThread->run("AudioTrack", ANDROID_PRIORITY_AUDIO,0 /*stack*/);

    }

 

    // create  AudioTrack

    status_t status =createTrack_l(streamType,

                                  sampleRate,

                                  format,

                                  frameCount,

                                  flags,

                                  sharedBuffer,

                                  output);

 

    if (status != NO_ERROR) {

        if (mAudioTrackThread != 0) {

           mAudioTrackThread->requestExit();

            mAudioTrackThread.clear();

        }

        return status;

    }

 

    mStatus = NO_ERROR;

 

    mStreamType = streamType;

    mFormat = format;

 

    mSharedBuffer = sharedBuffer;

    mActive = false;

    mUserData = user;

    mLoopCount = 0;

    mMarkerPosition = 0;

    mMarkerReached = false;

    mNewPosition = 0;

    mUpdatePeriod = 0;

    mFlushed = false;

   AudioSystem::acquireAudioSessionId(mSessionId);

    return NO_ERROR;

}

3.  AudioTrack::createTrack_l

// mustbe called with mLock held

status_tAudioTrack::createTrack_l(

        audio_stream_type_t streamType,

        uint32_t sampleRate,

        audio_format_t format,

        size_t frameCount,

        audio_output_flags_t flags,

        const sp<IMemory>&sharedBuffer,

        audio_io_handle_t output)

{

    status_t status;

 

    constsp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();

 

// 这儿audioFlinger 是一个 BpAudioFlinger(BpBinder) 对象。

 

    if (audioFlinger == 0) {

        ALOGE("Could not getaudioflinger");

        return NO_INIT;

    }

 

    uint32_t afLatency;

    if (AudioSystem::getLatency(output,streamType, &afLatency) != NO_ERROR) {

        return NO_INIT;

    }

 

    // Client decides whether the track isTIMED (see below), but can only express a preference

    // for FAST.  Server will perform additional tests.

    if ((flags & AUDIO_OUTPUT_FLAG_FAST)&& !(

            // either of these use cases:

            // use case 1: shared buffer

            (sharedBuffer != 0) ||

            // use case 2: callback handler

            (mCbf != NULL))) {

        ALOGW("AUDIO_OUTPUT_FLAG_FASTdenied by client");

        // once denied, do not request again ifIAudioTrack is re-created

        flags = (audio_output_flags_t) (flags& ~AUDIO_OUTPUT_FLAG_FAST);

        mFlags = flags;

}

 

    mNotificationFramesAct =mNotificationFramesReq;

 

    if (!audio_is_linear_pcm(format)) {

 

        if (sharedBuffer != 0) {

            // Same comment as below aboutignoring frameCount parameter for set()

            frameCount =sharedBuffer->size();

        } else if (frameCount == 0) {

            size_t afFrameCount;

            if(AudioSystem::getFrameCount(output, streamType, &afFrameCount) != NO_ERROR){

                return NO_INIT;

            }

            frameCount = afFrameCount;

        }

 

    } else if (sharedBuffer != 0) {

 

        // Ensure that buffer alignment matcheschannel count

        // 8-bit data in shared memory is notcurrently supported by AudioFlinger

        size_t alignment = /* format ==AUDIO_FORMAT_PCM_8_BIT ? 1 : */ 2;

        if (mChannelCount > 1) {

            // More than 2 channels does notrequire stronger alignment than stereo

            alignment <<= 1;

        }

        if (((size_t)sharedBuffer->pointer()& (alignment - 1)) != 0) {

            ALOGE("Invalid buffer alignment:address %p, channel count %u",

                    sharedBuffer->pointer(),mChannelCount);

            return BAD_VALUE;

        }

 

        // When initializing a shared bufferAudioTrack via constructors,

        // there's no frameCount parameter.

        // But when initializing a sharedbuffer AudioTrack via set(),

        // there _is_ a frameCountparameter.  We silently ignore it.

        frameCount =sharedBuffer->size()/mChannelCount/sizeof(int16_t);

 

    } else if (!(flags & AUDIO_OUTPUT_FLAG_FAST)){

 

        // FIXME move these calculations andassociated checks to server

        uint32_t afSampleRate;

        if(AudioSystem::getSamplingRate(output, streamType, &afSampleRate) !=NO_ERROR) {

            return NO_INIT;

        }

        size_t afFrameCount;

        if (AudioSystem::getFrameCount(output,streamType, &afFrameCount) != NO_ERROR) {

            return NO_INIT;

        }

 

        // Ensure that buffer depth covers atleast audio hardware latency

        uint32_t minBufCount = afLatency /((1000 * afFrameCount)/afSampleRate);

        if (minBufCount < 2) minBufCount =2;

 

        size_t minFrameCount =(afFrameCount*sampleRate*minBufCount)/afSampleRate;

        ALOGV("minFrameCount: %u,afFrameCount=%d, minBufCount=%d, sampleRate=%u, afSampleRate=%u"

                ", afLatency=%d",

                minFrameCount, afFrameCount,minBufCount, sampleRate, afSampleRate, afLatency);

 

        if (frameCount == 0) {

            frameCount = minFrameCount;

        }

        if (mNotificationFramesAct == 0) {

            mNotificationFramesAct =frameCount/2;

        }

        // Make sure that application isnotified with sufficient margin

        // before underrun

        if (mNotificationFramesAct >frameCount/2) {

            mNotificationFramesAct =frameCount/2;

        }

        if (frameCount < minFrameCount) {

            // not ALOGW because it happens allthe time when playing key clicks over A2DP

            ALOGV("Minimum buffer sizecorrected from %d to %d",

                     frameCount, minFrameCount);

            frameCount = minFrameCount;

        }

 

    } else {

        // For fast tracks, the frame countcalculations and checks are done by server

    }

 

    IAudioFlinger::track_flags_t trackFlags =IAudioFlinger::TRACK_DEFAULT;

    if (mIsTimed) {

        trackFlags |=IAudioFlinger::TRACK_TIMED;

    }

 

    pid_t tid = -1;

    if (flags & AUDIO_OUTPUT_FLAG_FAST) {

        trackFlags |=IAudioFlinger::TRACK_FAST;

        if (mAudioTrackThread != 0) {

            tid = mAudioTrackThread->getTid();

        }

    }

// AudioFlinger 发送creatTrack请求

// stream模式下, AudioTrackAudioFlinger是通过共享内存来交互数据的

// 共享内存是AudioFlinger通过createTrack来创建的

// 这儿track实际指向一个BpAudioTrack(BpBinder)对象

 

   sp<IAudioTrack> track = audioFlinger->createTrack(streamType,

                                                     sampleRate,

                                                     // AudioFlinger only sees 16-bit PCM

                                                     format == AUDIO_FORMAT_PCM_8_BIT ?

                                                             AUDIO_FORMAT_PCM_16_BIT : format,

                                                     mChannelMask,

                                                     frameCount,

                                                     &trackFlags,

                                                      sharedBuffer,

                                                     output,

                                                     tid,

                                                     &mSessionId,

                                                      &status);

 

    if (track == 0) {

        ALOGE("AudioFlinger could notcreate track, status: %d", status);

        return status;

    }

 

// 共享内存对象

    sp<IMemory> iMem =track->getCblk();

    if (iMem == 0) {

        ALOGE("Could not get controlblock");

        return NO_INIT;

    }

 

// track 对象(BpAudioTrack(BpBinder)) 赋值给 

// AudioTrack对象的成员变量mAudioTrack

 

    mAudioTrack =track;

    mCblkMemory = iMem;

    audio_track_cblk_t* cblk =static_cast<audio_track_cblk_t*>(iMem->pointer());

    mCblk = cblk;

    size_t temp = cblk->frameCount_;

    if (temp < frameCount || (frameCount ==0 && temp == 0)) {

        // In current design, AudioTrack clientchecks and ensures frame count validity before

        // passing it to AudioFlinger soAudioFlinger should not return a different value except

        // for fast track as it uses a specialmethod of assigning frame count.

        ALOGW("Requested frameCount %u butreceived frameCount %u", frameCount, temp);

    }

    frameCount = temp;

    mAwaitBoost = false;

    if (flags & AUDIO_OUTPUT_FLAG_FAST) {

        if (trackFlags &IAudioFlinger::TRACK_FAST) {

            ALOGV("AUDIO_OUTPUT_FLAG_FASTsuccessful; frameCount %u", frameCount);

            mAwaitBoost = true;

        }else {

            ALOGV("AUDIO_OUTPUT_FLAG_FASTdenied by server; frameCount %u", frameCount);

            // once denied, do not requestagain if IAudioTrack is re-created

            flags = (audio_output_flags_t)(flags & ~AUDIO_OUTPUT_FLAG_FAST);

            mFlags = flags;

        }

        if (sharedBuffer == 0) {

            mNotificationFramesAct =frameCount/2;

        }

    }

    if (sharedBuffer == 0) {

        mBuffers = (char*)cblk +sizeof(audio_track_cblk_t);

    } else {

        mBuffers = sharedBuffer->pointer();

    }

 

   mAudioTrack->attachAuxEffect(mAuxEffectId);

    cblk->bufferTimeoutMs =MAX_STARTUP_TIMEOUT_MS;

    cblk->waitTimeMs = 0;

    mRemainingFrames = mNotificationFramesAct;

    // FIXME don't believe this lie

    mLatency = afLatency + (1000*frameCount) /sampleRate;

    mFrameCount = frameCount;

    // If IAudioTrack is re-created, don't letthe requested frameCount

    // decrease.  This can confuse clients that cacheframeCount().

    if (frameCount > mReqFrameCount) {

        mReqFrameCount = frameCount;

    }

 

    // update proxy

    delete mProxy;

    mProxy = new AudioTrackClientProxy(cblk,mBuffers, frameCount, mFrameSizeAF);

   mProxy->setVolumeLR((uint32_t(uint16_t(mVolume[RIGHT] * 0x1000))<< 16) |

            uint16_t(mVolume[LEFT] * 0x1000));

    mProxy->setSendLevel(mSendLevel);

    mProxy->setSampleRate(mSampleRate);

    if (sharedBuffer != 0) {

        // Force buffer full condition as datais already present in shared memory

        mProxy->stepUser(frameCount);

    }

 

    return NO_ERROR;

}

 

4.  BpAudioFlinger::createTrack()

Frameworks/av/media/libmedia/IAudioFlinger.cpp

virtualsp<IAudioTrack> createTrack(

                               audio_stream_type_t streamType,

                                uint32_tsampleRate,

                                audio_format_tformat,

                               audio_channel_mask_t channelMask,

                                size_tframeCount,

                                track_flags_t*flags,

                                const sp<IMemory>&sharedBuffer,

                               audio_io_handle_t output,

                                pid_t tid,

                                int *sessionId,

                                status_t*status)

    {

        Parcel data, reply;

        sp<IAudioTrack> track;

       data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());

        data.writeInt32((int32_t) streamType);

        data.writeInt32(sampleRate);

        data.writeInt32(format);

        data.writeInt32(channelMask);

        data.writeInt32(frameCount);

        track_flags_t lFlags = flags != NULL ?*flags : (track_flags_t) TRACK_DEFAULT;

        data.writeInt32(lFlags);

       data.writeStrongBinder(sharedBuffer->asBinder());

        data.writeInt32((int32_t) output);

        data.writeInt32((int32_t) tid);

        int lSessionId = 0;

        if (sessionId != NULL) {

            lSessionId = *sessionId;

        }

        data.writeInt32(lSessionId);

        status_tlStatus = remote()->transact(CREATE_TRACK, data, &reply);

        if (lStatus != NO_ERROR) {

            ALOGE("createTrack error:%s", strerror(-lStatus));

        } else {

            lFlags = reply.readInt32();

            if (flags != NULL) {

                *flags = lFlags;

            }

            lSessionId = reply.readInt32();

            if (sessionId != NULL) {

                *sessionId = lSessionId;

            }

            lStatus = reply.readInt32();

 

// reply.readStrongBinder() 将根据 reply 中的 BnAudioTrack(BBinder) 对象生成

// 对应的本地 BpBinder 代理对象;

// interface_cast 将生成一个 BpAudioTrack(BpBinder) 对象,并将该

// BpBinder 保存在 BpAudioTrack 对象的 mRemote 成员变量中.

 

            track =interface_cast<IAudioTrack>(reply.readStrongBinder());

        }

        if (status) {

            *status = lStatus;

        }

        return track;

    }

5.  BpBinder::transact()

Frameworks/native/libs/binder/BpBinder.cpp

status_tBpBinder::transact(

    uint32_t code, const Parcel& data,Parcel* reply, uint32_t flags)

{

    // Once a binder has died, it will nevercome back to life.

    if (mAlive) {

        status_t status= IPCThreadState::self()->transact(

            mHandle,code, data, reply, flags);

        if (status == DEAD_OBJECT) mAlive = 0;

        return status;

    }

 

    return DEAD_OBJECT;

}

6.  IPCThreadState::transact()

与Camera类似。transact() -> writeTransactionData()

                       -> waitForResponse() -> talkWithDriver()

服务端侧: executeCommand() ->

BBinder::transact()

 

7.  BBinder::transact()

Frameworks/native/libs/binder/Binder.cpp

// Server-side entry point for an incoming binder transaction. Handles the
// generic PING_TRANSACTION itself and dispatches everything else to the
// subclass's onTransact() (e.g. BnAudioFlinger::onTransact).
//
// @param code   transaction code from the client proxy
// @param data   request parcel as received from the driver
// @param reply  optional out-parcel for the response (may be NULL for
//               one-way transactions)
// @param flags  transaction flags
// @return result of the dispatched handler, or NO_ERROR for a ping
status_t BBinder::transact(
    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
    // Rewind the parcel so the handler reads arguments from the start.
    data.setDataPosition(0);

    status_t err = NO_ERROR;
    switch (code) {
        case PING_TRANSACTION:
            reply->writeInt32(pingBinder());
            break;
        default:
            // Subclass-specific unmarshalling and method dispatch.
            err = onTransact(code, data, reply, flags);
            break;
    }

    // Rewind the reply so the driver serializes it from the beginning.
    if (reply != NULL) {
        reply->setDataPosition(0);
    }

    return err;
}

 

8.  BnAudioFlinger::onTransact()

Frameworks/av/media/libmedia/IAudioFlinger.cpp

status_tBnAudioFlinger::onTransact(

    uint32_t code, const Parcel& data,Parcel* reply, uint32_t flags)

{

    switch (code) {

        case CREATE_TRACK: {

           CHECK_INTERFACE(IAudioFlinger, data, reply);

            int streamType= data.readInt32();

            uint32_tsampleRate = data.readInt32();

           audio_format_t format = (audio_format_t) data.readInt32();

           audio_channel_mask_t channelMask = data.readInt32();

            size_tframeCount = data.readInt32();

           track_flags_t flags = (track_flags_t) data.readInt32();

           sp<IMemory> buffer =interface_cast<IMemory>(data.readStrongBinder());

           audio_io_handle_t output = (audio_io_handle_t) data.readInt32();

            pid_t tid =(pid_t) data.readInt32();

            intsessionId = data.readInt32();

            status_tstatus;

           sp<IAudioTrack> track = createTrack(

                   (audio_stream_type_t) streamType, sampleRate, format,

                   channelMask, frameCount, &flags, buffer, output, tid,&sessionId, &status);

           reply->writeInt32(flags);

           reply->writeInt32(sessionId);

           reply->writeInt32(status);

           reply->writeStrongBinder(track->asBinder());

            return NO_ERROR;

        } break;

        case OPEN_RECORD: {

            CHECK_INTERFACE(IAudioFlinger,data, reply);

            audio_io_handle_t input =(audio_io_handle_t) data.readInt32();

            uint32_t sampleRate =data.readInt32();

            audio_format_t format = (audio_format_t)data.readInt32();

            audio_channel_mask_t channelMask =data.readInt32();

            size_t frameCount =data.readInt32();

            track_flags_t flags =(track_flags_t) data.readInt32();

            pid_t tid = (pid_t) data.readInt32();

            int sessionId = data.readInt32();

            status_t status;

            sp<IAudioRecord> record =openRecord(input,

                    sampleRate, format,channelMask, frameCount, flags, tid, &sessionId, &status);

            reply->writeInt32(sessionId);

            reply->writeInt32(status);

           reply->writeStrongBinder(record->asBinder());

            return NO_ERROR;

        } break;

…………………………………………………………….

        default:

            returnBBinder::onTransact(code, data, reply, flags);

}

 

class AudioFlinger :   public BinderService<AudioFlinger>,    public BnAudioFlinger

AudioFlinger继承关系: 在前面得到AudioFlinger服务时,服务端回复给客户端的Binder对象,应该是一个AudioFlinger对象,它也是一个BnAudioFlinger对象,即一个BBinder对象。

这个对象被写入服务端回复客户端的reply中。

 

9.  AudioFlinger::createTrack()

Frameworks/av/services/audioflinger/AudioFlinger.cpp

sp<IAudioTrack>AudioFlinger::createTrack(

        audio_stream_type_t streamType,

        uint32_t sampleRate,

        audio_format_t format,

        audio_channel_mask_t channelMask,

        size_t frameCount,

        IAudioFlinger::track_flags_t *flags,

        const sp<IMemory>& sharedBuffer,

        audio_io_handle_t output,

        pid_t tid,

        int *sessionId,

        status_t *status)

{

    sp<PlaybackThread::Track> track;

    sp<TrackHandle> trackHandle;

    sp<Client> client;

    status_t lStatus;

    int lSessionId;

 

    // client AudioTrack::set alreadyimplements AUDIO_STREAM_DEFAULT => AUDIO_STREAM_MUSIC,

    // but if someone uses binder directly theycould bypass that and cause us to crash

    if (uint32_t(streamType) >=AUDIO_STREAM_CNT) {

        ALOGE("createTrack() invalidstream type %d", streamType);

        lStatus = BAD_VALUE;

        goto Exit;

    }

 

    // client is responsible for conversion of8-bit PCM to 16-bit PCM,

    // and we don't yet support 8.24 or 32-bitPCM

    if (audio_is_linear_pcm(format) &&format != AUDIO_FORMAT_PCM_16_BIT) {

        ALOGE("createTrack() invalidformat %d", format);

        lStatus = BAD_VALUE;

        goto Exit;

    }

 

    {

        Mutex::Autolock _l(mLock);

        PlaybackThread *thread =checkPlaybackThread_l(output);

        PlaybackThread *effectThread = NULL;

        if (thread == NULL) {

            ALOGE("no playback threadfound for output handle %d", output);

            lStatus = BAD_VALUE;

            goto Exit;

        }

 

        pid_t pid = IPCThreadState::self()->getCallingPid();

        client =registerPid_l(pid);

 

        ALOGV("createTrack() sessionId:%d", (sessionId == NULL) ? -2 : *sessionId);

        if (sessionId != NULL &&*sessionId != AUDIO_SESSION_OUTPUT_MIX) {

            // check if an effect chain withthe same session ID is present on another

            // output thread and move it here.

            for (size_t i = 0; i <mPlaybackThreads.size(); i++) {

                sp<PlaybackThread> t =mPlaybackThreads.valueAt(i);

                if (mPlaybackThreads.keyAt(i)!= output) {

                    uint32_t sessions =t->hasAudioSession(*sessionId);

                    if (sessions &PlaybackThread::EFFECT_SESSION) {

                        effectThread = t.get();

                        break;

                    }

                }

            }

            lSessionId = *sessionId;

        } else {

            // if no audio session id isprovided, create one here

            lSessionId = nextUniqueId();

            if (sessionId != NULL) {

                *sessionId = lSessionId;

            }

        }

        ALOGV("createTrack() lSessionId:%d", lSessionId);

 

        track =thread->createTrack_l(client, streamType, sampleRate, format,

               channelMask, frameCount, sharedBuffer, lSessionId, flags, tid,&lStatus);

 

        // move effect chain to this outputthread if an effect on same session was waiting

        // for a track to be created

        if (lStatus == NO_ERROR &&effectThread != NULL) {

            Mutex::Autolock _dl(thread->mLock);

            Mutex::Autolock_sl(effectThread->mLock);

            moveEffectChain_l(lSessionId,effectThread, thread, true);

        }

 

        // Look for sync events awaiting for asession to be used.

        for (int i = 0; i < (int)mPendingSyncEvents.size();i++) {

            if(mPendingSyncEvents[i]->triggerSession() == lSessionId) {

                if(thread->isValidSyncEvent(mPendingSyncEvents[i])) {

                    if (lStatus == NO_ERROR) {

                        (void) track->setSyncEvent(mPendingSyncEvents[i]);

                    } else {

                       mPendingSyncEvents[i]->cancel();

                    }

                   mPendingSyncEvents.removeAt(i);

                    i--;

                }

            }

        }

    }

    if (lStatus == NO_ERROR) {

        trackHandle =new TrackHandle(track);

    } else {

        // remove local strong reference toClient before deleting the Track so that the Client

        // destructor is called by theTrackBase destructor with mLock held

        client.clear();

        track.clear();

    }

 

Exit:

    if (status != NULL) {

        *status = lStatus;

    }

    return trackHandle;

}

 

class TrackHandle : public android::BnAudioTrack

class BnAudioTrack : public BnInterface<IAudioTrack>

template<typename INTERFACE>

class BnInterface : public INTERFACE, public BBinder

class BBinder : public IBinder

class IBinder : public virtual RefBase

 

由 TrackHandle 类继承图可知, 因此返回的是一个TrackHandle, 也是一个

BnAudioTrack、BBinder、IBinder对象。

 

10.             AudioFlinger::PlaybackThread::createTrack_l()

Frameworks/av/services/audioflinger/Threads.cpp

// Creates the server-side Track object on this playback thread.
// The _l suffix indicates it is called with a lock already held
// (AudioFlinger::mLock in the visible caller, AudioFlinger::createTrack).
// NOTE(review): this excerpt is abridged — the real parameter list and
// surrounding code were elided by the article ("………" markers below).
sp<AudioFlinger::PlaybackThread::Track>AudioFlinger::PlaybackThread::createTrack_l() {

………………………

        // Regular tracks use the plain Track class; timed tracks are
        // created through the TimedTrack factory instead.
        if (!isTimed) {
            track = new Track(this, client,streamType, sampleRate, format,
                    channelMask, frameCount,sharedBuffer, sessionId, *flags);
        } else {
            track = TimedTrack::create(this, client,streamType, sampleRate, format,
                    channelMask, frameCount,sharedBuffer, sessionId);
        }

        // Allocation failure of the track itself, of its shared control
        // block (getCblk), or of a name slot (name() < 0) makes the track
        // unusable: report NO_MEMORY to the caller.
        if (track == 0 || track->getCblk()== NULL || track->name() < 0) {

            lStatus = NO_MEMORY;

            goto Exit;

        }

        // Register the new track with this playback thread's track list.
       mTracks.add(track);

………………………

    return track;

}

 

 

 

 

  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值