Android -- A Brief Analysis of the AudioTrack Internal Implementation in the Audio System (Part 3)



In the previous article we walked through a simple AudioTrack example and took a first look at the Java-side implementation behind each step of the AudioTrack API. Continuing from there, and following the Demo, our analysis of the Java AudioTrack implementation boiled down to six important function calls:

  • native_get_min_buff_size(): obtain the minimum buffer size
  • native_setup(): create the AudioTrack
  • native_start(): start AudioTrack playback
  • native_write_byte()/native_write_short()...: write audio data to the AudioTrack
  • native_stop(): stop AudioTrack playback
  • native_release(): release the system resources bound to the AudioTrack
These calls hand the Java-layer work over to the JNI layer, android_media_AudioTrack.cpp; below we look at how each of them is handled in JNI.
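As a quick reminder of how the two layers are wired together: the native_* methods declared in AudioTrack.java are bound to the android_media_AudioTrack_* C++ functions through a JNINativeMethod table registered when the runtime loads the JNI library. A minimal sketch of that mapping is shown below; the entries are abbreviated, and the real table in android_media_AudioTrack.cpp contains more methods and also caches field/method IDs during registration:

// Sketch of the JNI method table (abbreviated): each entry maps a Java native
// method name and signature to its C++ implementation in this file.
static const JNINativeMethod gMethods[] = {
    {"native_get_min_buff_size", "(III)I", (void *)android_media_AudioTrack_get_min_buff_size},
    {"native_start",             "()V",    (void *)android_media_AudioTrack_start},
    {"native_stop",              "()V",    (void *)android_media_AudioTrack_stop},
    {"native_release",           "()V",    (void *)android_media_AudioTrack_release},
    // ... native_setup, native_write_byte/native_write_short, etc.
};

// Called while libandroid_runtime registers its JNI methods.
int register_android_media_AudioTrack(JNIEnv *env) {
    return AndroidRuntime::registerNativeMethods(
            env, kClassPathName /* "android/media/AudioTrack" */, gMethods, NELEM(gMethods));
}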


1. android_media_AudioTrack_get_min_buff_size()


When we query the minimum required buffer size, the native_get_min_buff_size() method is called; its JNI implementation is the following function:

// ----------------------------------------------------------------------------
// returns the minimum required size for the successful creation of a streaming AudioTrack
// returns -1 if there was an error querying the hardware.
static jint android_media_AudioTrack_get_min_buff_size(JNIEnv *env,  jobject thiz,
    jint sampleRateInHertz, jint channelCount, jint audioFormat) {

    size_t frameCount;
    const status_t status = AudioTrack::getMinFrameCount(&frameCount, AUDIO_STREAM_DEFAULT,
            sampleRateInHertz);// compute the minimum number of frames required from the queried information
    if (status != NO_ERROR) {
        ALOGE("AudioTrack::getMinFrameCount() for sample rate %d failed with status %d",
                sampleRateInHertz, status);
        return -1;
    }
    const audio_format_t format = audioFormatToNative(audioFormat);// convert the Java format constant to the native audio_format_t
    if (audio_has_proportional_frames(format)) {// returns true only for linear PCM and IEC61937
        const size_t bytesPerSample = audio_bytes_per_sample(format);// bytes occupied by one sample in this format
        return frameCount * channelCount * bytesPerSample;// bytes per frame = bytes per sample x channel count; min_buf_size = bytes per frame x minimum frame count
    } else {
        return frameCount;
    }
}

The function first calls AudioTrack::getMinFrameCount(). This AudioTrack lives in native space: it corresponds to the Java AudioTrack we used earlier, serves as its native-side proxy, and is responsible for interacting with AudioFlinger.

getMinFrameCount() queries, through AudioSystem, various capabilities supported by the current audio hardware and combines them to derive the minimum number of frames required:

// static
status_t AudioTrack::getMinFrameCount(
        size_t* frameCount,
        audio_stream_type_t streamType,
        uint32_t sampleRate)
{
    if (frameCount == NULL) {
        return BAD_VALUE;
    }

    // FIXME handle in server, like createTrack_l(), possible missing info:
    //          audio_io_handle_t output
    //          audio_format_t format
    //          audio_channel_mask_t channelMask
    //          audio_output_flags_t flags (FAST)
    uint32_t afSampleRate;
    status_t status;
    status = AudioSystem::getOutputSamplingRate(&afSampleRate, streamType);// query the sample rate used by the hardware output; typically the highest supported rate
    if (status != NO_ERROR) {
        ALOGE("Unable to query output sample rate for stream type %d; status %d",
                streamType, status);
        return status;
    }
    size_t afFrameCount;
    status = AudioSystem::getOutputFrameCount(&afFrameCount, streamType);// query the size of the hardware's internal buffer, in frames
    if (status != NO_ERROR) {
        ALOGE("Unable to query output frame count for stream type %d; status %d",
                streamType, status);
        return status;
    }
    uint32_t afLatency;
    status = AudioSystem::getOutputLatency(&afLatency, streamType);// query the hardware output latency
    if (status != NO_ERROR) {
        ALOGE("Unable to query output latency for stream type %d; status %d",
                streamType, status);
        return status;
    }

    // When called from createTrack, speed is 1.0f (normal speed).
    // This is rechecked again on setting playback rate (TODO: on setting sample rate, too).
    *frameCount = calculateMinFrameCount(afLatency, afFrameCount, afSampleRate, sampleRate, 1.0f
            /*, 0 notificationsPerBufferReq*/);// compute the minimum frame count required

    // The formula above should always produce a non-zero value under normal circumstances:
    // AudioTrack.SAMPLE_RATE_HZ_MIN <= sampleRate <= AudioTrack.SAMPLE_RATE_HZ_MAX.
    // Return error in the unlikely event that it does not, as that's part of the API contract.
    if (*frameCount == 0) {
        ALOGE("AudioTrack::getMinFrameCount failed for streamType %d, sampleRate %u",
                streamType, sampleRate);
        return BAD_VALUE;
    }
    ALOGV("getMinFrameCount=%zu: afFrameCount=%zu, afSampleRate=%u, afLatency=%u",
            *frameCount, afFrameCount, afSampleRate, afLatency);
    return NO_ERROR;
}
This AudioSystem also lives in native space; similarly, a corresponding AudioSystem class exists on the Java side, and the relationship between the two mirrors that of AudioTrack. AudioSystem can be regarded as a utility class: it wraps many methods for querying audio device information, and most of them are implemented with the help of AudioFlinger. Once the hardware information has been queried, the minimum number of frames needed is computed:
// Must match similar computation in createTrack_l in Threads.cpp.
// TODO: Move to a common library
static size_t calculateMinFrameCount(
        uint32_t afLatencyMs, uint32_t afFrameCount, uint32_t afSampleRate,
        uint32_t sampleRate, float speed /*, uint32_t notificationsPerBufferReq*/)
{
    // Ensure that buffer depth covers at least audio hardware latency
    uint32_t minBufCount = afLatencyMs / ((1000 * afFrameCount) / afSampleRate);// minimum number of hardware-sized buffers needed to cover the output latency; at least 2
    if (minBufCount < 2) {
        minBufCount = 2;
    }
#if 0
    // The notificationsPerBufferReq parameter is not yet used for non-fast tracks,
    // but keeping the code here to make it easier to add later.
    if (minBufCount < notificationsPerBufferReq) {
        minBufCount = notificationsPerBufferReq;
    }
#endif
    ALOGV("calculateMinFrameCount afLatency %u  afFrameCount %u  afSampleRate %u  "
            "sampleRate %u  speed %f  minBufCount: %u" /*"  notificationsPerBufferReq %u"*/,
            afLatencyMs, afFrameCount, afSampleRate, sampleRate, speed, minBufCount
            /*, notificationsPerBufferReq*/);
    return minBufCount * sourceFramesNeededWithTimestretch(
            sampleRate, afFrameCount, afSampleRate, speed);
}
The resulting frame count is then returned. Back in the JNI function, the minimum buffer size is computed:
if (audio_has_proportional_frames(format)) {// returns true only for linear PCM and IEC61937
        const size_t bytesPerSample = audio_bytes_per_sample(format);// bytes occupied by one sample in this format
        return frameCount * channelCount * bytesPerSample;// bytes per frame = bytes per sample x channel count; min_buf_size = bytes per frame x minimum frame count
    } else {
        return frameCount;
    }
For our scenario, then, the final buffer size is:
frameCount * channelCount * bytesPerSample

  1. bytes per frame = bytes per sample x channel count;
  2. min_buf_size = bytes per frame x minimum frame count (a worked example follows below).
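As a concrete illustration (the numbers are made up purely to show the arithmetic): suppose AudioSystem reports afSampleRate = 48000, afFrameCount = 960 and afLatency = 60 ms for the output. One hardware buffer then covers 1000 * 960 / 48000 = 20 ms, so minBufCount = 60 / 20 = 3. For a track playing 16-bit stereo PCM at 44100 Hz (speed 1.0), sourceFramesNeededWithTimestretch() scales the 960-frame hardware buffer to roughly 960 * 44100 / 48000 ≈ 882 source frames (plus a small rounding margin), giving frameCount ≈ 3 * 882 = 2646 and min_buf_size ≈ 2646 frames * 2 channels * 2 bytes ≈ 10584 bytes.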

At this point we have obtained the min_buf_size we need. Strictly speaking this article only covers the JNI layer, but since the Native AudioTrack code involved in this part is fairly simple, we analyzed it along the way.


2. native_setup()


With the required min_buf_size in hand, we create the Java AudioTrack instance and prepare for playback. Constructing the Java AudioTrack object eventually calls the native_setup() function, whose JNI implementation is:

// ----------------------------------------------------------------------------
static jint
android_media_AudioTrack_setup(JNIEnv *env, jobject thiz, jobject weak_this, jobject jaa,
        jintArray jSampleRate, jint channelPositionMask, jint channelIndexMask,
        jint audioFormat, jint buffSizeInBytes, jint memoryMode, jintArray jSession,
        jlong nativeAudioTrack/*0*/) {

    ALOGV("sampleRates=%p, channel mask=%x, index mask=%x, audioFormat(Java)=%d, buffSize=%d"
        "nativeAudioTrack=0x%llX",
        jSampleRate, channelPositionMask, channelIndexMask, audioFormat, buffSizeInBytes,
        nativeAudioTrack);

    sp<AudioTrack> lpTrack = 0;

    if (jSession == NULL) {
        ALOGE("Error creating AudioTrack: invalid session ID pointer");
        return (jint) AUDIO_JAVA_ERROR;
    }

    jint* nSession = (jint *) env->GetPrimitiveArrayCritical(jSession, NULL);
    if (nSession == NULL) {
        ALOGE("Error creating AudioTrack: Error retrieving session id pointer");
        return (jint) AUDIO_JAVA_ERROR;
    }
    audio_session_t sessionId = (audio_session_t) nSession[0];
    env->ReleasePrimitiveArrayCritical(jSession, nSession, 0);
    nSession = NULL;

    AudioTrackJniStorage* lpJniStorage = NULL;// stores some important bookkeeping information

    audio_attributes_t *paa = NULL;// holds the settings copied from the Java AudioAttributes

    jclass clazz = env->GetObjectClass(thiz);// get the class object of the Java AudioTrack
    if (clazz == NULL) {
        ALOGE("Can't find %s when setting up callback.", kClassPathName);
        return (jint) AUDIOTRACK_ERROR_SETUP_NATIVEINITFAILED;
    }

    // if we pass in an existing *Native* AudioTrack, we don't need to create/initialize one.
    if (nativeAudioTrack == 0) {// nativeAudioTrack tells us whether a native AudioTrack needs to be created here
        if (jaa == 0) {
            ALOGE("Error creating AudioTrack: invalid audio attributes");
            return (jint) AUDIO_JAVA_ERROR;
        }

        if (jSampleRate == 0) {
            ALOGE("Error creating AudioTrack: invalid sample rates");
            return (jint) AUDIO_JAVA_ERROR;
        }

        int* sampleRates = env->GetIntArrayElements(jSampleRate, NULL);
        int sampleRateInHertz = sampleRates[0];
        env->ReleaseIntArrayElements(jSampleRate, sampleRates, JNI_ABORT);

        // Invalid channel representations are caught by !audio_is_output_channel() below.
        audio_channel_mask_t nativeChannelMask = nativeChannelMaskFromJavaChannelMasks(
                channelPositionMask, channelIndexMask);
        if (!audio_is_output_channel(nativeChannelMask)) {
            ALOGE("Error creating AudioTrack: invalid native channel mask %#x.", nativeChannelMask);
            return (jint) AUDIOTRACK_ERROR_SETUP_INVALIDCHANNELMASK;
        }

        uint32_t channelCount = audio_channel_count_from_out_mask(nativeChannelMask);

        // check the format.
        // This function was called from Java, so we compare the format against the Java constants
        audio_format_t format = audioFormatToNative(audioFormat);
        if (format == AUDIO_FORMAT_INVALID) {
            ALOGE("Error creating AudioTrack: unsupported audio format %d.", audioFormat);
            return (jint) AUDIOTRACK_ERROR_SETUP_INVALIDFORMAT;
        }

        // compute the frame count
        size_t frameCount;
        if (audio_is_linear_pcm(format)) {
            const size_t bytesPerSample = audio_bytes_per_sample(format);
            frameCount = buffSizeInBytes / (channelCount * bytesPerSample);
        } else {
            frameCount = buffSizeInBytes;
        }

        // create the native AudioTrack object
        lpTrack = new AudioTrack();// create the native AudioTrack with the no-arg constructor; it only initializes a few members

        // read the AudioAttributes values
        paa = (audio_attributes_t *) calloc(1, sizeof(audio_attributes_t));// allocate paa
        // copy the configuration held by the Java AudioAttributes into the paa struct
        const jstring jtags =
                (jstring) env->GetObjectField(jaa, javaAudioAttrFields.fieldFormattedTags);
        const char* tags = env->GetStringUTFChars(jtags, NULL);
        // copying array size -1, char array for tags was calloc'd, no need to NULL-terminate it
        strncpy(paa->tags, tags, AUDIO_ATTRIBUTES_TAGS_MAX_SIZE - 1);
        env->ReleaseStringUTFChars(jtags, tags);
        paa->usage = (audio_usage_t) env->GetIntField(jaa, javaAudioAttrFields.fieldUsage);
        paa->content_type =
                (audio_content_type_t) env->GetIntField(jaa, javaAudioAttrFields.fieldContentType);
        paa->flags = env->GetIntField(jaa, javaAudioAttrFields.fieldFlags);

        ALOGV("AudioTrack_setup for usage=%d content=%d flags=0x%#x tags=%s",
                paa->usage, paa->content_type, paa->flags, paa->tags);

        // initialize the callback information:
        // this data will be passed with every AudioTrack callback
        lpJniStorage = new AudioTrackJniStorage();// create the AudioTrackJniStorage instance

        // store references in AudioTrackJniStorage::mCallbackData; they are used by later callbacks
        lpJniStorage->mCallbackData.audioTrack_class = (jclass)env->NewGlobalRef(clazz);// global ref to the Java AudioTrack class
        // we use a weak reference so the AudioTrack object can be garbage collected.
        lpJniStorage->mCallbackData.audioTrack_ref = env->NewGlobalRef(weak_this);// global ref to the weak reference of the Java AudioTrack object
        lpJniStorage->mCallbackData.busy = false;

        // initialize the native AudioTrack object
        status_t status = NO_ERROR;
        switch (memoryMode) {
        case MODE_STREAM:

            status = lpTrack->set(
                    AUDIO_STREAM_DEFAULT,// stream type, but more info conveyed in paa (last argument)
                    sampleRateInHertz,
                    format,// word length, PCM
                    nativeChannelMask,
                    frameCount,
                    AUDIO_OUTPUT_FLAG_NONE,
                    audioCallback, &(lpJniStorage->mCallbackData),//callback, callback data (user)
                    0,// notificationFrames == 0 since not using EVENT_MORE_DATA to feed the AudioTrack
                    0,// shared mem
                    true,// thread can call Java
                    sessionId,// audio session ID
                    AudioTrack::TRANSFER_SYNC,
                    NULL,                         // default offloadInfo
                    -1, -1,                       // default uid, pid values
                    paa);
            break;

        case MODE_STATIC:
            // AudioTrack is using shared memory

            if (!lpJniStorage->allocSharedMem(buffSizeInBytes)) {
                ALOGE("Error creating AudioTrack in static mode: error creating mem heap base");
                goto native_init_failure;
            }

            status = lpTrack->set(
                    AUDIO_STREAM_DEFAULT,// stream type, but more info conveyed in paa (last argument)
                    sampleRateInHertz,
                    format,// word length, PCM
                    nativeChannelMask,
                    frameCount,
                    AUDIO_OUTPUT_FLAG_NONE,
                    audioCallback, &(lpJniStorage->mCallbackData),//callback, callback data (user));
                    0,// notificationFrames == 0 since not using EVENT_MORE_DATA to feed the AudioTrack
                    lpJniStorage->mMemBase,// shared mem
                    true,// thread can call Java
                    sessionId,// audio session ID
                    AudioTrack::TRANSFER_SHARED,
                    NULL,                         // default offloadInfo
                    -1, -1,                       // default uid, pid values
                    paa);
            break;

        default:
            ALOGE("Unknown mode %d", memoryMode);
            goto native_init_failure;
        }

        if (status != NO_ERROR) {
            ALOGE("Error %d initializing AudioTrack", status);
            goto native_init_failure;
        }
    } else {  // end if (nativeAudioTrack == 0)
        lpTrack = (AudioTrack*)nativeAudioTrack;
        // TODO: We need to find out which members of the Java AudioTrack might
        // need to be initialized from the Native AudioTrack
        // these are directly returned from getters:
        //  mSampleRate
        //  mAudioFormat
        //  mStreamType
        //  mChannelConfiguration
        //  mChannelCount
        //  mState (?)
        //  mPlayState (?)
        // these may be used internally (Java AudioTrack.audioParamCheck():
        //  mChannelMask
        //  mChannelIndexMask
        //  mDataLoadMode

        // initialize the callback information:
        // this data will be passed with every AudioTrack callback
        lpJniStorage = new AudioTrackJniStorage();
        lpJniStorage->mCallbackData.audioTrack_class = (jclass)env->NewGlobalRef(clazz);
        // we use a weak reference so the AudioTrack object can be garbage collected.
        lpJniStorage->mCallbackData.audioTrack_ref = env->NewGlobalRef(weak_this);
        lpJniStorage->mCallbackData.busy = false;
    }

    nSession = (jint *) env->GetPrimitiveArrayCritical(jSession, NULL);
    if (nSession == NULL) {
        ALOGE("Error creating AudioTrack: Error retrieving session id pointer");
        goto native_init_failure;
    }
    // read the audio session ID back from AudioTrack in case we create a new session
    nSession[0] = lpTrack->getSessionId();
    env->ReleasePrimitiveArrayCritical(jSession, nSession, 0);
    nSession = NULL;

    {
        const jint elements[1] = { (jint) lpTrack->getSampleRate() };
        env->SetIntArrayRegion(jSampleRate, 0, 1, elements);
    }

    {   // scope for the lock
        Mutex::Autolock l(sLock);
        sAudioTrackCallBackCookies.add(&lpJniStorage->mCallbackData);
    }
    // save our newly created C++ AudioTrack in the "nativeTrackInJavaObj" field
    // of the Java object (in mNativeTrackInJavaObj)
    setAudioTrack(env, thiz, lpTrack);// store the native AudioTrack's address in Java AudioTrack.mNativeTrackInJavaObj, binding the two objects

    // save the JNI resources so we can free them later
    //ALOGV("storing lpJniStorage: %x\n", (long)lpJniStorage);
    env->SetLongField(thiz, javaAudioTrackFields.jniData, (jlong)lpJniStorage);

    // since we had audio attributes, the stream type was derived from them during the
    // creation of the native AudioTrack: push the same value to the Java object
    env->SetIntField(thiz, javaAudioTrackFields.fieldStreamType, (jint) lpTrack->streamType());
    if (paa != NULL) {
        // audio attributes were copied in AudioTrack creation
        free(paa);
        paa = NULL;
    }


    return (jint) AUDIO_JAVA_SUCCESS;

    // failures:
native_init_failure:
    if (paa != NULL) {
        free(paa);
    }
    if (nSession != NULL) {
        env->ReleasePrimitiveArrayCritical(jSession, nSession, 0);
    }
    env->DeleteGlobalRef(lpJniStorage->mCallbackData.audioTrack_class);
    env->DeleteGlobalRef(lpJniStorage->mCallbackData.audioTrack_ref);
    delete lpJniStorage;
    env->SetLongField(thiz, javaAudioTrackFields.jniData, 0);

    // lpTrack goes out of scope, so reference count drops to zero
    return (jint) AUDIOTRACK_ERROR_SETUP_NATIVEINITFAILED;
}
Importantly, the function first creates the Native AudioTrack and AudioTrackJniStorage instances. The AudioTrack no-arg constructor is fairly simple and only initializes a few member variables and state fields:
// no-arg constructor: initialize the state and a few member variables
AudioTrack::AudioTrack()
    : mStatus(NO_INIT), // the internal status of this AudioTrack
      mState(STATE_STOPPED), // the current playback state of the track
      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
      mPreviousSchedulingGroup(SP_DEFAULT),
      mPausedPosition(0),
      mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE)
{
    mAttributes.content_type = AUDIO_CONTENT_TYPE_UNKNOWN;
    mAttributes.usage = AUDIO_USAGE_UNKNOWN;
    mAttributes.flags = 0x0;
    strcpy(mAttributes.tags, "");
}
AudioTrackJniStorage, by contrast, is more of a callback/helper class:
class AudioTrackJniStorage {
    public:
        sp<MemoryHeapBase>         mMemHeap;
        sp<MemoryBase>             mMemBase;
        audiotrack_callback_cookie mCallbackData;
        sp<JNIDeviceCallback>      mDeviceCallback;

    AudioTrackJniStorage() {
        mCallbackData.audioTrack_class = 0;// the Java AudioTrack class object
        mCallbackData.audioTrack_ref = 0;// the weak reference to the Java AudioTrack object
    }

    ~AudioTrackJniStorage() {
        mMemBase.clear();
        mMemHeap.clear();
    }

    bool allocSharedMem(int sizeInBytes) {
        mMemHeap = new MemoryHeapBase(sizeInBytes, 0, "AudioTrack Heap Base");
        if (mMemHeap->getHeapID() < 0) {
            return false;
        }
        mMemBase = new MemoryBase(mMemHeap, 0, sizeInBytes);
        return true;
    }
};
class JNIDeviceCallback: public AudioSystem::AudioDeviceCallback
{
public:
    JNIDeviceCallback(JNIEnv* env, jobject thiz, jobject weak_thiz, jmethodID postEventFromNative);
    ~JNIDeviceCallback();

    virtual void onAudioDeviceUpdate(audio_io_handle_t audioIo,
                                     audio_port_handle_t deviceId);

private:
    void sendEvent(int event);

    jclass      mClass;     // Reference to AudioTrack/AudioRecord class
    jobject     mObject;    // Weak ref to AudioTrack/AudioRecord Java object to call on
    jmethodID   mPostEventFromNative; // postEventFromNative method ID.
};
struct audiotrack_callback_cookie {
    jclass      audioTrack_class;
    jobject     audioTrack_ref;
    bool        busy;
    Condition   cond;
};
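The mCallbackData cookie also explains why global references are created during setup: the cookie is handed to the native AudioTrack as the "user" pointer of its event callback, and the callback uses it to post events back to the Java object. A simplified sketch of how such a callback consumes the cookie is shown below; the real audioCallback() in android_media_AudioTrack.cpp additionally handles the busy flag and more event types, and the method-ID field name here is from memory, so treat this as an outline rather than the actual implementation:

// Simplified sketch: the callback passed to AudioTrack::set(), invoked on a native
// callback thread with &mCallbackData supplied as "user".
static void audioCallback(int event, void* user, void* info) {
    audiotrack_callback_cookie* callbackInfo = (audiotrack_callback_cookie*)user;
    if (event == AudioTrack::EVENT_MARKER || event == AudioTrack::EVENT_NEW_POS) {
        JNIEnv* env = AndroidRuntime::getJNIEnv();   // valid because threadCanCallJava == true
        if (env != NULL) {
            // forward the event to AudioTrack.postEventFromNative() on the Java side
            env->CallStaticVoidMethod(
                    callbackInfo->audioTrack_class,
                    javaAudioTrackFields.postNativeEventInJava,
                    callbackInfo->audioTrack_ref, event, 0, 0, NULL);
        }
    }
}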
MemoryHeapBase and MemoryBase are both Binder classes: they use Binder for inter-process communication, and both deal with shared memory. Shared memory is exactly the mechanism AudioTrack and AudioFlinger use to exchange audio data. Let's look at their type definitions:
class MemoryBase : public BnMemory 
{
public:
    MemoryBase(const sp<IMemoryHeap>& heap, ssize_t offset, size_t size);
    virtual ~MemoryBase();
    virtual sp<IMemoryHeap> getMemory(ssize_t* offset, size_t* size) const;

protected:
    size_t getSize() const { return mSize; }// size of the memory region
    ssize_t getOffset() const { return mOffset; }
    const sp<IMemoryHeap>& getHeap() const { return mHeap; }// the MemoryHeapBase instance that created the shared memory

private:
    size_t          mSize;// size of the shared memory region
    ssize_t         mOffset;// offset into the heap
    sp<IMemoryHeap> mHeap;// the MemoryHeapBase instance that created the shared memory
};
class MemoryHeapBase : public virtual BnMemoryHeap
{
public:
    enum {
        READ_ONLY = IMemoryHeap::READ_ONLY,
        // memory won't be mapped locally, but will be mapped in the remote
        // process.
        DONT_MAP_LOCALLY = 0x00000100,
        NO_CACHING = 0x00000200
    };

    /*
     * maps the memory referenced by fd. but DOESN'T take ownership
     * of the filedescriptor (it makes a copy with dup()
     */
    MemoryHeapBase(int fd, size_t size, uint32_t flags = 0, uint32_t offset = 0);

    /*
     * maps memory from the given device
     */
    MemoryHeapBase(const char* device, size_t size = 0, uint32_t flags = 0);

    /*
     * maps memory from ashmem, with the given name for debugging
     */
    MemoryHeapBase(size_t size, uint32_t flags = 0, char const* name = NULL);

    virtual ~MemoryHeapBase();

    /* implement IMemoryHeap interface */
    virtual int         getHeapID() const;

    /* virtual address of the heap. returns MAP_FAILED in case of error */
    virtual void*       getBase() const;

    virtual size_t      getSize() const;
    virtual uint32_t    getFlags() const;
    virtual uint32_t    getOffset() const;

    const char*         getDevice() const;

    /* this closes this heap -- use carefully */
    void dispose();

    /* this is only needed as a workaround, use only if you know
     * what you are doing */
    status_t setDevice(const char* device) {
        if (mDevice == 0)
            mDevice = device;
        return mDevice ? NO_ERROR : ALREADY_EXISTS;
    }

protected:
            MemoryHeapBase();
    // init() takes ownership of fd
    status_t init(int fd, void *base, int size,
            int flags = 0, const char* device = NULL);

private:
    status_t mapfd(int fd, size_t size, uint32_t offset = 0);

    int         mFD;// file descriptor of the shared memory
    size_t      mSize;// size of the shared memory that was created
    void*       mBase;// base address of the mapped shared memory
    uint32_t    mFlags;
    const char* mDevice;
    bool        mNeedUnmap;
    uint32_t    mOffset;
};

AudioTrackJniStorage provides the function allocSharedMem() for creating shared memory:

bool allocSharedMem(int sizeInBytes) {
        mMemHeap = new MemoryHeapBase(sizeInBytes, 0, "AudioTrack Heap Base");
        if (mMemHeap->getHeapID() < 0) {
            return false;
        }
        mMemBase = new MemoryBase(mMemHeap, 0, sizeInBytes);
        return true;
    }

Its parameter is the size of the shared memory to create. From the function and the type definitions we can see that MemoryBase exposes the details of the shared memory obtained through MemoryHeapBase, while MemoryHeapBase is the class that actually performs the creation of the shared memory.
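To make the division of labor concrete, here is a minimal, hypothetical sketch (not code from android_media_AudioTrack.cpp; pcmData and pcmSize are made-up names) of allocating such a region and filling it through the IMemory interface, which is essentially what the static-mode write path does later via sharedBuffer()->pointer():

// Hypothetical sketch: allocate an ashmem-backed region with AudioTrackJniStorage
// and copy audio data into it through the IMemory interface.
AudioTrackJniStorage storage;
if (storage.allocSharedMem(buffSizeInBytes)) {        // creates MemoryHeapBase + MemoryBase
    sp<IMemory> shared = storage.mMemBase;            // MemoryBase implements the IMemory interface
    size_t n = pcmSize < shared->size() ? pcmSize : shared->size();
    memcpy(shared->pointer(), pcmData, n);            // fill the mapped region with PCM samples
    // "shared" is what gets passed to AudioTrack::set() as the sharedBuffer argument
}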

Let's look at how MemoryHeapBase is created:

MemoryHeapBase::MemoryHeapBase(size_t size, uint32_t flags, char const * name)
    : mFD(-1), mSize(0), mBase(MAP_FAILED), mFlags(flags),
      mDevice(0), mNeedUnmap(false), mOffset(0)
{
    const size_t pagesize = getpagesize();
    size = ((size + pagesize-1) & ~(pagesize-1));
    int fd = ashmem_create_region(name == NULL ? "MemoryHeapBase" : name, size);// provided by libcutils: opens /dev/ashmem and returns an fd backing a region of the requested size
    ALOGE_IF(fd<0, "error creating ashmem region: %s", strerror(errno));
    if (fd >= 0) {
        if (mapfd(fd, size) == NO_ERROR) {// mmap the region to obtain its address, and record the information about it
            if (flags & READ_ONLY) {
                ashmem_set_prot_region(fd, PROT_READ);
            }
        }
    }
}
Creating a MemoryHeapBase is in effect the creation and initialization of a piece of shared memory. If the file descriptor returned for the region is valid, the region is also mapped into the process's address space:
status_t MemoryHeapBase::mapfd(int fd, size_t size, uint32_t offset)
{
    if (size == 0) {
        // try to figure out the size automatically
        struct stat sb;
        if (fstat(fd, &sb) == 0)
            size = sb.st_size;
        // if it didn't work, let mmap() fail.
    }

    if ((mFlags & DONT_MAP_LOCALLY) == 0) {
        void* base = (uint8_t*)mmap(0, size,
                PROT_READ|PROT_WRITE, MAP_SHARED, fd, offset);
        if (base == MAP_FAILED) {
            ALOGE("mmap(fd=%d, size=%u) failed (%s)",
                    fd, uint32_t(size), strerror(errno));
            close(fd);
            return -errno;
        }
        //ALOGD("mmap(fd=%d, base=%p, size=%lu)", fd, base, size);
        mBase = base;
        mNeedUnmap = true;
    } else  {
        mBase = 0; // not MAP_FAILED
        mNeedUnmap = false;
    }
    mFD = fd;
    mSize = size;
    mOffset = offset;
    return NO_ERROR;
}
mapfd() also records some information about this shared memory, such as its file descriptor and base address. allocSharedMem() then creates the MemoryBase object:
MemoryBase::MemoryBase(const sp<IMemoryHeap>& heap,
        ssize_t offset, size_t size)
    : mSize(size), mOffset(offset), mHeap(heap)// initialize the members
{
}

sp<IMemoryHeap> MemoryBase::getMemory(ssize_t* offset, size_t* size) const
{
    if (offset) *offset = mOffset;// return the offset into the heap
    if (size)   *size = mSize;// return the size of the memory region
    return mHeap;// return the MemoryHeapBase instance that created the shared memory
}

MemoryBase::~MemoryBase()
{
}
Its construction is trivial: it simply holds on to the MemoryHeapBase instance created earlier.


Back in android_media_AudioTrack_setup(): after the AudioTrack and AudioTrackJniStorage have been created and some initialization has been performed, AudioTrack::set() is executed. How it is called depends on the memoryMode passed down from the upper layer, i.e. on whether we are in STREAM or STATIC mode. The call differs slightly between the two modes:

        switch (memoryMode) {
        case MODE_STREAM:

            status = lpTrack->set(
                    AUDIO_STREAM_DEFAULT,// stream type, but more info conveyed in paa (last argument)
                    sampleRateInHertz,
                    format,// word length, PCM
                    nativeChannelMask,
                    frameCount,
                    AUDIO_OUTPUT_FLAG_NONE,
                    audioCallback, &(lpJniStorage->mCallbackData),//callback, callback data (user)
                    0,// notificationFrames == 0 since not using EVENT_MORE_DATA to feed the AudioTrack
                    0,// shared mem
                    true,// thread can call Java
                    sessionId,// audio session ID
                    AudioTrack::TRANSFER_SYNC,
                    NULL,                         // default offloadInfo
                    -1, -1,                       // default uid, pid values
                    paa);
            break;

        case MODE_STATIC:
            // AudioTrack is using shared memory

            if (!lpJniStorage->allocSharedMem(buffSizeInBytes)) {
                ALOGE("Error creating AudioTrack in static mode: error creating mem heap base");
                goto native_init_failure;
            }

            status = lpTrack->set(
                    AUDIO_STREAM_DEFAULT,// stream type, but more info conveyed in paa (last argument)
                    sampleRateInHertz,
                    format,// word length, PCM
                    nativeChannelMask,
                    frameCount,
                    AUDIO_OUTPUT_FLAG_NONE,
                    audioCallback, &(lpJniStorage->mCallbackData),//callback, callback data (user));
                    0,// notificationFrames == 0 since not using EVENT_MORE_DATA to feed the AudioTrack
                    lpJniStorage->mMemBase,// shared mem
                    true,// thread can call Java
                    sessionId,// audio session ID
                    AudioTrack::TRANSFER_SHARED,
                    NULL,                         // default offloadInfo
                    -1, -1,                       // default uid, pid values
                    paa);
            break;

        default:
            ALOGE("Unknown mode %d", memoryMode);
            goto native_init_failure;
        }
Compare this with the declaration of AudioTrack::set():
status_t AudioTrack::set(
        audio_stream_type_t streamType,
        uint32_t sampleRate,
        audio_format_t format,
        audio_channel_mask_t channelMask,
        size_t frameCount,
        audio_output_flags_t flags,
        callback_t cbf,
        void* user,
        int32_t notificationFrames,
        const sp<IMemory>& sharedBuffer,
        bool threadCanCallJava,
        audio_session_t sessionId,
        transfer_type transferType,
        const audio_offload_info_t *offloadInfo,
        int uid,
        pid_t pid,
        const audio_attributes_t* pAttributes,
        bool doNotReconnect,
        float maxRequiredSpeed)
We can see that in STREAM mode no shared memory is passed in; the shared memory needed in that case is created by AudioFlinger. In STATIC mode the shared memory region is created first, and its MemoryBase instance is passed in.
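The other visible difference between the two calls is the transfer type. The transfer_type enum in AudioTrack.h describes how data reaches the track; paraphrased from memory (check the header in your source tree for the exact definition), it looks roughly like this:

    /* How data is transferred to the AudioTrack (paraphrased from AudioTrack.h) */
    enum transfer_type {
        TRANSFER_DEFAULT,   // not specified explicitly; inferred from the other parameters
        TRANSFER_CALLBACK,  // data is supplied through the EVENT_MORE_DATA callback
        TRANSFER_OBTAIN,    // the client calls obtainBuffer()/releaseBuffer() directly
        TRANSFER_SYNC,      // the client calls write(): the MODE_STREAM case above
        TRANSFER_SHARED,    // the whole buffer is handed over up front: the MODE_STATIC case
    };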
The processing after the AudioTrack::set() call is fairly simple; the one thing worth noting is that the newly created Native AudioTrack instance is associated with the Java AudioTrack:
    setAudioTrack(env, thiz, lpTrack);// store the native AudioTrack's address in Java AudioTrack.mNativeTrackInJavaObj
static sp<AudioTrack> setAudioTrack(JNIEnv* env, jobject thiz, const sp<AudioTrack>& at)
{
    Mutex::Autolock l(sLock);
    sp<AudioTrack> old =
            (AudioTrack*)env->GetLongField(thiz, javaAudioTrackFields.nativeTrackInJavaObj);
    if (at.get()) {
        at->incStrong((void*)setAudioTrack);
    }
    if (old != 0) {
        old->decStrong((void*)setAudioTrack);
    }
    env->SetLongField(thiz, javaAudioTrackFields.nativeTrackInJavaObj, (jlong)at.get());
    return old;
}
At this point the Java AudioTrack holds the address of the Native AudioTrack instance; all subsequent operations are carried out on the basis of this one-to-one correspondence between the Java and Native objects. The rest of the function needs no further discussion.


From this part of the analysis we obtained one important function call: AudioTrack::set().

3. native_start()


After the Java AudioTrack instance has been created, we call play() to begin playback; this eventually reaches the native_start() function, whose JNI implementation is:

static void
android_media_AudioTrack_start(JNIEnv *env, jobject thiz)
{
    sp<AudioTrack> lpTrack = getAudioTrack(env, thiz);
    if (lpTrack == NULL) {
        jniThrowException(env, "java/lang/IllegalStateException",
            "Unable to retrieve AudioTrack pointer for start()");
        return;
    }

    lpTrack->start();
}
static sp<AudioTrack> getAudioTrack(JNIEnv* env, jobject thiz)
{
    Mutex::Autolock l(sLock);
    AudioTrack* const at =
            (AudioTrack*)env->GetLongField(thiz, javaAudioTrackFields.nativeTrackInJavaObj);
    return sp<AudioTrack>(at);
}
It first retrieves the previously created Native AudioTrack instance and then calls its start() function.


Here we obtained another important function call: AudioTrack::start().


4. native_write_byte()/native_write_short()


With playback started, we write data to the AudioTrack; the corresponding JNI implementation is:
// ----------------------------------------------------------------------------
static jint android_media_AudioTrack_write_native_bytes(JNIEnv *env,  jobject thiz,
        jbyteArray javaBytes, jint byteOffset, jint sizeInBytes,
        jint javaAudioFormat, jboolean isWriteBlocking) {
    //ALOGV("android_media_AudioTrack_write_native_bytes(offset=%d, sizeInBytes=%d) called",
    //    offsetInBytes, sizeInBytes);
    sp<AudioTrack> lpTrack = getAudioTrack(env, thiz);
    if (lpTrack == NULL) {
        jniThrowException(env, "java/lang/IllegalStateException",
                "Unable to retrieve AudioTrack pointer for write()");
        return (jint)AUDIO_JAVA_INVALID_OPERATION;
    }

    ScopedBytesRO bytes(env, javaBytes);
    if (bytes.get() == NULL) {
        ALOGE("Error retrieving source of audio data to play, can't play");
        return (jint)AUDIO_JAVA_BAD_VALUE;
    }

    jint written = writeToTrack(lpTrack, javaAudioFormat, bytes.get(), byteOffset,
            sizeInBytes, isWriteBlocking == JNI_TRUE /* blocking */);

    return written;
}
It ultimately calls the writeToTrack() function:
// ----------------------------------------------------------------------------
template <typename T>
static jint writeToTrack(const sp<AudioTrack>& track, jint audioFormat, const T *data,
                         jint offsetInSamples, jint sizeInSamples, bool blocking) {
    // give the data to the native AudioTrack object (the data starts at the offset)
    ssize_t written = 0;
    // regular write() or copy the data to the AudioTrack's shared memory?
    size_t sizeInBytes = sizeInSamples * sizeof(T);
    if (track->sharedBuffer() == 0) {// STREAM mode: the data is written into the native AudioTrack piece by piece
        written = track->write(data + offsetInSamples, sizeInBytes, blocking);
        // for compatibility with earlier behavior of write(), return 0 in this case
        if (written == (ssize_t) WOULD_BLOCK) {
            written = 0;
        }
    } else {// STATIC mode: copy the data straight into the shared memory
        // writing to shared memory, check for capacity
        if ((size_t)sizeInBytes > track->sharedBuffer()->size()) {
            sizeInBytes = track->sharedBuffer()->size();
        }
        memcpy(track->sharedBuffer()->pointer(), data + offsetInSamples, sizeInBytes);
        written = sizeInBytes;
    }
    if (written >= 0) {
        return written / sizeof(T);
    }
    return interpretWriteSizeError(written);
}
In STREAM mode we keep writing data into the Native AudioTrack, and the important function call here is AudioTrack::write(); in STATIC mode we copy the data into the shared memory region in one go.
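For STREAM mode, the application-side write path is typically a loop. A minimal native-side sketch is given below, assuming a hypothetical helper feedTrack() and a track already configured through set() with TRANSFER_SYNC:

// Hypothetical sketch: feed a STREAM-mode (TRANSFER_SYNC) track with blocking writes.
static status_t feedTrack(const sp<AudioTrack>& track, const uint8_t* pcm, size_t pcmBytes) {
    track->start();
    size_t offset = 0;
    while (offset < pcmBytes) {
        // blocking write: returns the number of bytes consumed, or a negative status
        ssize_t written = track->write(pcm + offset, pcmBytes - offset, true /* blocking */);
        if (written < 0) {
            return (status_t)written;   // e.g. DEAD_OBJECT if the track was invalidated
        }
        offset += written;
    }
    track->stop();
    return NO_ERROR;
}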


5. native_stop()


When all the data has been written and playback is finished, we call stop() to stop the AudioTrack; the corresponding JNI implementation is:

// ----------------------------------------------------------------------------
static void
android_media_AudioTrack_stop(JNIEnv *env, jobject thiz)
{
    sp<AudioTrack> lpTrack = getAudioTrack(env, thiz);
    if (lpTrack == NULL) {
        jniThrowException(env, "java/lang/IllegalStateException",
            "Unable to retrieve AudioTrack pointer for stop()");
        return;
    }

    lpTrack->stop();
}
It directly calls the Native AudioTrack::stop() function.


6. native_release()


Because an AudioTrack object ties up quite a few system resources, after calling AudioTrack::stop() we still need to call release() to free them; the corresponding JNI implementation is:
#define CALLBACK_COND_WAIT_TIMEOUT_MS 1000
static void android_media_AudioTrack_release(JNIEnv *env,  jobject thiz) {
    sp<AudioTrack> lpTrack = setAudioTrack(env, thiz, 0);
    if (lpTrack == NULL) {
        return;
    }
    //ALOGV("deleting lpTrack: %x\n", (int)lpTrack);

    // delete the JNI data
    AudioTrackJniStorage* pJniStorage = (AudioTrackJniStorage *)env->GetLongField(
        thiz, javaAudioTrackFields.jniData);
    // reset the native resources in the Java object so any attempt to access
    // them after a call to release fails.
    env->SetLongField(thiz, javaAudioTrackFields.jniData, 0);

    if (pJniStorage) {
        Mutex::Autolock l(sLock);
        audiotrack_callback_cookie *lpCookie = &pJniStorage->mCallbackData;
        //ALOGV("deleting pJniStorage: %x\n", (int)pJniStorage);
        while (lpCookie->busy) {
            if (lpCookie->cond.waitRelative(sLock,
                                            milliseconds(CALLBACK_COND_WAIT_TIMEOUT_MS)) !=
                                                    NO_ERROR) {
                break;
            }
        }
        sAudioTrackCallBackCookies.remove(lpCookie);
        // delete global refs created in native_setup
        env->DeleteGlobalRef(lpCookie->audioTrack_class);
        env->DeleteGlobalRef(lpCookie->audioTrack_ref);
        delete pJniStorage;
    }
}
The handling here is straightforward: it cleans up the objects that were created earlier in setup().

This concludes the analysis of the AudioTrack JNI layer. As the walkthrough above shows, the work from here on happens inside the Native AudioTrack. Let's summarize the important function calls we ended up with:

  • AudioTrack::set()
  • AudioTrack::start()
  • AudioTrack::write()
  • AudioTrack::stop()

So, in the next step we will move into the Native AudioTrack and continue the analysis there.

