android 音频总结2 ----mark

本文详细介绍了Android系统中AudioTrack的创建过程,包括静态和流式两种模式下音频数据的处理。在静态模式下,APP创建共享内存;在流式模式下,播放线程创建共享内存。通过环形缓冲区实现APP与播放线程的数据同步。测试程序展示了AudioTrack的使用,并探讨了不同模式下的数据同步机制。同时,文章还提及了AudioMixer中处理音频数据的过程,如重采样、混音等操作。
摘要由CSDN通过智能技术生成

4.9 AudioTrack创建过程_Track和共享内存

回顾:

a. APP创建AudioTrack <-----------------> AudioFlinger中PlaybackThread创建对应的Track

b. APP给AudioTrack提供音频数据有2种方式: 一次性提供(MODE_STATIC)、边播放边提供(MODE_STREAM)

问:

a. 音频数据存在buffer中, 这个buffer由谁提供? APP 还是 PlaybackThread ?

b. APP提供数据, PlaybackThread消耗数据, 如何同步?

共享内存由谁创建

a.  MODE_STATIC(一次性提前提供数据)  由APP创建共享内存( app可知Buffer大小)

b. MODE_STREAM(边播放边提供)  由playbackThread创建共享内存(为了让APP简单,省事)

APP与playbackThread如何同步数据?

a.  MODE_STATIC:   无需同步,APP先构造,playbackThread后消费

b. MODE_STREAM:   需同步,使用环形BUFFER来同步

测试程序:

Shared_mem_test.cpp (z:\android-5.0.2\frameworks\base\media\tests\audiotests)  

int AudioTrackTest::Test01() {

    sp heap;

    sp iMem;

    uint8_t* p;

    short smpBuf[BUF_SZ];

    long rate = 44100;

    unsigned long phi;

    unsigned long dPhi;

    long amplitude;

    long freq = 1237;

    float f0;

    f0 = pow(2., 32.) * freq / (float)rate;

    dPhi = (unsigned long)f0;

    amplitude = 1000;

    phi = 0;

    Generate(smpBuf, BUF_SZ, amplitude, phi, dPhi);  // fill buffer

    for (int i = 0; i < 1024; i++) {

        // 事先分配好内存

        heap = new MemoryDealer(1024*1024, "AudioTrack Heap Base");

        iMem = heap->allocate(BUF_SZ*sizeof(short));

        p = static_cast(iMem->pointer());

        memcpy(p, smpBuf, BUF_SZ*sizeof(short));

        sp track = new AudioTrack(AUDIO_STREAM_MUSIC,// stream type

               rate,

               AUDIO_FORMAT_PCM_16_BIT,// word length, PCM

               AUDIO_CHANNEL_OUT_MONO,

               iMem);

        status_t status = track->initCheck();

        if(status != NO_ERROR) {

            track.clear();

            ALOGD("Failed for initCheck()");

            return -1;

        }

        // start play

        ALOGD("start");

        track->start();

        usleep(20000);

        ALOGD("stop");

        track->stop();

        iMem.clear();

        heap.clear();

        usleep(20000);

    }

    return 0;

}

MediaAudioTrackTest.java (z:\android-5.0.2\frameworks\base\media\tests\mediaframeworktest\src\com\android\mediaframeworktest\functional\audio)   

    //Test case 4: setPlaybackHeadPosition() beyond what has been written

    @LargeTest

    public void testSetPlaybackHeadPositionTooFar() throws Exception {

        // constants for test

        final String TEST_NAME = "testSetPlaybackHeadPositionTooFar";

        final int TEST_SR = 22050;

        final int TEST_CONF = AudioFormat.CHANNEL_OUT_MONO;

        final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;

        final int TEST_MODE = AudioTrack.MODE_STREAM;

        final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;

 

        //-------- initialization --------------

        int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT);

        AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT,

                2*minBuffSize, TEST_MODE);

        byte data[] = new byte[minBuffSize];

        // make up a frame index that's beyond what has been written: go from buffer size to frame

        //   count (given the audio track properties), and add 77.

        int frameIndexTooFar = (2*minBuffSize/2) + 77;

        //--------    test        --------------

        assumeTrue(TEST_NAME, track.getState() == AudioTrack.STATE_INITIALIZED);

        track.write(data, 0, data.length);

        track.write(data, 0, data.length);

        track.play();

        track.stop();

        assumeTrue(TEST_NAME, track.getPlayState() == AudioTrack.PLAYSTATE_STOPPED);

        assertTrue(TEST_NAME, track.setPlaybackHeadPosition(frameIndexTooFar) == AudioTrack.ERROR_BAD_VALUE);

        //-------- tear down      --------------

        track.release();

    }

    /**

     * Class constructor with {@link AudioAttributes} and {@link AudioFormat}.

     * @param attributes a non-null {@link AudioAttributes} instance.

     * @param format a non-null {@link AudioFormat} instance describing the format of the data

     *     that will be played through this AudioTrack. See {@link AudioFormat.Builder} for

     *     configuring the audio format parameters such as encoding, channel mask and sample rate.

     * @param bufferSizeInBytes the total size (in bytes) of the buffer where audio data is read

     *   from for playback. If using the AudioTrack in streaming mode, you can write data into

     *   this buffer in smaller chunks than this size. If using the AudioTrack in static mode,

     *   this is the maximum size of the sound that will be played for this instance.

     *   See {@link #getMinBufferSize(int, int, int)} to determine the minimum required buffer size

     *   for the successful creation of an AudioTrack instance in streaming mode. Using values

     *   smaller than getMinBufferSize() will result in an initialization failure.

     * @param mode streaming or static buffer. See {@link #MODE_STATIC} and {@link #MODE_STREAM}.

     * @param sessionId ID of audio session the AudioTrack must be attached to, or

     *   {@link AudioManager#AUDIO_SESSION_ID_GENERATE} if the session isn't known at construction

     *   time. See also {@link AudioManager#generateAudioSessionId()} to obtain a session ID before

     *   construction.

     * @throws IllegalArgumentException

     */

    // Constructs an AudioTrack from AudioAttributes + AudioFormat, validates the
    // Java-side parameters, then calls into native_setup() which creates the
    // native AudioTrack (and, in AudioFlinger, the matching Track).
    public AudioTrack(AudioAttributes attributes, AudioFormat format, int bufferSizeInBytes,
            int mode, int sessionId)
                    throws IllegalArgumentException {
        // mState already == STATE_UNINITIALIZED
        if (attributes == null) {
            throw new IllegalArgumentException("Illegal null AudioAttributes");
        }
        if (format == null) {
            throw new IllegalArgumentException("Illegal null AudioFormat");
        }

        // remember which looper is associated with the AudioTrack instantiation
        // (used later for event callbacks); fall back to the main looper when
        // constructed on a thread without its own Looper.
        Looper looper;
        if ((looper = Looper.myLooper()) == null) {
            looper = Looper.getMainLooper();
        }

        // Sample rate: prefer the one set on the AudioFormat; otherwise query
        // the primary output's rate, and finally fall back to 44100 Hz.
        int rate = 0;
        if ((format.getPropertySetMask() & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_SAMPLE_RATE) != 0)
        {
            rate = format.getSampleRate();
        } else {
            rate = AudioSystem.getPrimaryOutputSamplingRate();
            if (rate <= 0) {
                rate = 44100;
            }
        }

        // Channel mask: default to stereo unless the AudioFormat specifies one.
        int channelMask = AudioFormat.CHANNEL_OUT_FRONT_LEFT | AudioFormat.CHANNEL_OUT_FRONT_RIGHT;
        if ((format.getPropertySetMask() & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_CHANNEL_MASK) != 0)
        {
            channelMask = format.getChannelMask();
        }

        // Encoding: default unless explicitly set on the AudioFormat.
        int encoding = AudioFormat.ENCODING_DEFAULT;
        if ((format.getPropertySetMask() & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_ENCODING) != 0) {
            encoding = format.getEncoding();
        }

        // Validates rate/channelMask/encoding/mode and stores them into the
        // mSampleRate/mChannels/mAudioFormat/mDataLoadMode fields consumed by
        // native_setup() below. Must run before audioBuffSizeCheck().
        audioParamCheck(rate, channelMask, encoding, mode);

        mStreamType = AudioSystem.STREAM_DEFAULT;

        // Validates bufferSizeInBytes against the frame size; stores
        // mNativeBufferSizeInBytes.
        audioBuffSizeCheck(bufferSizeInBytes);

        mInitializationLooper = looper;

        // App-ops binder used to enforce play-audio permissions.
        IBinder b = ServiceManager.getService(Context.APP_OPS_SERVICE);
        mAppOps = IAppOpsService.Stub.asInterface(b);

        mAttributes = (new AudioAttributes.Builder(attributes).build());

        if (sessionId < 0) {
            throw new IllegalArgumentException("Invalid audio session ID: "+sessionId);
        }

        // Passed as a one-element array so native code can write back the
        // actual session id it allocated/attached to.
        int[] session = new int[1];
        session[0] = sessionId;
        // native initialization
        int initResult = native_setup(new WeakReference(this), mAttributes,
                mSampleRate, mChannels, mAudioFormat,
                mNativeBufferSizeInBytes, mDataLoadMode, session);
        if (initResult != SUCCESS) {
            loge("Error code "+initResult+" when initializing AudioTrack.");
            return; // with mState == STATE_UNINITIALIZED
        }

        mSessionId = session[0];

        // In static mode the track is not fully initialized until the app has
        // written its (one-shot) data; streaming mode is ready immediately.
        if (mDataLoadMode == MODE_STATIC) {
            mState = STATE_NO_STATIC_DATA;
        } else {
            mState = STATE_INITIALIZED;
        }

    }

android_media_AudioTrack.cpp (z:\android-5.0.2\frameworks\base\core\jni)   

// ---------------------------------------------------------------------------

// JNI registration table mapping android.media.AudioTrack's native_* Java
// methods to their C++ implementations. Each entry is {java name, JNI type
// signature, function pointer}; e.g. "(Ljava/lang/Object;Ljava/lang/Object;IIIII[I)I"
// on native_setup means (Object weakRef, Object attributes, 5 ints, int[]) -> int.
static JNINativeMethod gMethods[] = {

    // name,              signature,     funcPtr

    {"native_start",         "()V",      (void *)android_media_AudioTrack_start},

    {"native_stop",          "()V",      (void *)android_media_AudioTrack_stop},

    {"native_pause",         "()V",      (void *)android_media_AudioTrack_pause},

    {"native_flush",         "()V",      (void *)android_media_AudioTrack_flush},

    // Called from the Java AudioTrack constructor: creates the native track.
    {"native_setup",     "(Ljava/lang/Object;Ljava/lang/Object;IIIII[I)I",

                                         (void *)android_media_AudioTrack_setup},

    {"native_finalize",      "()V",      (void *)android_media_AudioTrack_finalize},

    {"native_release",       "()V",      (void *)android_media_AudioTrack_release},

    // Streaming-mode data paths (byte/short/float/ByteBuffer variants).
    {"native_write_byte",    "([BIIIZ)I",(void *)android_media_AudioTrack_write_byte},

    {"native_write_native_bytes",

                             "(Ljava/lang/Object;IIIZ)I",

                                         (void *)android_media_AudioTrack_write_native_bytes},

    {"native_write_short",   "([SIII)I", (void *)android_media_AudioTrack_write_short},

    {"native_write_float",   "([FIIIZ)I",(void *)android_media_AudioTrack_write_float},

    {"native_setVolume",     "(FF)V",    (void *)android_media_AudioTrack_set_volume},

    {"native_get_native_frame_count",

                             "()I",      (void *)android_media_AudioTrack_get_native_frame_count},

    {"native_set_playback_rate",

                             "(I)I",     (void *)android_media_AudioTrack_set_playback_rate},

    {"native_get_playback_rate",

                             "()I",      (void *)android_media_AudioTrack_get_playback_rate},

    {"native_set_marker_pos","(I)I",     (void *)android_media_AudioTrack_set_marker_pos},

    {"native_get_marker_pos","()I",      (void *)android_media_AudioTrack_get_marker_pos},

    {"native_set_pos_update_period",

                             "(I)I",     (void *)android_media_AudioTrack_set_pos_update_period},

    {"native_get_pos_update_period",

                             "()I",      (void *)android_media_AudioTrack_get_pos_update_period},

    // Playback-head position accessors (frames).
    {"native_set_position",  "(I)I",     (void *)android_media_AudioTrack_set_position},

    {"native_get_position",  "()I",      (void *)android_media_AudioTrack_get_position},

    {"native_get_latency",   "()I",      (void *)android_media_AudioTrack_get_latency},

    {"native_get_timestamp", "([J)I",    (void *)android_media_AudioTrack_get_timestamp},

    // Static-mode helpers: loop points and reloading the one-shot buffer.
    {"native_set_loop",      "(III)I",   (void *)android_media_AudioTrack_set_loop},

    {"native_reload_static", "()I",      (void *)android_media_AudioTrack_reload},

    {"native_get_output_sample_rate",

                             "(I)I",      (void *)android_media_AudioTrack_get_output_sample_rate},

    {"native_get_min_buff_size",

                             "(III)I",   (void *)android_media_AudioTrack_get_min_buff_size},

    {"native_setAuxEffectSendLevel",

                             "(F)I",     (void *)android_media_AudioTrack_setAuxEffectSendLevel},

    {"native_attachAuxEffect",

                             "(I)I",     (void *)android_media_AudioTrack_attachAuxEffect},

};

android_media_AudioTrack.cpp (z:\android-5.0.2\frameworks\base\core\jni)  

// ----------------------------------------------------------------------------

static jint

android_media_AudioTrack_setup(JNIEnv *env, jobject thiz, jobject weak_this,

        jobject jaa,

        jint sampleRateInHertz, jint javaChannelMask,

        jint audioFormat, jint buffSizeInBytes, jint memoryMode, jintArray jSession) {

    ALOGV("sampleRate=%d, audioFormat(from Java)=%d, channel mask=%x, buffSize=%d",

        sampleRateInHertz, audioFormat, javaChannelMask, buffSizeInBytes);

    if (jaa == 0) {

        ALOGE("Error creating AudioTrack: invalid audio attributes");

        return (jint) AUDIO_JAVA_ERROR;

    }

    // Java channel masks don't map directly to the native definition, but it's a simple shift

    // to skip the two deprecated channel configurations "default" and "mono".

    audio_channel_mask_t nativeChannelMask = ((uint32_t)javaChannelMask) >> 2;

    if (!audio_is_output_channel(nativeChannelMask)) {

        ALOGE("Error creating AudioTrack: invalid channel mask %#x.", javaChannelMask);

        return (jint) AUDIOTRACK_ERROR_SETUP_INVALIDCHANNELMASK;

    }

    uint32_t channelCount = audio_channel_count_from_out_mask(nativeChannelMask);

    // check the format.

    // This function was called from Java, so we compare the format against the Java constants

    audio_format_t format = audioFormatToNative(audioFormat);

    if (format == AUDIO_FORMAT_INVALID) {

        ALOGE("Error creating AudioTrack: unsupported audio format %d.", audioFormat);

        return (jint) AUDIOTRACK_ERROR_SETUP_INVALIDFORMAT;

    }

    

评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值