Android 13 AAudioStream_write Flow Analysis

The AAudioStream_write method in AAudioAudio.cpp:

//frameworks/av/media/libaaudio/src/core/AAudioAudio.cpp
AAUDIO_API aaudio_result_t AAudioStream_write(AAudioStream* stream,
                               const void *buffer,
                               int32_t numFrames,
                               int64_t timeoutNanoseconds)
{
    AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
    if (buffer == nullptr) {
        return AAUDIO_ERROR_NULL;
    }


    // Don't allow writes when playing with a callback.
    if (audioStream->isDataCallbackActive()) {
        // A developer requested this warning because it would have saved lots of debugging.
        ALOGW("%s() - Cannot write to a callback stream when running.", __func__);
        return AAUDIO_ERROR_INVALID_STATE;
    }


    if (numFrames < 0) {
        return AAUDIO_ERROR_ILLEGAL_ARGUMENT;
    } else if (numFrames == 0) {
        return 0;
    }


    aaudio_result_t result = audioStream->write(buffer, numFrames, timeoutNanoseconds);


    return result;
}
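
For context, here is a minimal sketch of how an application reaches this entry point through the AAudio NDK C API. The parameters (stereo, 48 kHz, 480-frame writes, 100 ms timeout) are illustrative only and error handling is omitted:

// Hypothetical caller of AAudioStream_write(); values are illustrative.
#include <aaudio/AAudio.h>
#include <vector>

int main() {
    AAudioStreamBuilder *builder = nullptr;
    AAudio_createStreamBuilder(&builder);
    AAudioStreamBuilder_setDirection(builder, AAUDIO_DIRECTION_OUTPUT);
    AAudioStreamBuilder_setFormat(builder, AAUDIO_FORMAT_PCM_I16);
    AAudioStreamBuilder_setChannelCount(builder, 2);
    AAudioStreamBuilder_setSampleRate(builder, 48000);

    AAudioStream *stream = nullptr;
    AAudioStreamBuilder_openStream(builder, &stream);
    AAudioStreamBuilder_delete(builder);

    AAudioStream_requestStart(stream);

    std::vector<int16_t> silence(2 * 480, 0);           // 480 stereo frames of silence
    const int64_t kTimeoutNanos = 100LL * 1000 * 1000;  // 100 ms => blocking write
    // Returns the number of frames written, or a negative AAUDIO_ERROR_* code.
    aaudio_result_t written = AAudioStream_write(stream, silence.data(), 480, kTimeoutNanos);
    (void) written;

    AAudioStream_requestStop(stream);
    AAudioStream_close(stream);
    return 0;
}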

AudioStreamInternalPlay::write

This calls AudioStream's write method. AudioStreamInternal and AudioStreamLegacy both inherit from AudioStream, AudioStreamInternalPlay inherits from AudioStreamInternal, and AudioStreamTrack inherits from AudioStreamLegacy. The call to AudioStream::write therefore dispatches to either AudioStreamInternalPlay::write (the MMAP / AAudio service path) or AudioStreamTrack::write (the legacy AudioTrack path); a simplified class sketch follows, and both paths are analyzed below.
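
A simplified sketch of the class relationships (member lists and real bodies omitted; the real classes live under frameworks/av/media/libaaudio/src/ in the core, client and legacy directories):

#include <cstdint>

// Relationship sketch only; not the actual AOSP declarations.
using aaudio_result_t = int32_t;   // stand-in for the real typedef

class AudioStream {
public:
    virtual ~AudioStream() = default;
    // Base write(); the playback subclasses override it.
    virtual aaudio_result_t write(const void * /*buffer*/, int32_t /*numFrames*/,
                                  int64_t /*timeoutNanoseconds*/) { return 0; }
};

class AudioStreamInternal : public AudioStream {};       // MMAP / AAudioService path
class AudioStreamInternalPlay : public AudioStreamInternal {
public:
    aaudio_result_t write(const void * /*buffer*/, int32_t /*numFrames*/,
                          int64_t /*timeoutNanoseconds*/) override { return 0; }  // -> processData()
};

class AudioStreamLegacy : public AudioStream {};         // wraps the legacy AudioTrack/AudioRecord path
class AudioStreamTrack : public AudioStreamLegacy {
public:
    aaudio_result_t write(const void * /*buffer*/, int32_t /*numFrames*/,
                          int64_t /*timeoutNanoseconds*/) override { return 0; }  // -> AudioTrack::write()
};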

//frameworks/av/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
// Write the data, block if needed and timeoutMillis > 0
aaudio_result_t AudioStreamInternalPlay::write(const void *buffer, int32_t numFrames,
                                               int64_t timeoutNanoseconds) {
    return processData((void *)buffer, numFrames, timeoutNanoseconds);
}

write() simply forwards to the processData method:

std::unique_ptr<AudioEndpoint> mAudioEndpoint;   // source for reads or sink for writes
//frameworks/av/media/libaaudio/src/client/AudioStreamInternal.cpp
aaudio_result_t AudioStreamInternal::processData(void *buffer, int32_t numFrames,
                                                 int64_t timeoutNanoseconds)
{
    const char * traceName = "aaProc";
    const char * fifoName = "aaRdy";
    ATRACE_BEGIN(traceName);
    if (ATRACE_ENABLED()) {
        int32_t fullFrames = mAudioEndpoint->getFullFramesAvailable();
        ATRACE_INT(fifoName, fullFrames);
    }


    aaudio_result_t result = AAUDIO_OK;
    int32_t loopCount = 0;
    uint8_t* audioData = (uint8_t*)buffer;
    int64_t currentTimeNanos = AudioClock::getNanoseconds();
    const int64_t entryTimeNanos = currentTimeNanos;
    const int64_t deadlineNanos = currentTimeNanos + timeoutNanoseconds;
    int32_t framesLeft = numFrames;


    // Loop until all the data has been processed or until a timeout occurs.
    while (framesLeft > 0) {
        // The call to processDataNow() will not block. It will just process as much as it can.
        int64_t wakeTimeNanos = 0;
        aaudio_result_t framesProcessed = processDataNow(audioData, framesLeft,
                                                  currentTimeNanos, &wakeTimeNanos);
        if (framesProcessed < 0) {
            result = framesProcessed;
            break;
        }
        framesLeft -= (int32_t) framesProcessed;
        audioData += framesProcessed * getBytesPerFrame();


        // Should we block?
        if (timeoutNanoseconds == 0) {
            break; // don't block
        } else if (wakeTimeNanos != 0) {
            if (!mAudioEndpoint->isFreeRunning()) {
                // If there is software on the other end of the FIFO then it may get delayed.
                // So wake up just a little after we expect it to be ready.
                wakeTimeNanos += mWakeupDelayNanos;
            }


            currentTimeNanos = AudioClock::getNanoseconds();
            int64_t earliestWakeTime = currentTimeNanos + mMinimumSleepNanos;
            // Guarantee a minimum sleep time.
            if (wakeTimeNanos < earliestWakeTime) {
                wakeTimeNanos = earliestWakeTime;
            }


            if (wakeTimeNanos > deadlineNanos) {
                // If we time out, just return the framesWritten so far.
                // TODO remove after we fix the deadline bug
                ALOGW("processData(): entered at %lld nanos, currently %lld",
                      (long long) entryTimeNanos, (long long) currentTimeNanos);
                ALOGW("processData(): TIMEOUT after %lld nanos",
                      (long long) timeoutNanoseconds);
                ALOGW("processData(): wakeTime = %lld, deadline = %lld nanos",
                      (long long) wakeTimeNanos, (long long) deadlineNanos);
                ALOGW("processData(): past deadline by %d micros",
                      (int)((wakeTimeNanos - deadlineNanos) / AAUDIO_NANOS_PER_MICROSECOND));
                mClockModel.dump();
                mAudioEndpoint->dump();
                break;
            }


            if (ATRACE_ENABLED()) {
                int32_t fullFrames = mAudioEndpoint->getFullFramesAvailable();
                ATRACE_INT(fifoName, fullFrames);
                int64_t sleepForNanos = wakeTimeNanos - currentTimeNanos;
                ATRACE_INT("aaSlpNs", (int32_t)sleepForNanos);
            }


            AudioClock::sleepUntilNanoTime(wakeTimeNanos);
            currentTimeNanos = AudioClock::getNanoseconds();
        }
    }


    if (ATRACE_ENABLED()) {
        int32_t fullFrames = mAudioEndpoint->getFullFramesAvailable();
        ATRACE_INT(fifoName, fullFrames);
    }


    // return error or framesProcessed
    (void) loopCount;
    ATRACE_END();
    return (result < 0) ? result : numFrames - framesLeft;
}
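
To make the sleep bookkeeping concrete, below is a small standalone sketch of the clamping logic in the loop above. The values for mWakeupDelayNanos and mMinimumSleepNanos are made up for illustration (the real values are members of AudioStreamInternal):

#include <algorithm>
#include <cstdint>
#include <cstdio>

int main() {
    // Made-up values purely for illustration.
    const int64_t kNanosPerMillisecond = 1'000'000;
    int64_t currentTimeNanos   = 0;                          // "now"
    int64_t deadlineNanos      = 100 * kNanosPerMillisecond; // timeout = 100 ms
    int64_t wakeTimeNanos      = 4  * kNanosPerMillisecond;  // suggested by processDataNow()
    int64_t mWakeupDelayNanos  = 200 * 1000;                 // extra margin when not free-running
    int64_t mMinimumSleepNanos = 100 * 1000;                 // never sleep less than this

    wakeTimeNanos += mWakeupDelayNanos;                           // other side may be a little late
    wakeTimeNanos = std::max(wakeTimeNanos,
                             currentTimeNanos + mMinimumSleepNanos); // guarantee a minimum sleep
    if (wakeTimeNanos > deadlineNanos) {
        std::printf("would time out\n");                          // processData() breaks out here
    } else {
        std::printf("sleep for %lld ns\n", (long long)(wakeTimeNanos - currentTimeNanos));
    }
    return 0;
}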

processData() calls the processDataNow method:

std::unique_ptr<AudioEndpoint> mAudioEndpoint;   // source for reads or sink for writes
//frameworks/av/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
// Write as much data as we can without blocking.
aaudio_result_t AudioStreamInternalPlay::processDataNow(void *buffer, int32_t numFrames,
                                              int64_t currentNanoTime, int64_t *wakeTimePtr) {
    aaudio_result_t result = processCommands();
    if (result != AAUDIO_OK) {
        return result;
    }


    const char *traceName = "aaWrNow";
    ATRACE_BEGIN(traceName);


    if (mClockModel.isStarting()) {
        // Still haven't got any timestamps from server.
        // Keep waiting until we get some valid timestamps then start writing to the
        // current buffer position.
        ALOGV("%s() wait for valid timestamps", __func__);
        // Sleep very briefly and hope we get a timestamp soon.
        *wakeTimePtr = currentNanoTime + (2000 * AAUDIO_NANOS_PER_MICROSECOND);
        ATRACE_END();
        return 0;
    }
    // If we have gotten this far then we have at least one timestamp from server.


    // If a DMA channel or DSP is reading the other end then we have to update the readCounter.
    if (mAudioEndpoint->isFreeRunning()) {
        // Update data queue based on the timing model.
        int64_t estimatedReadCounter = mClockModel.convertTimeToPosition(currentNanoTime);
        // ALOGD("AudioStreamInternal::processDataNow() - estimatedReadCounter = %d", (int)estimatedReadCounter);
        mAudioEndpoint->setDataReadCounter(estimatedReadCounter);
    }


    if (mNeedCatchUp.isRequested()) {
        // Catch an MMAP pointer that is already advancing.
        // This will avoid initial underruns caused by a slow cold start.
        // We add a one burst margin in case the DSP advances before we can write the data.
        // This can help prevent the beginning of the stream from being skipped.
        advanceClientToMatchServerPosition(getFramesPerBurst());
        mNeedCatchUp.acknowledge();
    }


    // If the read index passed the write index then consider it an underrun.
    // For shared streams, the xRunCount is passed up from the service.
    if (mAudioEndpoint->isFreeRunning() && mAudioEndpoint->getFullFramesAvailable() < 0) {
        mXRunCount++;
        if (ATRACE_ENABLED()) {
            ATRACE_INT("aaUnderRuns", mXRunCount);
        }
    }


    // Write some data to the buffer.
    //ALOGD("AudioStreamInternal::processDataNow() - writeNowWithConversion(%d)", numFrames);
    int32_t framesWritten = writeNowWithConversion(buffer, numFrames);
    //ALOGD("AudioStreamInternal::processDataNow() - tried to write %d frames, wrote %d",
    //    numFrames, framesWritten);
    if (ATRACE_ENABLED()) {
        ATRACE_INT("aaWrote", framesWritten);
    }


    // Sleep if there is too much data in the buffer.
    // Calculate an ideal time to wake up.
    if (wakeTimePtr != nullptr
            && (mAudioEndpoint->getFullFramesAvailable() >= getBufferSize())) {
        // By default wake up a few milliseconds from now.  // TODO review
        int64_t wakeTime = currentNanoTime + (1 * AAUDIO_NANOS_PER_MILLISECOND);
        aaudio_stream_state_t state = getState();
        //ALOGD("AudioStreamInternal::processDataNow() - wakeTime based on %s",
        //      AAudio_convertStreamStateToText(state));
        switch (state) {
            case AAUDIO_STREAM_STATE_OPEN:
            case AAUDIO_STREAM_STATE_STARTING:
                if (framesWritten != 0) {
                    // Don't wait to write more data. Just prime the buffer.
                    wakeTime = currentNanoTime;
                }
                break;
            case AAUDIO_STREAM_STATE_STARTED:
            {
                // Calculate when there will be room available to write to the buffer.
                // If the appBufferSize is smaller than the endpointBufferSize then
                // we will have room to write data beyond the appBufferSize.
                // That is a technique used to reduce glitches without adding latency.
                const int32_t appBufferSize = getBufferSize();
                // The endpoint buffer size is set to the maximum that can be written.
                // If we use it then we must carve out some room to write data when we wake up.
                const int32_t endBufferSize = mAudioEndpoint->getBufferSizeInFrames()
                        - getFramesPerBurst();
                const int32_t bestBufferSize = std::min(appBufferSize, endBufferSize);
                int64_t targetReadPosition = mAudioEndpoint->getDataWriteCounter() - bestBufferSize;
                wakeTime = mClockModel.convertPositionToTime(targetReadPosition);
            }
                break;
            default:
                break;
        }
        *wakeTimePtr = wakeTime;


    }


    ATRACE_END();
    return framesWritten;
}
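
For the AAUDIO_STREAM_STATE_STARTED branch, a quick numeric sketch with hypothetical stream parameters shows how the target read position is derived; converting that position to an actual wake time is then done by the isochronous clock model:

#include <algorithm>
#include <cstdint>
#include <cstdio>

int main() {
    // Hypothetical stream parameters, for illustration only.
    const int32_t framesPerBurst     = 192;
    const int32_t appBufferSize      = 2 * framesPerBurst;  // getBufferSize()
    const int32_t endpointBufferSize = 8 * framesPerBurst;  // mAudioEndpoint->getBufferSizeInFrames()
    const int64_t dataWriteCounter   = 48000;               // frames written so far

    // Leave one burst of headroom at the endpoint, then take the smaller buffer size.
    const int32_t endBufferSize  = endpointBufferSize - framesPerBurst;
    const int32_t bestBufferSize = std::min(appBufferSize, endBufferSize);

    // Wake up when the read pointer reaches this position, i.e. when
    // bestBufferSize frames of room will be available again.
    const int64_t targetReadPosition = dataWriteCounter - bestBufferSize;
    std::printf("bestBufferSize=%d frames, targetReadPosition=%lld\n",
                bestBufferSize, (long long) targetReadPosition);
    return 0;
}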

processDataNow() mainly does the following:

1. If the endpoint is free-running (a DMA channel or DSP is consuming the other end of the FIFO), it calls AudioEndpoint's setDataReadCounter method to update the data read counter, using a position estimated from the timing model.

2. It calls the writeNowWithConversion method to copy the caller's data into the shared FIFO.

Each step is analyzed below:

AudioEndpoint::setDataReadCounter

AudioEndpoint's setDataReadCounter method:

std::unique_ptr<android::FifoBufferIndirect> mDataQueue;
//frameworks/av/media/libaaudio/src/client/AudioEndpoint.cpp
void AudioEndpoint::setDataReadCounter(fifo_counter_t framesRead) {
    if (mDataQueue != nullptr) {
        mDataQueue->setReadCounter(framesRead);
    }
}

mDataQueue is a FifoBufferIndirect; its setReadCounter() forwards to the FIFO controller, which for this shared-memory queue is FifoControllerIndirect::setReadCounter:

std::atomic<fifo_counter_t> * mReadCounterAddress;
//frameworks/av/media/libaaudio/src/fifo/FifoControllerIndirect.h
class FifoControllerIndirect : public FifoControllerBase {
    virtual void setReadCounter(fifo_counter_t count) override {
        mReadCounterAddress->store(count, std::memory_order_release);
    }
};
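
The read and write counters are the heart of this queue. A minimal sketch of the counter model (simplified, not the real FifoControllerBase) shows why setDataReadCounter() is needed at all when the consumer is a free-running DSP:

#include <atomic>
#include <cstdint>

// Simplified sketch of the counter-based FIFO model used by AAudio's shared
// memory queue. Both counters increase monotonically and never wrap; positions
// are reduced modulo the capacity only when addressing the data buffer.
struct CounterFifo {
    std::atomic<int64_t> writeCounter{0};   // frames ever written by the client
    std::atomic<int64_t> readCounter{0};    // frames ever read by the server/DSP
    int32_t capacityInFrames = 0;

    int64_t getFullFramesAvailable() const {
        return writeCounter.load(std::memory_order_acquire)
             - readCounter.load(std::memory_order_acquire);
    }
    int64_t getEmptyFramesAvailable() const {
        return capacityInFrames - getFullFramesAvailable();
    }
    // In the free-running MMAP case nobody updates readCounter on our behalf,
    // so the client estimates it from the timing model, which is exactly what
    // setDataReadCounter() above is used for.
    void setReadCounter(int64_t frames) {
        readCounter.store(frames, std::memory_order_release);
    }
};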

AudioStreamInternalPlay::writeNowWithConversion

The writeNowWithConversion method:

AAudioFlowGraph          mFlowGraph;
//frameworks/av/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
aaudio_result_t AudioStreamInternalPlay::writeNowWithConversion(const void *buffer,
                                                            int32_t numFrames) {
    WrappingBuffer wrappingBuffer;
    uint8_t *byteBuffer = (uint8_t *) buffer;
    int32_t framesLeft = numFrames;


    mAudioEndpoint->getEmptyFramesAvailable(&wrappingBuffer);


    // Write data in one or two parts.
    int partIndex = 0;
    while (framesLeft > 0 && partIndex < WrappingBuffer::SIZE) {
        int32_t framesToWrite = framesLeft;
        int32_t framesAvailable = wrappingBuffer.numFrames[partIndex];
        if (framesAvailable > 0) {
            if (framesToWrite > framesAvailable) {
                framesToWrite = framesAvailable;
            }


            int32_t numBytes = getBytesPerFrame() * framesToWrite;


            mFlowGraph.process((void *)byteBuffer,
                               wrappingBuffer.data[partIndex],
                               framesToWrite);


            byteBuffer += numBytes;
            framesLeft -= framesToWrite;
        } else {
            break;
        }
        partIndex++;
    }
    int32_t framesWritten = numFrames - framesLeft;
    mAudioEndpoint->advanceWriteIndex(framesWritten);


    return framesWritten;
}
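
The WrappingBuffer filled in by getEmptyFramesAvailable() describes the writable region as up to two contiguous parts, because the empty area of the ring buffer may wrap past the end of the backing array. The hypothetical helper below (splitEmptyRegion is not an AOSP function) illustrates the split that the while loop above consumes part by part:

#include <algorithm>
#include <cstdint>

// Sketch of why the empty region comes back as up to two parts (the real
// WrappingBuffer lives in the AAudio fifo code).
struct TwoPartRegion {
    static constexpr int SIZE = 2;
    uint8_t *data[SIZE]      = {nullptr, nullptr};
    int32_t  numFrames[SIZE] = {0, 0};
};

// Split "framesWanted" empty frames starting at writeIndex into at most two
// contiguous parts of a ring holding "capacity" frames.
static void splitEmptyRegion(uint8_t *ringBase, int32_t capacity, int32_t bytesPerFrame,
                             int32_t writeIndex, int32_t framesWanted, TwoPartRegion *out) {
    const int32_t tail = std::min(framesWanted, capacity - writeIndex); // up to the end of the array
    out->data[0]      = ringBase + (int64_t) writeIndex * bytesPerFrame;
    out->numFrames[0] = tail;
    out->data[1]      = ringBase;                                       // wrap back to the start
    out->numFrames[1] = framesWanted - tail;                            // 0 if no wrap occurred
}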

writeNowWithConversion calls AAudioFlowGraph's process method:

std::unique_ptr<FLOWGRAPH_OUTER_NAMESPACE::flowgraph::FlowGraphSourceBuffered> mSource;
std::unique_ptr<FLOWGRAPH_OUTER_NAMESPACE::flowgraph::FlowGraphSink> mSink;
//frameworks/av/media/libaaudio/src/client/AAudioFlowGraph.cpp
void AAudioFlowGraph::process(const void *source, void *destination, int32_t numFrames) {
    mSource->setData(source, numFrames);
    mSink->read(destination, numFrames);
}
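
process() follows a pull model: setting the source data does not move anything by itself; it is the sink's read() that pulls frames through the graph (any format, channel or volume processing nodes sit between source and sink). A minimal sketch of the idea, using simplified types rather than the real FlowGraphNode API, with one float per frame for brevity:

#include <algorithm>
#include <cstdint>
#include <cstring>

struct SketchSource {
    const float *mData = nullptr;
    int32_t mNumFrames = 0;
    void setData(const float *data, int32_t numFrames) { mData = data; mNumFrames = numFrames; }
    int32_t pull(float *dest, int32_t numFrames) {
        const int32_t n = std::min(numFrames, mNumFrames);
        std::memcpy(dest, mData, n * sizeof(float));    // a real node would convert/process here
        return n;
    }
};

struct SketchSink {
    SketchSource *mUpstream = nullptr;
    int32_t read(float *dest, int32_t numFrames) {
        return mUpstream->pull(dest, numFrames);        // pulling drives the whole chain
    }
};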

process() first calls FlowGraphSourceBuffered's setData method:

class FlowGraphSourceBuffered : public FlowGraphSource {
    void setData(const void *data, int32_t numFrames) {
        mData = data;
        mSizeInFrames = numFrames;
        mFrameIndex = 0;
    }
};

AudioStreamTrack::write

AudioStreamTrack's write method:

android::sp<android::AudioTrack> mAudioTrack;
//frameworks/av/media/libaaudio/src/legacy/AudioStreamTrack.cpp
aaudio_result_t AudioStreamTrack::write(const void *buffer,
                                      int32_t numFrames,
                                      int64_t timeoutNanoseconds)
{
    int32_t bytesPerFrame = getBytesPerFrame();
    int32_t numBytes;
    aaudio_result_t result = AAudioConvert_framesToBytes(numFrames, bytesPerFrame, &numBytes);
    if (result != AAUDIO_OK) {
        return result;
    }


    if (getState() == AAUDIO_STREAM_STATE_DISCONNECTED) {
        return AAUDIO_ERROR_DISCONNECTED;
    }


    // TODO add timeout to AudioTrack
    bool blocking = timeoutNanoseconds > 0;
    ssize_t bytesWritten = mAudioTrack->write(buffer, numBytes, blocking); // call AudioTrack::write()
    if (bytesWritten == WOULD_BLOCK) {
        return 0;
    } else if (bytesWritten < 0) {
        ALOGE("invalid write, returned %d", (int)bytesWritten);
        // in this context, a DEAD_OBJECT is more likely to be a disconnect notification due to
        // AudioTrack invalidation
        if (bytesWritten == DEAD_OBJECT) {
            setState(AAUDIO_STREAM_STATE_DISCONNECTED);
            return AAUDIO_ERROR_DISCONNECTED;
        }
        return AAudioConvert_androidToAAudioResult(bytesWritten);
    }
    int32_t framesWritten = (int32_t)(bytesWritten / bytesPerFrame);
    incrementFramesWritten(framesWritten);


    result = updateStateMachine();
    if (result != AAUDIO_OK) {
        return result;
    }


    return framesWritten;
}
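
The frames/bytes conversion around the AudioTrack call is simple but overflow-checked. Below is a sketch in the spirit of AAudioConvert_framesToBytes (not the real implementation), followed by a worked example with hypothetical stream parameters:

#include <cstdint>
#include <limits>

// Overflow-checked frames -> bytes conversion; returns false on overflow
// instead of an aaudio_result_t.
static bool framesToBytes(int32_t numFrames, int32_t bytesPerFrame, int32_t *numBytesOut) {
    const int64_t numBytes = (int64_t) numFrames * bytesPerFrame;
    if (numBytes < 0 || numBytes > std::numeric_limits<int32_t>::max()) {
        return false;   // would not fit in int32_t
    }
    *numBytesOut = (int32_t) numBytes;
    return true;
}

// Example: a stereo PCM float stream has bytesPerFrame = 2 channels * 4 bytes = 8,
// so 480 frames become 3840 bytes; AudioTrack::write() returning 3840 bytes is
// converted back to 3840 / 8 = 480 frames for incrementFramesWritten().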

AudioStreamTrack::write then calls AudioTrack's write method:

To be updated.
