Android 13 AAudioStream_read Flow Analysis

This post traces the AAudioStream_read call path: from the public NDK entry point in AAudioAudio.cpp down into the MMAP capture path (AudioStreamInternalCapture) and the legacy path (AudioStreamRecord). Note that aaudio_result_t values double as frame counts here: a non-negative return is the number of frames read, a negative one is an error code.

//frameworks/av/media/libaaudio/src/core/AAudioAudio.cpp
AAUDIO_API aaudio_result_t AAudioStream_read(AAudioStream* stream,
                               void *buffer,
                               int32_t numFrames,
                               int64_t timeoutNanoseconds)
{
    AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
    if (buffer == nullptr) {
        return AAUDIO_ERROR_NULL;
    }
    if (numFrames < 0) {
        return AAUDIO_ERROR_ILLEGAL_ARGUMENT;
    } else if (numFrames == 0) {
        return 0;
    }


    aaudio_result_t result = audioStream->read(buffer, numFrames, timeoutNanoseconds);


    return result;
}
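As a reminder of how this entry point is used from an application, here is a minimal capture sketch against the public NDK API (my own example, not from the AOSP sources; error handling omitted):

#include <aaudio/AAudio.h>
#include <cstdint>

// Minimal capture sketch: open a mono input stream, start it, then pull
// PCM with AAudioStream_read(). All error checking is omitted for brevity.
void captureSketch() {
    AAudioStreamBuilder *builder = nullptr;
    AAudio_createStreamBuilder(&builder);
    AAudioStreamBuilder_setDirection(builder, AAUDIO_DIRECTION_INPUT);
    AAudioStreamBuilder_setFormat(builder, AAUDIO_FORMAT_PCM_I16);
    AAudioStreamBuilder_setChannelCount(builder, 1);

    AAudioStream *stream = nullptr;
    AAudioStreamBuilder_openStream(builder, &stream);
    AAudioStreamBuilder_delete(builder);

    AAudioStream_requestStart(stream);

    int16_t pcm[960];  // ~20 ms of mono audio at 48 kHz
    // Block for up to 100 ms; a non-negative return is the frame count.
    int32_t framesRead = AAudioStream_read(stream, pcm, 960,
                                           100 * 1000 * 1000 /* ns */);
    (void) framesRead;

    AAudioStream_requestStop(stream);
    AAudioStream_close(stream);
}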

AudioStreamInternalCapture::read

AAudioStream_read converts the opaque handle and invokes the virtual AudioStream::read. AudioStreamInternal and AudioStreamLegacy both derive from AudioStream; AudioStreamInternalCapture derives from AudioStreamInternal, and AudioStreamRecord derives from AudioStreamLegacy. The virtual call therefore lands in either AudioStreamInternalCapture::read (the MMAP client path) or AudioStreamRecord::read (the legacy AudioRecord path). Both are analyzed in turn below.
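Schematically, the dispatch looks like this (a condensed sketch of the hierarchy, not the actual AOSP declarations):

#include <aaudio/AAudio.h>

// Condensed sketch of the class hierarchy behind the virtual dispatch.
// The real AOSP classes carry much more state; only read() is shown.
class AudioStream {
public:
    virtual ~AudioStream() = default;
    virtual aaudio_result_t read(void *buffer, int32_t numFrames,
                                 int64_t timeoutNanoseconds) = 0;
};

class AudioStreamInternal : public AudioStream { /* MMAP client path */ };
class AudioStreamLegacy   : public AudioStream { /* wraps AudioRecord/AudioTrack */ };

class AudioStreamInternalCapture : public AudioStreamInternal {
public:
    aaudio_result_t read(void *buffer, int32_t numFrames,
                         int64_t timeoutNanoseconds) override;
};

class AudioStreamRecord : public AudioStreamLegacy {
public:
    aaudio_result_t read(void *buffer, int32_t numFrames,
                         int64_t timeoutNanoseconds) override;
};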

//frameworks/av/media/libaaudio/src/client/AudioStreamInternalCapture.cpp
aaudio_result_t AudioStreamInternalCapture::read(void *buffer, int32_t numFrames,
                                               int64_t timeoutNanoseconds)
{
    return processData(buffer, numFrames, timeoutNanoseconds);
}

read() simply forwards to processData, which is implemented in the base class AudioStreamInternal and drives the non-blocking processDataNow in a loop until the request is satisfied or the timeout expires.
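Condensed, that loop looks roughly like the following (a sketch based on AudioStreamInternal.cpp; the real code adds tracing, a wake-up delay for non-free-running endpoints, and more careful timeout bookkeeping):

// Condensed sketch of AudioStreamInternal::processData().
aaudio_result_t AudioStreamInternal::processData(void *buffer, int32_t numFrames,
                                                 int64_t timeoutNanoseconds) {
    uint8_t *audioData = (uint8_t *) buffer;
    int32_t framesLeft = numFrames;
    const int64_t deadlineNanos = AudioClock::getNanoseconds() + timeoutNanoseconds;
    while (framesLeft > 0) {
        int64_t wakeTimeNanos = 0;
        const int64_t currentTimeNanos = AudioClock::getNanoseconds();
        // Virtual call: AudioStreamInternalCapture::processDataNow() for capture.
        aaudio_result_t framesProcessed = processDataNow(audioData, framesLeft,
                currentTimeNanos, &wakeTimeNanos);
        if (framesProcessed < 0) return framesProcessed;      // error code
        framesLeft -= framesProcessed;
        audioData += framesProcessed * getBytesPerFrame();
        if (timeoutNanoseconds == 0) break;                   // non-blocking mode
        if (framesLeft > 0 && wakeTimeNanos != 0) {
            if (wakeTimeNanos > deadlineNanos) break;         // would exceed timeout
            AudioClock::sleepUntilNanoTime(wakeTimeNanos);    // wait for next burst
        }
    }
    return numFrames - framesLeft;                            // frames actually read
}

The interesting work happens in processDataNow: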

//frameworks/av/media/libaaudio/src/client/AudioStreamInternalCapture.cpp
// Read as much data as we can without blocking.
aaudio_result_t AudioStreamInternalCapture::processDataNow(void *buffer, int32_t numFrames,
                                                  int64_t currentNanoTime, int64_t *wakeTimePtr) {
    aaudio_result_t result = processCommands();
    if (result != AAUDIO_OK) {
        return result;
    }


    const char *traceName = "aaRdNow";
    ATRACE_BEGIN(traceName);


    if (mClockModel.isStarting()) {
        // Still haven't got any timestamps from server.
        // Keep waiting until we get some valid timestamps then start writing to the
        // current buffer position.
        ALOGD("processDataNow() wait for valid timestamps");
        // Sleep very briefly and hope we get a timestamp soon.
        *wakeTimePtr = currentNanoTime + (2000 * AAUDIO_NANOS_PER_MICROSECOND);
        ATRACE_END();
        return 0;
    }
    // If we have gotten this far then we have at least one timestamp from server.


    if (mAudioEndpoint->isFreeRunning()) {
        //ALOGD("AudioStreamInternalCapture::processDataNow() - update remote counter");
        // Update data queue based on the timing model.
        // Jitter in the DSP can cause late writes to the FIFO.
        // This might be caused by resampling.
        // We want to read the FIFO after the latest possible time
        // that the DSP could have written the data.
        int64_t estimatedRemoteCounter = mClockModel.convertLatestTimeToPosition(currentNanoTime);
        // TODO refactor, maybe use setRemoteCounter()
        mAudioEndpoint->setDataWriteCounter(estimatedRemoteCounter);
    }


    // This code assumes that we have already received valid timestamps.
    if (mNeedCatchUp.isRequested()) {
        // Catch an MMAP pointer that is already advancing.
        // This will avoid initial underruns caused by a slow cold start.
        advanceClientToMatchServerPosition(0 /*serverMargin*/);
        mNeedCatchUp.acknowledge();
    }


    // If the capture buffer is full beyond capacity then consider it an overrun.
    // For shared streams, the xRunCount is passed up from the service.
    if (mAudioEndpoint->isFreeRunning()
        && mAudioEndpoint->getFullFramesAvailable() > mAudioEndpoint->getBufferCapacityInFrames()) {
        mXRunCount++;
        if (ATRACE_ENABLED()) {
            ATRACE_INT("aaOverRuns", mXRunCount);
        }
    }


    // Read some data from the buffer.
    //ALOGD("AudioStreamInternalCapture::processDataNow() - readNowWithConversion(%d)", numFrames);
    int32_t framesProcessed = readNowWithConversion(buffer, numFrames);
    //ALOGD("AudioStreamInternalCapture::processDataNow() - tried to read %d frames, read %d",
    //    numFrames, framesProcessed);
    if (ATRACE_ENABLED()) {
        ATRACE_INT("aaRead", framesProcessed);
    }


    // Calculate an ideal time to wake up.
    if (wakeTimePtr != nullptr && framesProcessed >= 0) {
        // By default wake up a few milliseconds from now.  // TODO review
        int64_t wakeTime = currentNanoTime + (1 * AAUDIO_NANOS_PER_MILLISECOND);
        aaudio_stream_state_t state = getState();
        //ALOGD("AudioStreamInternalCapture::processDataNow() - wakeTime based on %s",
        //      AAudio_convertStreamStateToText(state));
        switch (state) {
            case AAUDIO_STREAM_STATE_OPEN:
            case AAUDIO_STREAM_STATE_STARTING:
                break;
            case AAUDIO_STREAM_STATE_STARTED:
            {
                // When do we expect the next write burst to occur?


                // Calculate frame position based off of the readCounter because
                // the writeCounter might have just advanced in the background,
                // causing us to sleep until a later burst.
                int64_t nextPosition = mAudioEndpoint->getDataReadCounter() + getFramesPerBurst();
                wakeTime = mClockModel.convertPositionToLatestTime(nextPosition);
            }
                break;
            default:
                break;
        }
        *wakeTimePtr = wakeTime;


    }


    ATRACE_END();
    return framesProcessed;
}
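The wake time computed in the STARTED case comes from IsochronousClockModel, which is in essence a linear mapping between a frame position and a wall-clock time, anchored at a (position, timestamp) pair reported by the server. Reduced to the core arithmetic (a simplified sketch; the real model also tracks drift and a jitter window, which is what the "latest time" variants account for):

#include <cstdint>

// Idealized position -> time mapping for an isochronous stream.
// markerPosition/markerTimeNanos anchor the line; sampleRate is its slope.
constexpr int64_t kNanosPerSecond = 1000000000LL;

int64_t convertPositionToTime(int64_t framePosition,
                              int64_t markerPosition,
                              int64_t markerTimeNanos,
                              int32_t sampleRate) {
    const int64_t deltaFrames = framePosition - markerPosition;
    return markerTimeNanos + (deltaFrames * kNanosPerSecond) / sampleRate;
}

For example, with a 48 kHz stream and a burst of 96 frames, the next burst lands 96 * 1e9 / 48000 = 2 ms after the current read position, so the client sleeps roughly 2 ms between reads.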

processDataNow then calls readNowWithConversion, which copies frames out of the shared FIFO and converts them from the device format to the application format on the way:

//frameworks/av/media/libaaudio/src/client/AudioStreamInternalCapture.cpp
aaudio_result_t AudioStreamInternalCapture::readNowWithConversion(void *buffer,
                                                                int32_t numFrames) {
    // ALOGD("readNowWithConversion(%p, %d)",
    //              buffer, numFrames);
    WrappingBuffer wrappingBuffer;
    uint8_t *destination = (uint8_t *) buffer;
    int32_t framesLeft = numFrames;


    mAudioEndpoint->getFullFramesAvailable(&wrappingBuffer);


    // Read data in one or two parts.
    for (int partIndex = 0; framesLeft > 0 && partIndex < WrappingBuffer::SIZE; partIndex++) {
        int32_t framesToProcess = framesLeft;
        const int32_t framesAvailable = wrappingBuffer.numFrames[partIndex];
        if (framesAvailable <= 0) break;


        if (framesToProcess > framesAvailable) {
            framesToProcess = framesAvailable;
        }


        const int32_t numBytes = getBytesPerFrame() * framesToProcess;
        const int32_t numSamples = framesToProcess * getSamplesPerFrame();


        const audio_format_t sourceFormat = getDeviceFormat();
        const audio_format_t destinationFormat = getFormat();


        memcpy_by_audio_format(destination, destinationFormat,
                wrappingBuffer.data[partIndex], sourceFormat, numSamples);


        destination += numBytes;
        framesLeft -= framesToProcess;
    }


    int32_t framesProcessed = numFrames - framesLeft;
    mAudioEndpoint->advanceReadIndex(framesProcessed);


    //ALOGD("readNowWithConversion() returns %d", framesProcessed);
    return framesProcessed;
}
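The two-part loop exists because the underlying FIFO is a ring buffer: when the readable region wraps past the end of the backing array, WrappingBuffer exposes it as two contiguous parts. The same pattern in isolation (hypothetical types for illustration, not the AOSP FifoBuffer API):

#include <algorithm>
#include <cstdint>

// Hypothetical ring buffer showing why a wrapped read needs two parts:
// part 0 is the tail of the array, part 1 (possibly empty) is its head.
struct Part { const uint8_t *data; int32_t numBytes; };

void getReadableParts(const uint8_t *storage, int32_t capacityBytes,
                      int32_t readOffset, int32_t bytesAvailable,
                      Part parts[2]) {
    const int32_t firstLen = std::min(bytesAvailable, capacityBytes - readOffset);
    parts[0] = { storage + readOffset, firstLen };
    parts[1] = { storage, bytesAvailable - firstLen };  // empty when no wrap
}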

AudioStreamRecord::read

The legacy path goes through AudioStreamRecord::read, which wraps an android::AudioRecord held in the mAudioRecord member shown below:

android::sp<android::AudioRecord> mAudioRecord;
//frameworks/av/media/libaaudio/src/legacy/AudioStreamRecord.cpp
aaudio_result_t AudioStreamRecord::read(void *buffer,
                                      int32_t numFrames,
                                      int64_t timeoutNanoseconds)
{
    int32_t bytesPerDeviceFrame = getBytesPerDeviceFrame();
    int32_t numBytes;
    // This will detect out of range values for numFrames.
    aaudio_result_t result = AAudioConvert_framesToBytes(numFrames, bytesPerDeviceFrame, &numBytes);
    if (result != AAUDIO_OK) {
        return result;
    }


    if (getState() == AAUDIO_STREAM_STATE_DISCONNECTED) {
        return AAUDIO_ERROR_DISCONNECTED;
    }


    // TODO add timeout to AudioRecord
    bool blocking = (timeoutNanoseconds > 0);


    ssize_t bytesActuallyRead = 0;
    ssize_t totalBytesRead = 0;
    if (mFormatConversionBufferI16.get() != nullptr) {
        // Convert I16 data to float using an intermediate buffer.
        float *floatBuffer = (float *) buffer;
        int32_t framesLeft = numFrames;
        // Perform conversion using multiple read()s if necessary.
        while (framesLeft > 0) {
            // Read into short internal buffer.
            int32_t framesToRead = std::min(framesLeft, mFormatConversionBufferSizeInFrames);
            size_t bytesToRead = framesToRead * bytesPerDeviceFrame;
            bytesActuallyRead = mAudioRecord->read(mFormatConversionBufferI16.get(), bytesToRead, blocking);
            if (bytesActuallyRead <= 0) {
                break;
            }
            totalBytesRead += bytesActuallyRead;
            int32_t framesToConvert = bytesActuallyRead / bytesPerDeviceFrame;
            // Convert into app float buffer.
            size_t numSamples = framesToConvert * getSamplesPerFrame();
            memcpy_to_float_from_i16(
                    floatBuffer,
                    mFormatConversionBufferI16.get(),
                    numSamples);
            floatBuffer += numSamples;
            framesLeft -= framesToConvert;
        }
    } else {
        bytesActuallyRead = mAudioRecord->read(buffer, numBytes, blocking);
        totalBytesRead = bytesActuallyRead;
    }
    if (bytesActuallyRead == WOULD_BLOCK) {
        return 0;
    } else if (bytesActuallyRead < 0) {
        // In this context, a DEAD_OBJECT is more likely to be a disconnect notification due to
        // AudioRecord invalidation.
        if (bytesActuallyRead == DEAD_OBJECT) {
            setState(AAUDIO_STREAM_STATE_DISCONNECTED);
            return AAUDIO_ERROR_DISCONNECTED;
        }
        return AAudioConvert_androidToAAudioResult(bytesActuallyRead);
    }
    int32_t framesRead = (int32_t)(totalBytesRead / bytesPerDeviceFrame);
    incrementFramesRead(framesRead);


    result = updateStateMachine();
    if (result != AAUDIO_OK) {
        return result;
    }


    return (aaudio_result_t) framesRead;
}
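When the device delivers PCM_I16 but the application asked for float, the loop above converts through the intermediate mFormatConversionBufferI16 using memcpy_to_float_from_i16 from audio_utils. Its semantics boil down to scaling each sample into [-1.0, 1.0) (a sketch of the behavior; the library version in system/media/audio_utils is optimized):

#include <cstddef>
#include <cstdint>

// Equivalent of memcpy_to_float_from_i16(): scale each 16-bit sample
// by 1/32768 so full-scale int16 maps into [-1.0, 1.0).
void toFloatFromI16(float *dst, const int16_t *src, size_t count) {
    for (size_t i = 0; i < count; i++) {
        dst[i] = src[i] * (1.0f / 32768.0f);
    }
}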

From here the call chain continues into AudioRecord::read (frameworks/av/media/libaudioclient/AudioRecord.cpp):

To be continued.
