Android Study Notes (10): AudioFlinger (1)

AudioFlinger

AudioFlinger lives inside the MediaServer process:

int main(int argc, char** argv)

{

    sp<ProcessState> proc(ProcessState::self());

    sp<IServiceManager> sm = defaultServiceManager();

    LOGI("ServiceManager: %p", sm.get());

    AudioFlinger::instantiate();

    MediaPlayerService::instantiate();

    CameraService::instantiate();

    AudioPolicyService::instantiate();

    ProcessState::self()->startThreadPool();

    IPCThreadState::self()->joinThreadPool();

}
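
AudioFlinger::instantiate() registers the service with the ServiceManager. The instantiate(), getServiceName() and publish() helpers shown next come from the BinderService<SERVICE> template that AudioFlinger derives from: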

 

static void instantiate() { publish(); }

 

static char const* getServiceName() { return "media.audio_flinger"; }

 

    static status_t publish() {

        sp<IServiceManager> sm(defaultServiceManager());

        return sm->addService(String16(SERVICE::getServiceName()), new SERVICE()); // registers AudioFlinger ("media.audio_flinger") with the ServiceManager

    }
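
For reference, a client gets the service back from the ServiceManager roughly as in the following sketch (this is essentially what the framework's AudioSystem helper does internally; the local variable names here are assumed for illustration):

    sp<IBinder> binder =
            defaultServiceManager()->getService(String16("media.audio_flinger"));
    sp<IAudioFlinger> audioFlinger = interface_cast<IAudioFlinger>(binder);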

 

Let's look at AudioFlinger's constructor:

AudioFlinger::AudioFlinger()

    : BnAudioFlinger(),

        mAudioHardware(0), // will hold the HAL object representing the audio hardware

 mMasterVolume(1.0f), mMasterMute(false), mNextUniqueId(1)

{

    mHardwareStatus = AUDIO_HW_IDLE;

 

    mAudioHardware = AudioHardwareInterface::create(); // create the HAL object representing the audio hardware

 

    mHardwareStatus = AUDIO_HW_INIT;

    if (mAudioHardware->initCheck() == NO_ERROR) {

        // open 16-bit output stream for s/w mixer
        // set some initial system values; part of them is pushed down to the hardware through the Audio HAL

        mMode = AudioSystem::MODE_NORMAL;

        setMode(mMode);

        setMasterVolume(1.0f);

        setMasterMute(false);

}

}

AudioHardwareInterface is Android's wrapper around the audio hardware and belongs to the HAL layer. The HAL is implemented according to the hardware that was chosen and is usually delivered as a shared library.

class AudioHardwareInterface

{

public:

    virtual ~AudioHardwareInterface() {}

  

    virtual status_t    initCheck() = 0; // checks whether the hardware was initialized successfully

    virtual status_t    setVoiceVolume(float volume) = 0; // sets the voice-call volume, range 0.0 to 1.0

    virtual status_t    setMasterVolume(float volume) = 0; // sets the master volume for all other audio stream types (not the voice-call volume)

    virtual status_t    setMode(int mode) = 0; // sets the mode: NORMAL for normal operation, RINGTONE for an incoming call, IN_CALL for an active call

 

    // microphone mute control

    virtual status_t    setMicMute(bool state) = 0;

    virtual status_t    getMicMute(bool* state) = 0;

 

    // set/get global audio parameters

    virtual status_t    setParameters(const String8& keyValuePairs) = 0;

    virtual String8     getParameters(const String8& keys) = 0;

 

    virtual size_t    getInputBufferSize(uint32_t sampleRate, int format, int channelCount) = 0;

 

    // openOutputStream: creates an audio output stream object (equivalent to opening an audio output device)
    // that AudioFlinger can write data to; the pointer parameters return the format, channel count,
    // sample rate, etc. that the output stream actually supports

    virtual AudioStreamOut* openOutputStream(

                                uint32_t devices,

                                int *format=0,

                                uint32_t *channels=0,

                                uint32_t *sampleRate=0,

                                status_t *status=0) = 0;

    virtual    void        closeOutputStream(AudioStreamOut* out) = 0; // closes the output stream

    // openInputStream: creates an audio input stream object (equivalent to opening an audio input device) that AudioFlinger can read data from

    virtual AudioStreamIn* openInputStream(

                                uint32_t devices,

                                int *format,

                                uint32_t *channels,

                                uint32_t *sampleRate,

                                status_t *status,

                                AudioSystem::audio_in_acoustics acoustics) = 0;

    virtual    void        closeInputStream(AudioStreamIn* in) = 0;

 

    /**This method dumps the state of the audio hardware */

    virtual status_t dumpState(int fd, const Vector<String16>& args) = 0;

 

    static AudioHardwareInterface* create(); // static factory method (factory pattern); the concrete object returned is decided by the vendor according to the hardware

 

protected:

 

    virtual status_t dump(int fd, const Vector<String16>& args) = 0;

};
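
The create() factory is implemented on the HAL side. A minimal sketch of what such an implementation typically looks like (MyVendorAudioHardware is a hypothetical class; real builds plug in a generic, stub or vendor implementation in the same way):

    AudioHardwareInterface* AudioHardwareInterface::create()
    {
        AudioHardwareInterface* hw = new MyVendorAudioHardware(); // hypothetical vendor implementation
        if (hw == NULL || hw->initCheck() != NO_ERROR) {
            // fall back to a do-nothing stub if the hardware failed to initialize
            delete hw;
            hw = new AudioHardwareStub();
        }
        return hw;
    }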

AudioHardwareInterface plays the following roles:

  • AudioHardwareInterface manages the creation of the audio output device objects (AudioStreamOut) and the audio input device objects (AudioStreamIn)

  • Some parameters of the audio system can be set through AudioHardwareInterface (a usage sketch follows this list)
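
The sketch below (not actual AudioFlinger code; hw, buffer and bytes are assumed names) shows how an output stream obtained from the HAL would typically be used:

    int format = 0;
    uint32_t channels = 0;
    uint32_t sampleRate = 0;
    status_t status;
    // hw is the AudioHardwareInterface* returned by AudioHardwareInterface::create()
    AudioStreamOut* out = hw->openOutputStream(
            AudioSystem::DEVICE_OUT_SPEAKER,   // which output device to open
            &format, &channels, &sampleRate,   // filled in with the values the stream actually supports
            &status);
    if (out != NULL && status == NO_ERROR) {
        out->write(buffer, bytes);             // AudioFlinger writes mixed PCM data here
        hw->closeOutputStream(out);
    }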

 

Recall the AudioTrack flow:

  1. AudioTrack calls the createTrack function and obtains an IAudioTrack object

  2. AudioTrack calls start on that IAudioTrack, signalling that it is about to write data

  3. AudioTrack writes data through write; this step is closely tied to audio_track_cblk_t

  4. Finally, AudioTrack calls stop on the IAudioTrack, or deletes it, to finish (a client-side sketch follows this list)
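
A minimal client-side sketch of these four steps using the native AudioTrack API (the stream parameters, pcmData and pcmBytes are assumed for illustration):

    sp<AudioTrack> track = new AudioTrack(
            AudioSystem::MUSIC,              // stream type
            44100,                           // sample rate
            AudioSystem::PCM_16_BIT,         // format
            AudioSystem::CHANNEL_OUT_STEREO, // channels
            0);                              // frameCount == 0: let AudioFlinger choose a default
    if (track->initCheck() == NO_ERROR) {    // step 1: createTrack succeeded, the IAudioTrack is held internally
        track->start();                      // step 2
        track->write(pcmData, pcmBytes);     // step 3: data moves through audio_track_cblk_t
        track->stop();                       // step 4
    }
    track.clear();                           // releases the IAudioTrack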

Now let's analyze AudioFlinger::createTrack:

sp<IAudioTrack> AudioFlinger::createTrack(

        pid_t pid, // pid of the process that created the AudioTrack

        int streamType, // stream type

        uint32_t sampleRate, // sample rate

        int format, // e.g. PCM_16

        int channelCount,

        int frameCount,

        uint32_t flags,

        const sp<IMemory>& sharedBuffer, // shared buffer passed in by AudioTrack

        int output, // see part 9(2): identifies the worker (playback) thread inside AudioFlinger

        int *sessionId,

        status_t *status)

{

    sp<PlaybackThread::Track> track;

    sp<TrackHandle> trackHandle;

    sp<Client> client;

    wp<Client> wclient;

    status_t lStatus;

    int lSessionId;

 

    if (streamType >= AudioSystem::NUM_STREAM_TYPES) {

        LOGE("invalid stream type");

        lStatus = BAD_VALUE;

        goto Exit;

    }

 

    {

        Mutex::Autolock _l(mLock);

        PlaybackThread *thread = checkPlaybackThread_l(output); // look up the playback thread by its index

        PlaybackThread *effectThread = NULL;

        if (thread == NULL) {

            LOGE("unknown output thread");

            lStatus = BAD_VALUE;

            goto Exit;

        }

 

        wclient = mClients.valueFor(pid); // check whether this process is already a client of AudioFlinger

 

        if (wclient != NULL) {

            client = wclient.promote();

        } else {

            client = new Client(this, pid); // if not, create the Client record for it

            mClients.add(pid, client);

        }

 

        LOGV("createTrack() sessionId: %d", (sessionId == NULL) ? -2 : *sessionId);

        if (sessionId != NULL && *sessionId != AudioSystem::SESSION_OUTPUT_MIX) {

            for (size_t i = 0; i < mPlaybackThreads.size(); i++) {

                sp<PlaybackThread> t = mPlaybackThreads.valueAt(i);

                if (mPlaybackThreads.keyAt(i) != output) {

                    // prevent same audio session on different output threads

                    uint32_t sessions = t->hasAudioSession(*sessionId);

                    if (sessions & PlaybackThread::TRACK_SESSION) {

                        lStatus = BAD_VALUE;

                        goto Exit;

                    }

                    // check if an effect with same session ID is waiting for a track to be created

                    if (sessions & PlaybackThread::EFFECT_SESSION) {

                        effectThread = t.get();

                    }

                }

            }

            lSessionId = *sessionId;

        } else {

            // if no audio session id is provided, create one here

            lSessionId = nextUniqueId();

            if (sessionId != NULL) {

                *sessionId = lSessionId;

            }

        }

        LOGV("createTrack() lSessionId: %d", lSessionId);

 

        track = thread->createTrack_l(client, streamType, sampleRate, format,

                channelCount, frameCount, sharedBuffer, lSessionId, &lStatus); // create a Track inside the playback thread we found

 

        // move effect chain to this output thread if an effect on same session was waiting

        // for a track to be created

        if (lStatus == NO_ERROR && effectThread != NULL) {

            Mutex::Autolock _dl(thread->mLock);

            Mutex::Autolock _sl(effectThread->mLock);

            moveEffectChain_l(lSessionId, effectThread, thread, true);

        }

    }

    if (lStatus == NO_ERROR) {

        trackHandle = new TrackHandle(track); // TrackHandle is a proxy for the Track object: it supports Binder communication while Track itself does not; requests received by TrackHandle are ultimately handled by Track, a typical proxy pattern

    } else {

        // remove local strong reference to Client before deleting the Track so that the Client

        // destructor is called by the TrackBase destructor with mLock held

        client.clear();

        track.clear();

    }

 

Exit:

    if(status) {

        *status = lStatus;

    }

    return trackHandle;

}
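
To make the proxy relationship concrete, TrackHandle's Binder-facing methods simply forward to the wrapped Track, along these lines (abridged; exact signatures vary between Android versions):

    sp<IMemory> AudioFlinger::TrackHandle::getCblk() const {
        return mTrack->getCblk();      // shared memory holding audio_track_cblk_t plus the data buffer
    }
    status_t AudioFlinger::TrackHandle::start() {
        return mTrack->start();
    }
    void AudioFlinger::TrackHandle::stop() {
        mTrack->stop();
    }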

 

Next, the checkPlaybackThread_l function:

// checkPlaybackThread_l() must be called with AudioFlinger::mLock held

AudioFlinger::PlaybackThread *AudioFlinger::checkPlaybackThread_l(int output) const

{

    PlaybackThread *thread = NULL;

    if (mPlaybackThreads.indexOfKey(output) >= 0) { // look up the thread by the output handle

        thread = (PlaybackThread *)mPlaybackThreads.valueFor(output).get();

    }

    return thread;

}

Finally, createTrack_l (shown here abridged):

sp<AudioFlinger::PlaybackThread::Track>  AudioFlinger::PlaybackThread::createTrack_l(

        const sp<AudioFlinger::Client>& client,

        int streamType,

        uint32_t sampleRate,

        int format,

        int channelCount,

        int frameCount,

        const sp<IMemory>& sharedBuffer,

        int sessionId,

        status_t *status)

{

    sp<Track> track;
    // (parameter validation and error handling omitted from this excerpt)
    track = new Track(this, client, streamType, sampleRate, format,
                channelCount, frameCount, sharedBuffer, sessionId);
    mTracks.add(track); // register the new Track with this playback thread
    return track;
}

Here a new Track object is created and added to the playback thread's list of tracks.

Reposted from: https://my.oschina.net/honeyandroid/blog/516829
