Android N Audio: AudioTrack play

The call flow from AT calling play() at the application layer to AF opening the output path is shown below; the step-by-step code walkthrough is omitted.

AT: AudioTrack  AF: AudioFlinger  AS: AudioSystem  APII: AudioPolicyInterfaceImpl  APS: AudioPolicyService  APM: AudioPolicyManager

[Figure: call flow from AudioTrack.play() to AudioFlinger]
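For orientation, the chain in N-era sources runs roughly as follows (a sketch; the exact hops vary between versions):

AudioTrack::play()
  -> AudioSystem::startOutput()
  -> (Binder) AudioPolicyService / AudioPolicyInterfaceImpl::startOutput()
  -> AudioPolicyManager::startSource()
  -> AudioPolicyManager::setOutputDevice()
  -> AudioFlinger::createAudioPatch() -> PatchPanel::createAudioPatch()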

After openOutput,

[Figure: continuation of the call flow after openOutput]

After an arduous trek we are finally back in AF. Here ports are typed: as the definition below shows, they fall into three kinds, namely session, mix, and physical device.

/* Audio port type indicates if it is a session (e.g AudioTrack),
 * a mix (e.g PlaybackThread output) or a physical device
 * (e.g AUDIO_DEVICE_OUT_SPEAKER) */
typedef enum {
    AUDIO_PORT_TYPE_NONE,
    AUDIO_PORT_TYPE_DEVICE,
    AUDIO_PORT_TYPE_MIX,
    AUDIO_PORT_TYPE_SESSION,
} audio_port_type_t;
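To make the types concrete, here is roughly how the policy side fills an audio_patch when routing an ordinary mixer output to the speaker (a sketch; the field names come from system/audio.h, the values are illustrative, and mixIoHandle is a hypothetical variable):

struct audio_patch patch = {};
patch.num_sources = 1;
patch.sources[0].type = AUDIO_PORT_TYPE_MIX;              // a PlaybackThread output
patch.sources[0].ext.mix.handle = mixIoHandle;            // the thread's audio_io_handle_t (hypothetical)
patch.num_sinks = 1;
patch.sinks[0].type = AUDIO_PORT_TYPE_DEVICE;             // a physical device
patch.sinks[0].ext.device.type = AUDIO_DEVICE_OUT_SPEAKER;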

Depending on the source and port types, different strategies are used to open the output path. For a source of type AUDIO_PORT_TYPE_DEVICE, if there are two sources (or the sink is a device on a different module, or the HAL predates API 3.0) then, as shown below, a software bridge is built: one path is opened for the playback thread mPlaybackThread and one for the MIC record thread mRecordThread.

status_t AudioFlinger::PatchPanel::createAudioPatch(const struct audio_patch *patch,
                                                    audio_patch_handle_t *handle)
{
    switch (patch->sources[0].type) {
    case AUDIO_PORT_TYPE_DEVICE: {
        if ((patch->num_sources == 2) ||
            ((patch->sinks[0].type == AUDIO_PORT_TYPE_DEVICE) &&
             ((patch->sinks[0].ext.device.hw_module != srcModule) ||
              (audioHwDevice->version() < AUDIO_DEVICE_API_VERSION_3_0)))) {
            newPatch->mPlaybackThread = audioflinger->openOutput_l(
                                                patch->sinks[0].ext.device.hw_module,
                                                &output,
                                                &config,
                                                device,
                                                address,
                                                AUDIO_OUTPUT_FLAG_NONE);
            audio_io_handle_t input = AUDIO_IO_HANDLE_NONE;
            newPatch->mRecordThread = audioflinger->openInput_l(srcModule,
                                                                &input,
                                                                &config,
                                                                device,
                                                                address,
                                                                AUDIO_SOURCE_MIC,
                                                                AUDIO_INPUT_FLAG_NONE);
        } else {
            if (patch->sinks[0].type == AUDIO_PORT_TYPE_MIX) {
                sp<ThreadBase> thread = audioflinger->checkRecordThread_l(
                                                patch->sinks[0].ext.mix.handle);
                if (thread == 0) {
                    ALOGW("createAudioPatch() bad capture I/O handle %d",
                                                patch->sinks[0].ext.mix.handle);
                    status = BAD_VALUE;
                    goto exit;
                }
                status = thread->sendCreateAudioPatchConfigEvent(patch, &halHandle);
            } else {
                if (audioHwDevice->version() < AUDIO_DEVICE_API_VERSION_3_0) {
                    status = INVALID_OPERATION;
                    goto exit;
                }
                audio_hw_device_t *hwDevice = audioHwDevice->hwDevice();
                status = hwDevice->create_audio_patch(hwDevice,
                                                      patch->num_sources,
                                                      patch->sources,
                                                      patch->num_sinks,
                                                      patch->sinks,
                                                      &halHandle);
            }
        }
    } break;
    // ... other source types and the exit label elided ...
    }
}
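For reference, a device-to-device patch that lands in the branch above can be requested through the public native API. A minimal sketch (the FM-tuner-to-speaker pairing is illustrative, not taken from this code base):

#include <media/AudioSystem.h>     // AudioSystem::createAudioPatch
#include <system/audio.h>          // struct audio_patch

struct audio_patch patch = {};
patch.num_sources = 1;
patch.sources[0].type = AUDIO_PORT_TYPE_DEVICE;
patch.sources[0].ext.device.type = AUDIO_DEVICE_IN_FM_TUNER;
patch.num_sinks = 1;
patch.sinks[0].type = AUDIO_PORT_TYPE_DEVICE;
patch.sinks[0].ext.device.type = AUDIO_DEVICE_OUT_SPEAKER;

audio_patch_handle_t handle = AUDIO_PATCH_HANDLE_NONE;
// Travels over Binder into AudioFlinger::PatchPanel::createAudioPatch() shown above.
status_t status = android::AudioSystem::createAudioPatch(&patch, &handle);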

 

create_audio_patch is not defined in the HAL here, so this branch should never be taken.

 

Our analysis will focus on audioflinger->openOutput_l.

sp<AudioFlinger::PlaybackThread> AudioFlinger::openOutput_l(audio_module_handle_t module,
                                                            audio_io_handle_t *output,
                                                            audio_config_t *config,
                                                            audio_devices_t devices,
                                                            const String8& address,
                                                            audio_output_flags_t flags)
{
    AudioHwDevice *outHwDev = findSuitableHwDev_l(module, devices);
    AudioStreamOut *outputStream = NULL;
    status_t status = outHwDev->openOutputStream(
            &outputStream,
            *output,
            devices,
            flags,
            config,
            address.string());

    mHardwareStatus = AUDIO_HW_IDLE;

    if (status == NO_ERROR) {
        PlaybackThread *thread;
        if (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
            thread = new OffloadThread(this, outputStream, *output, devices, mSystemReady);
            ALOGV("openOutput_l() created offload output: ID %d thread %p", *output, thread);
        } else if ((flags & AUDIO_OUTPUT_FLAG_DIRECT)
                || !isValidPcmSinkFormat(config->format)
                || !isValidPcmSinkChannelMask(config->channel_mask)) {
            thread = new DirectOutputThread(this, outputStream, *output, devices, mSystemReady);
            ALOGV("openOutput_l() created direct output: ID %d thread %p ", *output, thread);
            // Check if this is DirectPCM, if so
            if (flags & AUDIO_OUTPUT_FLAG_DIRECT_PCM) {
                thread->mIsDirectPcm = true;
            }
        } else {
            thread = new MixerThread(this, outputStream, *output, devices, mSystemReady);
            ALOGV("openOutput_l() created mixer output: ID %d thread %p", *output, thread);
        }
        mPlaybackThreads.add(*output, thread);
        return thread;
    }

    return 0;
}

 

The main work here: first, find (or load) one of the three kinds of input/output devices mentioned earlier; put plainly, load the corresponding .so library so that its API can be called later. Next, call the device's openOutputStream, literally "open an output stream"; put plainly again, this hooks the .so library's API up to the framework's middle layer, forming a top-down call chain. Finally, the flags decide which kind of thread will process the audio data; that is analyzed separately in the AF threads write-up.
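Condensing that last step: COMPRESS_OFFLOAD wins, then DIRECT or any PCM format/mask the mixer cannot handle, and otherwise the ordinary mixer. A hypothetical summary helper (the name pickThreadKind is ours; the logic mirrors the excerpt above):

// Hypothetical condensation of the thread selection in openOutput_l().
enum class OutThreadKind { Offload, Direct, Mixer };

static OutThreadKind pickThreadKind(audio_output_flags_t flags,
                                    const audio_config_t &config) {
    if (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
        return OutThreadKind::Offload;                    // compressed data decoded on the DSP
    if ((flags & AUDIO_OUTPUT_FLAG_DIRECT)
            || !isValidPcmSinkFormat(config.format)       // e.g. compressed passthrough
            || !isValidPcmSinkChannelMask(config.channel_mask))
        return OutThreadKind::Direct;                     // bypasses the mixer entirely
    return OutThreadKind::Mixer;                          // ordinary mixed PCM
}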

 

 

AudioHwDevice::openOutputStream then calls down layer by layer, eventually reaching the platform vendor's HAL in audio_hw.c:

status_t AudioHwDevice::openOutputStream(
        AudioStreamOut **ppStreamOut,
        audio_io_handle_t handle,
        audio_devices_t devices,
        audio_output_flags_t flags,
        struct audio_config *config,
        const char *address)
{
    struct audio_config originalConfig = *config;
    AudioStreamOut *outputStream = new AudioStreamOut(this, flags);

    status_t status = outputStream->open(handle, devices, config, address);
    // ... (on failure, a retry with the HAL-suggested config, starting from
    //      originalConfig, is elided here) ...
    *ppStreamOut = outputStream;
    return status;
}

 

 

status_t AudioStreamOut::open(
        audio_io_handle_t handle,
        audio_devices_t devices,
        struct audio_config *config,
        const char *address)
{
    audio_stream_out_t *outStream;
    int status = hwDev()->open_output_stream(
            hwDev(),
            handle,
            devices,
            customFlags,
            config,
            &outStream,
            address);
    // ... (storing outStream into this AudioStreamOut is elided) ...
    return status;
}
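As background to the "load the .so" step mentioned earlier: the hwDev() handle ultimately comes from libhardware loading the audio HAL module. Roughly (standard hw_get_module usage, not copied from this code base):

#include <hardware/hardware.h>
#include <hardware/audio.h>

const struct hw_module_t *module;
struct audio_hw_device *dev;
// Resolves to e.g. audio.primary.<board>.so under /system/lib/hw or /vendor/lib/hw.
int rc = hw_get_module_by_class(AUDIO_HARDWARE_MODULE_ID, "primary", &module);
if (rc == 0) {
    rc = audio_hw_device_open(module, &dev);  // invokes the module's open() method
}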

 

 

 

adev_open_output_stream in audio_hw.c looks complex, but it essentially configures the members of stream_out (defined in audio_hw.h) according to the flag and device types, then hands back the audio_stream_out pointer that later serves as the interface into the HAL layer.

static int adev_open_output_stream(struct audio_hw_device *dev,
                                   audio_io_handle_t handle,
                                   audio_devices_t devices,
                                   audio_output_flags_t flags,
                                   struct audio_config *config,
                                   struct audio_stream_out **stream_out,
                                   const char *address __unused)
{
    struct audio_device *adev = (struct audio_device *)dev;
    struct stream_out *out;
    int ret = 0;
    audio_format_t format;

    *stream_out = NULL;
    /* allocation and flags/devices assignment, elided from the original excerpt */
    out = (struct stream_out *)calloc(1, sizeof(struct stream_out));
    out->flags = flags;
    out->devices = devices;

    out->channel_mask = AUDIO_CHANNEL_OUT_STEREO;
    out->supported_channel_masks[0] = AUDIO_CHANNEL_OUT_STEREO;
    out->handle = handle;
    out->bit_width = CODEC_BACKEND_DEFAULT_BIT_WIDTH;
    out->non_blocking = 0;

    /* Init use case and pcm_config */
    if ((out->flags & AUDIO_OUTPUT_FLAG_DIRECT) &&
        !(out->flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD ||
          (out->flags & AUDIO_OUTPUT_FLAG_DIRECT_PCM)) &&
        (out->devices & AUDIO_DEVICE_OUT_AUX_DIGITAL ||
         out->devices & AUDIO_DEVICE_OUT_PROXY)) {
        /* ... per-use-case pcm_config setup elided ... */
    }

    /* Hook up the stream vtable; these become the framework's HAL entry points. */
    out->stream.common.get_sample_rate = out_get_sample_rate;
    out->stream.common.set_sample_rate = out_set_sample_rate;
    out->stream.common.get_buffer_size = out_get_buffer_size;
    out->stream.common.get_channels = out_get_channels;
    out->stream.common.get_format = out_get_format;
    out->stream.common.set_format = out_set_format;

    *stream_out = &out->stream;
    return ret;
}
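From here on the framework drives the HAL purely through the function pointers just installed. Schematically (a sketch; error handling omitted, and pcmData is a hypothetical PCM buffer):

struct audio_stream_out *stream = NULL;
adev_open_output_stream(dev, handle, devices, flags, &config, &stream, NULL);

uint32_t rate = stream->common.get_sample_rate(&stream->common);
size_t bytes  = stream->common.get_buffer_size(&stream->common);
ssize_t n     = stream->write(stream, pcmData, bytes);  // hands PCM to the driver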

 

 
