freeswitch mrcp source code analysis -- event generation

The event is constructed mainly in the speech_thread() function in src/switch_ivr_async.c:

static void *SWITCH_THREAD_FUNC speech_thread(switch_thread_t *thread, void *obj)
{
    struct speech_thread_handle *sth = (struct speech_thread_handle *) obj;
    switch_channel_t *channel = switch_core_session_get_channel(sth->session);
    switch_asr_flag_t flags = SWITCH_ASR_FLAG_NONE;
    switch_status_t status;
    switch_event_t *event;

    switch_thread_cond_create(&sth->cond, sth->pool);
    switch_mutex_init(&sth->mutex, SWITCH_MUTEX_NESTED, sth->pool);

    if (switch_core_session_read_lock(sth->session) != SWITCH_STATUS_SUCCESS) {
        sth->ready = 0;
        return NULL;
    }

    switch_mutex_lock(sth->mutex);

    sth->ready = 1;

    while (switch_channel_up_nosig(channel) && !switch_test_flag(sth->ah, SWITCH_ASR_FLAG_CLOSED)) {
        char *xmlstr = NULL;
        switch_event_t *headers = NULL;

        switch_thread_cond_wait(sth->cond, sth->mutex);

        if (switch_channel_down_nosig(channel) || switch_test_flag(sth->ah, SWITCH_ASR_FLAG_CLOSED)) {
            break;
        }

        if (switch_core_asr_check_results(sth->ah, &flags) == SWITCH_STATUS_SUCCESS) {

            status = switch_core_asr_get_results(sth->ah, &xmlstr, &flags);

            if (status != SWITCH_STATUS_SUCCESS && status != SWITCH_STATUS_BREAK) {
                goto done;
            } else if (status == SWITCH_STATUS_SUCCESS) {
                /* Try to fetch extra information for this result, the return value doesn't really matter here - it's just optional data. */
                switch_core_asr_get_result_headers(sth->ah, &headers, &flags);
            }

            if (status == SWITCH_STATUS_SUCCESS && switch_true(switch_channel_get_variable(channel, "asr_intercept_dtmf"))) {
                const char *p;

                if ((p = switch_stristr("<input>", xmlstr))) {
                    p += 7;
                }

                while (p && *p) {
                    char c;

                    if (*p == '<') {
                        break;
                    }

                    if (!strncasecmp(p, "pound", 5)) {
                        c = '#';
                        p += 5;
                    } else if (!strncasecmp(p, "hash", 4)) {
                        c = '#';
                        p += 4;
                    } else if (!strncasecmp(p, "star", 4)) {
                        c = '*';
                        p += 4;
                    } else if (!strncasecmp(p, "asterisk", 8)) {
                        c = '*';
                        p += 8;
                    } else {
                        c = *p;
                        p++;
                    }

                    if (is_dtmf(c)) {
                        switch_dtmf_t dtmf = {0};
                        dtmf.digit = c;
                        dtmf.duration = switch_core_default_dtmf_duration(0);
                        dtmf.source = SWITCH_DTMF_INBAND_AUDIO;
                        switch_log_printf(SWITCH_CHANNEL_CHANNEL_LOG(channel), SWITCH_LOG_DEBUG, "Queue speech detected dtmf %c\n", c);
                        switch_channel_queue_dtmf(channel, &dtmf);
                    }

                }
                switch_ivr_resume_detect_speech(sth->session);
            }

            if (switch_event_create(&event, SWITCH_EVENT_DETECTED_SPEECH) == SWITCH_STATUS_SUCCESS) {
                if (status == SWITCH_STATUS_SUCCESS) {
                    switch_event_add_header_string(event, SWITCH_STACK_BOTTOM, "Speech-Type", "detected-speech");

                    if (headers) {
                        switch_event_merge(event, headers);
                    }

                    switch_event_add_body(event, "%s", xmlstr);
                } else {
                    switch_event_add_header_string(event, SWITCH_STACK_BOTTOM, "Speech-Type", "begin-speaking");
                }

                if (switch_test_flag(sth->ah, SWITCH_ASR_FLAG_FIRE_EVENTS)) {
                    switch_event_t *dup;

                    if (switch_event_dup(&dup, event) == SWITCH_STATUS_SUCCESS) {
                        switch_channel_event_set_data(channel, dup);
                        switch_event_fire(&dup);
                    }

                }

                if (switch_core_session_queue_event(sth->session, &event) != SWITCH_STATUS_SUCCESS) {
                    switch_log_printf(SWITCH_CHANNEL_CHANNEL_LOG(channel), SWITCH_LOG_ERROR, "Event queue failed!\n");
                    switch_event_add_header_string(event, SWITCH_STACK_BOTTOM, "delivery-failure", "true");
                    switch_event_fire(&event);
                }
            }

            switch_safe_free(xmlstr);

            if (headers) {
                switch_event_destroy(&headers);
            }
        }
    }
  done:

    if (switch_event_create(&event, SWITCH_EVENT_DETECTED_SPEECH) == SWITCH_STATUS_SUCCESS) {
        switch_event_add_header_string(event, SWITCH_STACK_BOTTOM, "Speech-Type", "closed");
        if (switch_test_flag(sth->ah, SWITCH_ASR_FLAG_FIRE_EVENTS)) {
            switch_event_t *dup;

            if (switch_event_dup(&dup, event) == SWITCH_STATUS_SUCCESS) {
                switch_channel_event_set_data(channel, dup);
                switch_event_fire(&dup);
            }

        }

        if (switch_core_session_queue_event(sth->session, &event) != SWITCH_STATUS_SUCCESS) {
            switch_log_printf(SWITCH_CHANNEL_CHANNEL_LOG(channel), SWITCH_LOG_ERROR, "Event queue failed!\n");
            switch_event_add_header_string(event, SWITCH_STACK_BOTTOM, "delivery-failure", "true");
            switch_event_fire(&event);
        }
    }

    switch_mutex_unlock(sth->mutex);
    switch_core_session_rwunlock(sth->session);

    return NULL;
}

As you can see, the body of the function is a while loop that blocks on the condition variable sth->cond. The condition is signaled after the corresponding session has read a frame of audio; the signal ultimately originates in speech_callback(). The full signaling path is not covered here, but I captured its call stack with gdb, which you can trace through yourself (a simplified sketch of the signaling side follows the backtrace):

#0  speech_callback (bug=0x7f7840125f68, user_data=0x7f7840135ce8, type=<optimized out>) at src/switch_ivr_async.c:4549
#1  0x00007f788a3bb9e7 in switch_core_session_read_frame (session=session@entry=0x7f7840051608, frame=frame@entry=0x7f7883fae068, flags=flags@entry=0, stream_id=stream_id@entry=0) at src/switch_core_io.c:742
#2  0x00007f788a427361 in audio_bridge_thread (obj=obj@entry=0x7f784012c8a0, thread=0x0) at src/switch_ivr_bridge.c:686
#3  0x00007f788a4294d5 in switch_ivr_multi_threaded_bridge (session=0x7f7840051608, peer_session=0x7f784008d258, input_callback=<optimized out>, session_data=<optimized out>, peer_session_data=0x7f784002df40)
    at src/switch_ivr_bridge.c:1616
#4  0x00007f788a48c001 in bridge (session_a=..., session_b=...) at src/switch_cpp.cpp:1420
#5  0x00007f783ee89461 in _wrap_bridge (L=0x7f7840003b40) at mod_lua_wrap.cpp:7791
#6  0x00007f783ec4b328 in luaD_precall () from /lib64/liblua-5.1.so
#7  0x00007f783ec55e7f in luaV_execute () from /lib64/liblua-5.1.so
#8  0x00007f783ec4b73d in luaD_call () from /lib64/liblua-5.1.so
#9  0x00007f783ec4aa6e in luaD_rawrunprotected () from /lib64/liblua-5.1.so
#10 0x00007f783ec4b8ca in luaD_pcall () from /lib64/liblua-5.1.so
#11 0x00007f783ec4744d in lua_pcall () from /lib64/liblua-5.1.so
#12 0x00007f783ee765e2 in docall (L=0x7f7840003b40, narg=0, nresults=0, perror=0, fatal=1) at mod_lua.cpp:92
#13 0x00007f783ee76bdd in lua_parse_and_execute (L=L@entry=0x7f7840003b40, input_code=<optimized out>, session=session@entry=0x0) at mod_lua.cpp:195
#14 0x00007f783ee76c77 in lua_thread_run (thread=<optimized out>, obj=0x7f7840004c40) at mod_lua.cpp:222
#15 0x00007f788a668210 in dummy_worker (opaque=0x7f7840004f48) at threadproc/unix/thread.c:151
#16 0x00007f7888479dc5 in start_thread () from /lib64/libpthre
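
The producer side of this handshake is speech_callback(), frame #0 in the backtrace. The following is a simplified sketch of the pattern, not the actual function body: on each SWITCH_ABC_TYPE_READ callback the media bug feeds the frame to the ASR backend and, when a result or start-of-input is already pending, signals sth->cond so that speech_thread() wakes up.

#include <switch.h>

/* Simplified sketch of the signaling side (NOT the real speech_callback body in
 * src/switch_ivr_async.c). It reuses the speech_thread_handle struct seen in
 * speech_thread() above. */
static switch_bool_t speech_callback_sketch(switch_media_bug_t *bug, void *user_data, switch_abc_type_t type)
{
    struct speech_thread_handle *sth = (struct speech_thread_handle *) user_data;
    switch_asr_flag_t flags = SWITCH_ASR_FLAG_NONE;
    uint8_t data[SWITCH_RECOMMENDED_BUFFER_SIZE];
    switch_frame_t frame = { 0 };

    frame.data = data;
    frame.buflen = sizeof(data);

    if (type == SWITCH_ABC_TYPE_READ && sth->ah) {
        if (switch_core_media_bug_read(bug, &frame, SWITCH_FALSE) == SWITCH_STATUS_SUCCESS) {
            /* Hand the decoded audio to the ASR module (recog_asr_feed() in mod_unimrcp). */
            switch_core_asr_feed(sth->ah, frame.data, frame.datalen, &flags);

            /* If the backend already has something to report, wake up speech_thread(). */
            if (switch_core_asr_check_results(sth->ah, &flags) == SWITCH_STATUS_SUCCESS) {
                if (switch_mutex_trylock(sth->mutex) == SWITCH_STATUS_SUCCESS) {
                    switch_thread_cond_signal(sth->cond);
                    switch_mutex_unlock(sth->mutex);
                }
            }
        }
    }

    return SWITCH_TRUE; /* keep the media bug alive */
}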

Back in speech_thread(): once the thread is woken up, it calls switch_core_asr_check_results() to check whether recognition results are available.

SWITCH_DECLARE(switch_status_t) switch_core_asr_check_results(switch_asr_handle_t *ah, switch_asr_flag_t *flags)
{
    switch_assert(ah != NULL);

    return ah->asr_interface->asr_check_results(ah, flags);
}

asr_interface is a pointer to a switch_asr_interface_t structure, and it too is populated in recog_load():

    asr_interface->interface_name = MOD_UNIMRCP;
    asr_interface->asr_open = recog_asr_open;
    asr_interface->asr_load_grammar = recog_asr_load_grammar;
    asr_interface->asr_unload_grammar = recog_asr_unload_grammar;
    asr_interface->asr_enable_grammar = recog_asr_enable_grammar;
    asr_interface->asr_disable_grammar = recog_asr_disable_grammar;
    asr_interface->asr_disable_all_grammars = recog_asr_disable_all_grammars;
    asr_interface->asr_close = recog_asr_close;
    asr_interface->asr_feed = recog_asr_feed;
    asr_interface->asr_feed_dtmf = recog_asr_feed_dtmf;
    asr_interface->asr_resume = recog_asr_resume;
    asr_interface->asr_pause = recog_asr_pause;
    asr_interface->asr_check_results = recog_asr_check_results;
    asr_interface->asr_get_results = recog_asr_get_results;
    asr_interface->asr_get_result_headers = recog_asr_get_result_headers;
    asr_interface->asr_start_input_timers = recog_asr_start_input_timers;
    asr_interface->asr_text_param = recog_asr_text_param;
    asr_interface->asr_numeric_param = recog_asr_numeric_param;
    asr_interface->asr_float_param = recog_asr_float_param;

So the call actually dispatches to recog_asr_check_results():

static switch_status_t recog_asr_check_results(switch_asr_handle_t *ah, switch_asr_flag_t *flags)
{
    speech_channel_t *schannel = (speech_channel_t *) ah->private_info;
    return recog_channel_check_results(schannel);
}

static switch_status_t recog_channel_check_results(speech_channel_t *schannel)
{
    switch_status_t status = SWITCH_STATUS_SUCCESS;
    recognizer_data_t *r;
    switch_mutex_lock(schannel->mutex);
    r = (recognizer_data_t *) schannel->data;
    if (!zstr(r->result)) {
        switch_log_printf(SWITCH_CHANNEL_UUID_LOG(schannel->session_uuid), SWITCH_LOG_DEBUG, "(%s) SUCCESS, have result\n", schannel->name);
    } else if (r->start_of_input == START_OF_INPUT_RECEIVED) {
        switch_log_printf(SWITCH_CHANNEL_UUID_LOG(schannel->session_uuid), SWITCH_LOG_DEBUG, "(%s) SUCCESS, start of input\n", schannel->name);
    } else {
        status = SWITCH_STATUS_FALSE;
    }

    switch_mutex_unlock(schannel->mutex);
    return status;
}

In the previous post, freeswitch mrcp source code analysis -- data reception (part 2), we saw that the text of the recognition result is stored in the result field of recognizer_data_t. So when a result is available, r->result is non-empty and recog_channel_check_results() returns SWITCH_STATUS_SUCCESS. Back in speech_thread(), the event is then assembled from that result:

if (switch_event_create(&event, SWITCH_EVENT_DETECTED_SPEECH) == SWITCH_STATUS_SUCCESS) {
    if (status == SWITCH_STATUS_SUCCESS) {
        switch_event_add_header_string(event, SWITCH_STACK_BOTTOM, "Speech-Type", "detected-speech");

        if (headers) {
            switch_event_merge(event, headers);
        }

        switch_event_add_body(event, "%s", xmlstr);
    } else {
        switch_event_add_header_string(event, SWITCH_STACK_BOTTOM, "Speech-Type", "begin-speaking");
    }

    if (switch_test_flag(sth->ah, SWITCH_ASR_FLAG_FIRE_EVENTS)) {
        switch_event_t *dup;

        if (switch_event_dup(&dup, event) == SWITCH_STATUS_SUCCESS) {
            switch_channel_event_set_data(channel, dup);
            switch_event_fire(&dup);
        }
    }

    if (switch_core_session_queue_event(sth->session, &event) != SWITCH_STATUS_SUCCESS) {
        switch_log_printf(SWITCH_CHANNEL_CHANNEL_LOG(channel), SWITCH_LOG_ERROR, "Event queue failed!\n");
        switch_event_add_header_string(event, SWITCH_STACK_BOTTOM, "delivery-failure", "true");
        switch_event_fire(&event);
    }
}

switch_safe_free(xmlstr);

if (headers) {
    switch_event_destroy(&headers);
}

The assembly itself is straightforward. First, switch_event_create() creates the FreeSWITCH event:

switch_event_create(&event, SWITCH_EVENT_DETECTED_SPEECH)

Then switch_event_add_header_string() and switch_event_merge() add the event headers:

switch_event_add_header_string(event, SWITCH_STACK_BOTTOM, "Speech-Type", "detected-speech");

if (headers) {
    switch_event_merge(event, headers);
}

Next, switch_event_add_body() adds the message body:

switch_event_add_body(event, "%s", xmlstr);

Finally, switch_channel_event_set_data() attaches the channel data and switch_event_fire() sends the event out:

switch_channel_event_set_data(channel, dup);
switch_event_fire(&dup);
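
On the receiving side, any module can subscribe to these events with switch_event_bind() (the global copy is only fired when SWITCH_ASR_FLAG_FIRE_EVENTS is set, as shown above). The following is a minimal, hypothetical sketch of such a consumer; the handler name and binding id are made up for illustration:

#include <switch.h>

/* Hypothetical consumer: bind to SWITCH_EVENT_DETECTED_SPEECH and read the
 * headers/body assembled by speech_thread(). */
static void on_detected_speech(switch_event_t *event)
{
    const char *speech_type = switch_event_get_header(event, "Speech-Type");
    const char *uuid = switch_event_get_header(event, "Unique-ID"); /* added by switch_channel_event_set_data() */
    char *body = switch_event_get_body(event);                      /* the NLSML/XML result (xmlstr) */

    if (speech_type && !strcasecmp(speech_type, "detected-speech")) {
        switch_log_printf(SWITCH_CHANNEL_LOG, SWITCH_LOG_INFO,
                          "ASR result on %s:\n%s\n",
                          uuid ? uuid : "unknown", body ? body : "");
    }
}

/* Somewhere in a module's load routine: */
/* switch_event_bind("my_asr_observer", SWITCH_EVENT_DETECTED_SPEECH, SWITCH_EVENT_SUBCLASS_ANY, on_detected_speech, NULL); */

Note that speech_thread() produces two copies of the event: the dup fired on the global event bus (which a handler like this one sees) and the original queued on the session via switch_core_session_queue_event(), so the application driving the call can pick it up as well.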