ROS科大讯飞语音(二)说话篇
https://blog.csdn.net/zhouge94/article/details/52077997
基本环境配置
创建工作空间
$ mkdir -p ~/catkin_ws/src
$ cd ~/catkin_ws/src
$ catkin_init_workspace
- 首次编译
$ cd ~/catkin_ws/
$ catkin_make
- 创建语音包
$ cd src/
$ catkin_create_pkg xf_voice std_msgs rospy roscpp
安装语音库
- 创建/Robot文件夹将所有者改为当前用户
$ sudo mkdir /Robot
$ sudo chown zhouge /Robot/
- 然后将你下载的语音sdk中的libmsc.so放到/Robot/voice/lib/文件夹下
- 将SDK中的bin目录和inc目录也放到/Robot/voice/文件夹下,
- 安装mplayer播放器
- 创建/Robot/voice/wav和/Robot/cmd文件夹,前者用来保存临时音频文件,后者用来保存管道通信文件。
$ sudo apt-get install mplayer
$ mkdir /Robot/voice/wav
$ mkdir /Robot/cmd
- 至此你的/Robot文件夹应该有cmd和voice两个文件夹,voice下应该有bin inc lib wav四个文件夹
- 然后我们把语音库放到系统库文件夹/usr/lib/
$ sudo cp /Robot/voice/lib/libmsc.so /usr/lib/
编写语音合成节点
源码
- 回到工作空间目录,~/catkin_ws
- 在~/catkin_ws/src/xf_voice/src目录下新建文件xf_tts.cpp 并将以下内容复制进去
- 注意:将里面的appid改成你自己的;
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sstream>
#include "/Robot/voice/inc/qtts.h"
#include "/Robot/voice/inc/msp_cmn.h"
#include "/Robot/voice/inc/msp_errors.h"
#include "ros/ros.h"
#include "std_msgs/String.h"
#define SAYIT system("cp /Robot/voice/wav/say.wav /Robot/voice/wav/temp.wav>/Robot/cmd/Mplayer_cmd");system("echo loadfile /Robot/voice/wav/temp.wav>/Robot/cmd/Mplayer_cmd")
typedef int SR_DWORD;
typedef short int SR_WORD ;
/* wav音频头部格式 */
typedef struct _wave_pcm_hdr
{
char riff[4]; // = "RIFF"
int size_8; // = FileSize - 8
char wave[4]; // = "WAVE"
char fmt[4]; // = "fmt "
int fmt_size; // = 下一个结构体的大小 : 16
short int format_tag; // = PCM : 1
short int channels; // = 通道数 : 1
int samples_per_sec; // = 采样率 : 8000 | 6000 | 11025 | 16000
int avg_bytes_per_sec; // = 每秒字节数 : samples_per_sec * bits_per_sample / 8
short int block_align; // = 每采样点字节数 : wBitsPerSample / 8
short int bits_per_sample; // = 量化比特数: 8 | 16
char data[4]; // = "data";
int data_size; // = 纯数据长度 : FileSize - 44
} wave_pcm_hdr;
/* 默认wav音频头部数据 */
wave_pcm_hdr default_wav_hdr =
{
{ 'R', 'I', 'F', 'F' },
0,
{'W', 'A', 'V', 'E'},
{'f', 'm', 't', ' '},
16,
1,
1,
16000,
32000,
2,
16,
{'d', 'a', 't', 'a'},
0
};
/* 文本合成 */
int text_to_speech(const char* src_text, const char* des_path, const char* params)
{
int ret = -1;
FILE* fp = NULL;
const char* sessionID = NULL;
unsigned int audio_len = 0;
wave_pcm_hdr wav_hdr = default_wav_hdr;
int synth_status = MSP_TTS_FLAG_STILL_HAVE_DATA;
if (NULL == src_text || NULL == des_path)
{
printf("params is error!\n");
return ret;
}
fp = fopen(des_path, "wb");
if (NULL == fp)
{
printf("open %s error.\n", des_path);
return ret;
}
/* 开始合成 */
sessionID = QTTSSessionBegin(params, &ret);
if (MSP_SUCCESS != ret)
{
printf("QTTSSessionBegin failed, error code: %d.\n", ret);
fclose(fp);
return ret;
}
ret = QTTSTextPut(sessionID, src_text, (unsigned int)strlen(src_text), NULL);
if (MSP_SUCCESS != ret)
{
printf("QTTSTextPut failed, error code: %d.\n",ret);
QTTSSessionEnd(sessionID, "TextPutError");
fclose(fp);
return ret;
}
printf("正在合成 ...\n");
fwrite(&wav_hdr, sizeof(wav_hdr) ,1, fp); //添加wav音频头,使用采样率为16000
while (1)
{
/* 获取合成音频 */
const void* data = QTTSAudioGet(sessionID, &audio_len, &synth_status, &ret);
if (MSP_SUCCESS != ret)
break;
if (NULL != data)
{
fwrite(data, audio_len, 1, fp);
wav_hdr.data_size += audio_len; //计算data_size大小
}
if (MSP_TTS_FLAG_DATA_END == synth_status)
break;
}//合成状态synth_status取值请参阅《讯飞语音云API文档》
printf("\n");
if (MSP_SUCCESS != ret)
{
printf("QTTSAudioGet failed, error code: %d.\n",ret);
QTTSSessionEnd(sessionID, "AudioGetError");
fclose(fp);
return ret;
}
/* 修正wav文件头数据的大小 */
wav_hdr.size_8 += wav_hdr.data_size + (sizeof(wav_hdr) - 8);
/* 将修正过的数据写回文件头部,音频文件为wav格式 */
fseek(fp, 4, 0);
fwrite(&wav_hdr.size_8,sizeof(wav_hdr.size_8), 1, fp); //写入size_8的值
fseek(fp, 40, 0); //将文件指针偏移到存储data_size值的位置
fwrite(&wav_hdr.data_size,sizeof(wav_hdr.data_size), 1, fp); //写入data_size的值
fclose(fp);
fp = NULL;
/* 合成完毕 */
ret = QTTSSessionEnd(sessionID, "Normal");
if (MSP_SUCCESS != ret)
{
printf("QTTSSessionEnd failed, error code: %d.\n",ret);
}
return ret;
}
int xf_tts(const char* text,const char *filename)
{
int ret = MSP_SUCCESS;
const char* login_params = "appid = 573bdbff, work_dir = .";//登录参数,appid与msc库绑定,请勿随意改动
const char* session_begin_params = "engine_type =local, text_encoding = UTF8, tts_res_path = fo|/Robot/voice/bin/msc/res/tts/xiaoyan.jet;fo|/Robot/voice/bin/msc/res/tts/common.jet, sample_rate = 16000, speed = 50, volume = 50, pitch = 50, rdn = 2";
/* 用户登录 */
ret = MSPLogin(NULL, NULL, login_params); //第一个参数是用户名,第二个参数是密码,第三个参数是登录参数,用户名和密码可在http://open.voicecloud.cn注册获取
if (MSP_SUCCESS != ret)
{
printf("MSPLogin failed, error code: %d.\n", ret);
goto exit ;//登录失败,退出登录
}
/* 文本合成 */
printf("开始合成 ...\n");
ret = text_to_speech(text, filename, session_begin_params);
if (MSP_SUCCESS != ret)
{
printf("text_to_speech failed, error code: %d.\n", ret);
}
printf("合成完毕\n");
exit:
MSPLogout(); //退出登录
return 0;
}
void xfcallback(const std_msgs::String::ConstPtr& msg)
{
char cmd[2000];
std::cout<<"I heard,I will say:"<<msg->data.c_str()<<std::endl;
xf_tts(msg->data.c_str(),"/Robot/voice/wav/say.wav");
sprintf(cmd,"echo %s>/Robot/cmd/saywords",msg->data.c_str());
popen(cmd,"r");
SAYIT;
}
int main(int argc,char **argv)
{
unlink("/Robot/cmd/Mplayer_cmd");
mkfifo("/Robot/cmd/Mplayer_cmd", 0777);
popen("mplayer -quiet -slave -input file=/Robot/cmd/Mplayer_cmd -idle","r");
printf("Mplayer Run Success");
const char* filename = "/Robot/voice/wav/say.wav"; //合成的语音文件名称
const char* text = "语音合成模块启动成功!"; //合成文本
xf_tts(text,filename);
SAYIT;
ros::init(argc,argv,"xf_tts");
ros::NodeHandle n;
ros::Subscriber sub =n.subscribe("xfsaywords",1000,xfcallback);
ros::spin();
return 0;
}
CMakeLists.txt代码
- 在xf_voice包里的CMakeLists.txt增加以下代码
add_executable(xf_tts src/xf_tts.cpp)
target_link_libraries(xf_tts ${catkin_LIBRARIES} -lmsc -ldl -lpthread -lm -lrt)
add_dependencies(xf_tts xf_voice_generate_messages_cpp)
- 至此,我的CMakeLists.txt是这样的
cmake_minimum_required(VERSION 2.8.3)
project(xf_voice)
find_package(catkin REQUIRED COMPONENTS
roscpp
rospy
std_msgs
)
catkin_package()
include_directories(include ${catkin_INCLUDE_DIRS})
add_executable(xf_tts src/xf_tts.cpp)
target_link_libraries(xf_tts ${catkin_LIBRARIES} -lmsc -ldl -lpthread -lm -lrt)
add_dependencies(xf_tts xf_voice_generate_messages_cpp)
编译
- 回到catkin_ws目录
- 然后运行catkin_make
$ catkin_make
调试运行
运行三个终端
- 第一个终端 运行主节点
$ roscore
- 第二个终端 运行语音合成节点
$ cd ~/catkin_ws/
$ source devel/setup.sh
$ rosrun xf_voice xf_tts
- 第三个终端 发布语音信息
$ cd ~/catkin_ws/
$ source devel/setup.sh
$ rostopic list
$ rostopic pub /xfsaywords std_msgs/String "语音合成节点测试"
效果图
- 通过以上测试,你应该已经听到效果了。
- 运行rqt_graph,可以看到目前的节点图,应该与下图相似,