ROS Indigo Voice Control

I. Speech Test
1. Install the dependencies:
sudo apt-get install gstreamer0.10-pocketsphinx
sudo apt-get install ros-indigo-audio-common
sudo apt-get install libasound2
2. Install the ROS package:
sudo apt-get install ros-indigo-pocketsphinx
3. Test:
roslaunch pocketsphinx robocup.launch
The launch will then fail with an error like:

[recognizer-1] process has died [pid 25379, exit code 1, cmd /opt/ros/indigo/lib/pocketsphinx/recognizer.py __name:=recognizer __log:=/home/l/.ros/log/827f0fd0-2ff1-11e7-90b9-000c296e5c8e/recognizer-1.log].
log file: /home/l/.ros/log/827f0fd0-2ff1-11e7-90b9-000c296e5c8e/recognizer-1*.log

To fix this error, install the missing GStreamer plugin:
sudo apt-get install gstreamer0.10-gconf
and comment out the following line in /opt/ros/indigo/lib/pocketsphinx/recognizer.py:
# self.asr.set_property('configured', True)
Then run the test again:
roslaunch pocketsphinx robocup.launch
Echo the recognition results:
rostopic echo /recognizer/output
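Instead of rostopic echo, you can also watch the recognizer from a small Python node. A minimal sketch, assuming the recognizer from robocup.launch is running (the node name listen_test is made up for this example):

#!/usr/bin/env python
# Print every phrase published by the pocketsphinx recognizer.
import rospy
from std_msgs.msg import String

def callback(msg):
    # msg.data is the recognized phrase as plain text
    rospy.loginfo("Heard: %s" % msg.data)

if __name__ == "__main__":
    rospy.init_node('listen_test')    # hypothetical node name
    rospy.Subscriber('/recognizer/output', String, callback)
    rospy.spin()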

II. Language Model (Vocabulary)
1. View the sample corpus:
roscd pocketsphinx/demo
more robocup.corpus
2. Add your own corpus
First, look at the text that the example adds:
roscd rbx1_speech/config
more nav_commands.txt
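The corpus is just a plain-text file with one candidate phrase per line. A rough sketch of the kind of entries it contains, based on the commands voice_nav.py understands (the actual nav_commands.txt shipped with rbx1 may differ):

forward
backward
turn left
turn right
rotate left
rotate right
speed up
slow down
half speed
full speed
stop
pause speech
continue speech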

These are the phrases to be added; you can also edit some of them to suit your own needs.
Next, this file has to be compiled online into a language model and pronunciation dictionary. Go to http://www.speech.cs.cmu.edu/tools/lmtool-new.html, upload the file as instructed on the site, and let it build the model files.
Download the generated .lm and .dic files into ~/catkin_ws/src/rbx1/rbx1_speech/config. (Note: the online build failed for me and I am still looking for a solution; this does not affect the test, because the example files are already generated.)
roscd rbx1_speech/config
rename -f 's/3026/nav_commands/' *    (replace 3026 with the number lmtool assigned to your download)

roscd rbx1_speech/launch
cat voice_nav_commands.launch

<launch>

  <node name="recognizer" pkg="pocketsphinx" type="recognizer.py" output="screen">
    <param name="lm" value="$(find rbx1_speech)/config/nav_commands.lm"/>
    <param name="dict" value="$(find rbx1_speech)/config/nav_commands.dic"/>
  </node>

</launch>

Test:
roslaunch rbx1_speech voice_nav_commands.launch
rostopic echo /recognizer/output
Voice control node (roscd rbx1_speech/nodes, then open voice_nav.py):

#!/usr/bin/env python

"""
  voice_nav.py
  Allows controlling a mobile base using simple speech commands.
  Based on the voice_cmd_vel.py script by Michael Ferguson in
  the pocketsphinx ROS package.
  See http://www.ros.org/wiki/pocketsphinx
"""

import roslib; roslib.load_manifest('rbx1_speech')
import rospy
from geometry_msgs.msg import Twist
from std_msgs.msg import String
from math import copysign

class VoiceNav:
    def __init__(self):
        rospy.init_node('voice_nav')
        rospy.on_shutdown(self.cleanup)

        # Set a number of parameters affecting the robot's speed
        self.max_speed = rospy.get_param("~max_speed", 0.4)
        self.max_angular_speed = rospy.get_param("~max_angular_speed", 1.5)
        self.speed = rospy.get_param("~start_speed", 0.1)
        self.angular_speed = rospy.get_param("~start_angular_speed", 0.5)
        self.linear_increment = rospy.get_param("~linear_increment", 0.05)
        self.angular_increment = rospy.get_param("~angular_increment", 0.4)

        # We don't have to run the script very fast
        self.rate = rospy.get_param("~rate", 5)
        r = rospy.Rate(self.rate)

        # A flag to determine whether or not voice control is paused
        self.paused = False

        # Initialize the Twist message we will publish.
        self.cmd_vel = Twist()

        # Publish the Twist message to the cmd_vel topic
        self.cmd_vel_pub = rospy.Publisher('cmd_vel', Twist)

        # Subscribe to the /recognizer/output topic to receive voice commands.
        rospy.Subscriber('/recognizer/output', String, self.speech_callback)

        # A mapping from keywords or phrases to commands
        self.keywords_to_command = {'stop': ['stop', 'halt', 'abort', 'kill', 'panic', 'off', 'freeze', 'shut down', 'turn off', 'help', 'help me'],
                                    'slower': ['slow down', 'slower'],
                                    'faster': ['speed up', 'faster'],
                                    'forward': ['forward', 'ahead', 'straight'],
                                    'backward': ['back', 'backward', 'back up'],
                                    'rotate left': ['rotate left'],
                                    'rotate right': ['rotate right'],
                                    'turn left': ['turn left'],
                                    'turn right': ['turn right'],
                                    'quarter': ['quarter speed'],
                                    'half': ['half speed'],
                                    'full': ['full speed'],
                                    'pause': ['pause speech'],
                                    'continue': ['continue speech']}

        rospy.loginfo("Ready to receive voice commands")

        # We have to keep publishing the cmd_vel message if we want the robot to keep moving.
        while not rospy.is_shutdown():
            self.cmd_vel_pub.publish(self.cmd_vel)
            r.sleep()

    def get_command(self, data):
        # Attempt to match the recognized word or phrase to the
        # keywords_to_command dictionary and return the appropriate
        # command
        for (command, keywords) in self.keywords_to_command.iteritems():
            for word in keywords:
                if data.find(word) > -1:
                    return command

    def speech_callback(self, msg):
        # Get the motion command from the recognized phrase
        command = self.get_command(msg.data)

        # Log the command to the screen
        rospy.loginfo("Command: " + str(command))

        # If the user has asked to pause/continue voice control,
        # set the flag accordingly
        if command == 'pause':
            self.paused = True
        elif command == 'continue':
            self.paused = False

        # If voice control is paused, simply return without
        # performing any action
        if self.paused:
            return

        # The list of if-then statements should be fairly
        # self-explanatory
        if command == 'forward':
            self.cmd_vel.linear.x = self.speed
            self.cmd_vel.angular.z = 0
        elif command == 'rotate left':
            self.cmd_vel.linear.x = 0
            self.cmd_vel.angular.z = self.angular_speed
        elif command == 'rotate right':
            self.cmd_vel.linear.x = 0
            self.cmd_vel.angular.z = -self.angular_speed
        elif command == 'turn left':
            if self.cmd_vel.linear.x != 0:
                self.cmd_vel.angular.z += self.angular_increment
            else:
                self.cmd_vel.angular.z = self.angular_speed
        elif command == 'turn right':
            if self.cmd_vel.linear.x != 0:
                self.cmd_vel.angular.z -= self.angular_increment
            else:
                self.cmd_vel.angular.z = -self.angular_speed
        elif command == 'backward':
            self.cmd_vel.linear.x = -self.speed
            self.cmd_vel.angular.z = 0
        elif command == 'stop':
            # Stop the robot!  Publish a Twist message consisting of all zeros.
            self.cmd_vel = Twist()
        elif command == 'faster':
            self.speed += self.linear_increment
            self.angular_speed += self.angular_increment
            if self.cmd_vel.linear.x != 0:
                self.cmd_vel.linear.x += copysign(self.linear_increment, self.cmd_vel.linear.x)
            if self.cmd_vel.angular.z != 0:
                self.cmd_vel.angular.z += copysign(self.angular_increment, self.cmd_vel.angular.z)
        elif command == 'slower':
            self.speed -= self.linear_increment
            self.angular_speed -= self.angular_increment
            if self.cmd_vel.linear.x != 0:
                self.cmd_vel.linear.x -= copysign(self.linear_increment, self.cmd_vel.linear.x)
            if self.cmd_vel.angular.z != 0:
                self.cmd_vel.angular.z -= copysign(self.angular_increment, self.cmd_vel.angular.z)
        elif command in ['quarter', 'half', 'full']:
            if command == 'quarter':
                self.speed = copysign(self.max_speed / 4, self.speed)
            elif command == 'half':
                self.speed = copysign(self.max_speed / 2, self.speed)
            elif command == 'full':
                self.speed = copysign(self.max_speed, self.speed)
            if self.cmd_vel.linear.x != 0:
                self.cmd_vel.linear.x = copysign(self.speed, self.cmd_vel.linear.x)
            if self.cmd_vel.angular.z != 0:
                self.cmd_vel.angular.z = copysign(self.angular_speed, self.cmd_vel.angular.z)
        else:
            return

        self.cmd_vel.linear.x = min(self.max_speed, max(-self.max_speed, self.cmd_vel.linear.x))
        self.cmd_vel.angular.z = min(self.max_angular_speed, max(-self.max_angular_speed, self.cmd_vel.angular.z))

    def cleanup(self):
        # When shutting down be sure to stop the robot!
        twist = Twist()
        self.cmd_vel_pub.publish(twist)
        rospy.sleep(1)

if __name__=="__main__":
    try:
        VoiceNav()
        rospy.spin()
    except rospy.ROSInterruptException:
        rospy.loginfo("Voice navigation terminated.")

3. Simulation test
roslaunch rbx1_bringup fake_turtlebot.launch                 # bring up a simulated robot
rosrun rviz rviz -d `rospack find rbx1_nav`/sim.rviz         # open RViz
roslaunch rbx1_speech voice_nav_commands.launch              # start the speech recognition node
roslaunch rbx1_speech turtlebot_voice_nav.launch             # start the voice control node
If no microphone is available, the recognizer output can also be faked by hand, as in the sketch below.
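A minimal sketch of such a fake recognizer (this script is not part of rbx1; the node name fake_recognizer is made up). It publishes a few phrases from the keywords_to_command table so voice_nav.py can be exercised without pocketsphinx:

#!/usr/bin/env python
# Publish phrases that voice_nav.py understands, in place of the real recognizer.
import rospy
from std_msgs.msg import String

if __name__ == "__main__":
    rospy.init_node('fake_recognizer')    # hypothetical node name
    pub = rospy.Publisher('/recognizer/output', String, queue_size=1)
    rospy.sleep(1)                        # give subscribers time to connect
    for phrase in ['forward', 'turn left', 'stop']:
        pub.publish(String(data=phrase))
        rospy.loginfo("Published: %s" % phrase)
        rospy.sleep(3)                    # let the robot respond between commands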

4. Speech playback (text-to-speech)
roscore
rosrun sound_play soundplay_node.py
rosrun sound_play say.py "Greetings Humans. Take me to your leader."
You can switch to a different voice:
sudo apt-get install festvox-don
rosrun sound_play say.py "Welcome to the future" voice_don_diphone
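The same thing can be done from Python with the sound_play client library, which is exactly what talkback.py below relies on. A minimal sketch (the node name say_test is made up); it assumes soundplay_node.py is already running:

#!/usr/bin/env python
# Speak a sentence through the running sound_play server.
import rospy
from sound_play.libsoundplay import SoundClient

if __name__ == "__main__":
    rospy.init_node('say_test')     # hypothetical node name
    soundhandle = SoundClient()
    rospy.sleep(1)                  # wait for the client to connect to the server
    soundhandle.say("Greetings Humans. Take me to your leader.", "voice_don_diphone")
    rospy.sleep(2)                  # keep the node alive while the audio plays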
Example: installing additional CMU voices
sudo apt-get install festlex-cmu
cd /usr/share/festival/voices/english
sudo wget -c http://www.speech.cs.cmu.edu/cmu_arctic/packed/cmu_us_clb_arctic-0.95-release.tar.bz2
sudo wget -c http://www.speech.cs.cmu.edu/cmu_arctic/packed/cmu_us_bdl_arctic-0.95-release.tar.bz2
sudo tar jxfv cmu_us_clb_arctic-0.95-release.tar.bz2
sudo tar jxfv cmu_us_bdl_arctic-0.95-release.tar.bz2
sudo rm cmu_us_clb_arctic-0.95-release.tar.bz2
sudo rm cmu_us_bdl_arctic-0.95-release.tar.bz2
sudo ln -s cmu_us_clb_arctic cmu_us_clb_arctic_clunits
sudo ln -s cmu_us_bdl_arctic cmu_us_bdl_arctic_clunits
Test:
rosrun sound_play say.py "I am speaking with a female C M U voice" voice_cmu_us_clb_arctic_clunits
rosrun sound_play say.py "I am speaking with a male C M U voice" voice_cmu_us_bdl_arctic_clunits

Talkback node (roscd rbx1_speech/nodes, then open talkback.py):

#!/usr/bin/env python

"""
    talkback.py - Version 0.1 2012-01-10

    Use the sound_play client to say back what is heard by the pocketsphinx recognizer.

    Created for the Pi Robot Project: http://www.pirobot.org

    Copyright (c) 2012 Patrick Goebel.  All rights reserved.

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details at:

    http://www.gnu.org/licenses/gpl.html
"""

import roslib; roslib.load_manifest('rbx1_speech')
import rospy
from std_msgs.msg import String
from sound_play.libsoundplay import SoundClient
import sys

class TalkBack:
    def __init__(self, script_path):
        rospy.init_node('talkback')
        rospy.on_shutdown(self.cleanup)

        # Set the default TTS voice to use
        self.voice = rospy.get_param("~voice", "voice_don_diphone")

        # Set the wave file path if used
        self.wavepath = rospy.get_param("~wavepath", script_path + "/../sounds")

        # Create the sound client object
        self.soundhandle = SoundClient()

        # Wait a moment to let the client connect to the
        # sound_play server
        rospy.sleep(1)

        # Make sure any lingering sound_play processes are stopped.
        self.soundhandle.stopAll()

        # Announce that we are ready for input
        self.soundhandle.playWave(self.wavepath + "/R2D2a.wav")
        rospy.sleep(1)
        self.soundhandle.say("Ready", self.voice)

        rospy.loginfo("Say one of the navigation commands...")

        # Subscribe to the recognizer output and set the callback function
        rospy.Subscriber('/recognizer/output', String, self.talkback)

    def talkback(self, msg):
        # Print the recognized words on the screen
        rospy.loginfo(msg.data)

        # Speak the recognized words in the selected voice
        self.soundhandle.say(msg.data, self.voice)

        # Uncomment to play one of the built-in sounds
        #rospy.sleep(2)
        #self.soundhandle.play(5)

        # Uncomment to play a wave file
        #rospy.sleep(2)
        #self.soundhandle.playWave(self.wavepath + "/R2D2a.wav")

    def cleanup(self):
        self.soundhandle.stopAll()
        rospy.loginfo("Shutting down talkback node...")

if __name__=="__main__":
    try:
        TalkBack(sys.path[0])
        rospy.spin()
    except rospy.ROSInterruptException:
        rospy.loginfo("Talkback node terminated.")

Demo:
roslaunch rbx1_speech talkback.launch

References:
http://www.mamicode.com/info-detail-1787549.html
www.guyuehome.com
