这两天工作需要采集一段音频，于是分析了一下 audio_common 这个 ROS 包，它内部是以 GStreamer 插件的形式实现的。以前在 DeepStream 中熟悉过 GStreamer 的使用方法，再次查看，感觉轻松了许多。
程序分为 audio_capture 和 audio_player 两部分，都是使用 GStreamer 实现的。今天主要熟悉一下数据如何在 GStreamer 和 ROS 之间交互。
audio_capture.cpp
#include <stdio.h>
#include <string.h>

#include <gst/gst.h>
#include <gst/app/gstappsink.h>

#include <boost/thread.hpp>
#include <ros/ros.h>

#include "audio_common_msgs/AudioData.h"
namespace audio_transport
{
class RosGstCapture
{
public:
RosGstCapture()
{
_bitrate = 192;
std::string dst_type;
// Need to encoding or publish raw wave data
ros::param::param<std::string>("~format", _format, "mp3");
// The bitrate at which to encode the audio
ros::param::param<int>("~bitrate", _bitrate, 192);
// only available for raw data
ros::param::param<int>("~channels", _channels, 1);
ros::param::param<int>("~depth", _depth, 16);
ros::param::param<int>("~sample_rate", _sample_rate, 16000);
// The destination of the audio
ros::param::param<std::string>("~dst", dst_type, "appsink");
// The source of the audio
//ros::param::param<std::string>("~src", source_type, "alsasrc");
std::string device;
ros::param::param<std::string>("~device", device, "");
_pub = _nh.advertise<audio_common_msgs::AudioData>("audio", 10, true);
//以下这几步基本是固定的
_loop = g_main_loop_new(NULL, false);
_pipeline = gst_pipeline_new("ros_pipeline");
_bus = gst_pipeline_get_bus(GST_PIPELINE(_pipeline));
gst_bus_add_signal_watch(_bus);
g_signal_connect(_bus, "message::error",
G_CALLBACK(onMessage), this); //这里是连接一个message
g_object_unref(_bus);
// We create the sink first, just for convenience
if (dst_type == "appsink")
{
_sink = gst_element_factory_make("appsink", "sink");
g_object_set(G_OBJECT(_sink), "emit-signals", true, NULL); //默认这个属性是false
g_object_set(G_OBJECT(_sink), "max-buffers", 100, NULL); //默认是0,也就是无限制
//gst-inspect-1.0 appsink查看信号
/* "eos" : void user_function (GstElement* object,
gpointer user_data);
"new-preroll" : GstFlowReturn user_function (GstElement* object,
gpointer user_data);
"new-sample" : GstFlowReturn user_function (GstElement* object,
gpointer user_data);
*/
g_signal_connect( G_OBJECT(_sink), "new-sample", //appsink的数据通过new-sample的信号发送出去 gst-inspect-1.0 appsink查看信号
G_CALLBACK(onNewBuffer), this);
}
else
{
printf("file sink\n");
_sink = gst_element_factory_make("filesink", "sink");
g_object_set( G_OBJECT(_sink), "location", dst_type.c_str(), NULL);
}
_source = gst_element_factory_make("alsasrc", "source");
// if device isn't specified, it will use the default which is
// the alsa default source.
// A valid device will be of the foram hw:0,0 with other numbers
// than 0 and 0 as are available.
if (device != "")
{
// ghcar *gst_device = device.c_str();
g_object_set(G_OBJECT(_source), "device", device.c_str(), NULL);
}
_filter = gst_element_factory_make("capsfilter", "filter");
{
GstCaps *caps;
caps = gst_caps_new_simple("audio/x-raw",
// "channels", G_TYPE_INT, _channels,
// "depth", G_TYPE_INT, _depth,
"rate", G_TYPE_INT, _sample_rate,
// "signed", G_TYPE_BOOLEAN, TRUE,
NULL);
g_object_set( G_OBJECT(_filter), "caps", caps, NULL);
gst_caps_unref(caps);
}
_convert = gst_element_factory_make("audioconvert", "convert");
if (!_convert) {
ROS_ERROR_STREAM("Failed to create audioconvert element");
exitOnMainThread(1);
}
gboolean link_ok;
if (_format == "mp3"){
_encode = gst_element_factory_make("lamemp3enc", "encoder");
if (!_encode) {
ROS_ERROR_STREAM("Failed to create encoder element");
exitOnMainThread(1);
}
g_object_set( G_OBJECT(_encode), "quality", 2.0, NULL);
g_object_set( G_OBJECT(_encode), "bitrate", _bitrate, NULL);
gst_bin_add_many( GST_BIN(_pipeline), _source, _filter, _convert, _encode, _sink, NULL);
link_ok = gst_element_link_many(_source, _filter, _convert, _encode, _sink, NULL);
} else if (_format == "wave") {
GstCaps *caps;
caps = gst_caps_new_simple("audio/x-raw",
"channels", G_TYPE_INT, _channels,
"width", G_TYPE_INT, _depth,
"depth", G_TYPE_INT, _depth,
"rate", G_TYPE_INT, _sample_rate,
"signed", G_TYPE_BOOLEAN, TRUE,
NULL);
//only for appsink
g_object_set( G_OBJECT(_sink), "caps", caps, NULL);
gst_caps_unref(caps);
gst_bin_add_many( GST_BIN(_pipeline), _source, _sink, NULL);
link_ok = gst_element_link_many( _source, _sink, NULL);
} else {
ROS_ERROR_STREAM("format must be \"wave\" or \"mp3\"");
exitOnMainThread(1);
}
/*}
else
{
_sleep_time = 10000;
_source = gst_element_factory_make("filesrc", "source");
g_object_set(G_OBJECT(_source), "location", source_type.c_str(), NULL);
gst_bin_add_many( GST_BIN(_pipeline), _source, _sink, NULL);
gst_element_link_many(_source, _sink, NULL);
}
*/
if (!link_ok) {
ROS_ERROR_STREAM("Unsupported media type.");
exitOnMainThread(1);
}
gst_element_set_state(GST_ELEMENT(_pipeline), GST_STATE_PLAYING);
_gst_thread = boost::thread( boost::bind(g_main_loop_run, _loop) );
}
~RosGstCapture()
{
g_main_loop_quit(_loop);
gst_element_set_state(_pipeline, GST_STATE_NULL);
gst_object_unref(_pipeline);
g_main_loop_unref(_loop);
}
void exitOnMainThread(int code)
{
exit(code);
}
void publish( const audio_common_msgs::AudioData &msg )
{
_pub.publish(msg);
}
//取数据的方法很关键
static GstFlowReturn onNewBuffer (GstAppSink *appsink, gpointer userData)
{
RosGstCapture *server = reinterpret_cast<RosGstCapture*>(userData);
GstMapInfo map;
GstSample *sample;
g_signal_emit_by_name(appsink, "pull-sample", &sample); //发送一个pull-sample actions,读一个数据
GstBuffer *buffer = gst_sample_get_buffer(sample);
audio_common_msgs::AudioData msg;
gst_buffer_map(buffer, &map, GST_MAP_READ);//做一个读的映射
msg.data.resize( map.size );
memcpy( &msg.data[0], map.data, map.size );
gst_buffer_unmap(buffer, &map);//关闭映射
gst_sample_unref(sample); //释放sample
//发布topic
server->publish(msg);
return GST_FLOW_OK;
}
//从bus中读取消息,出错了就关闭退出程序
static gboolean onMessage (GstBus *bus, GstMessage *message, gpointer userData)
{
RosGstCapture *server = reinterpret_cast<RosGstCapture*>(userData);
GError *err;
gchar *debug;
gst_message_parse_error(message, &err, &debug);
ROS_ERROR_STREAM("gstreamer: " << err->message);
g_error_free(err);
g_free(debug);
g_main_loop_quit(server->_loop);
server->exitOnMainThread(1);
return FALSE;
}
private:
ros::NodeHandle _nh;
ros::Publisher _pub;
boost::thread _gst_thread;
GstElement *_pipeline, *_source, *_filter, *_sink, *_convert, *_encode;
GstBus *_bus;
int _bitrate, _channels, _depth, _sample_rate;
GMainLoop *_loop;
std::string _format;
};
}
// Entry point for the capture node: initialise ROS and GStreamer (gst_init
// strips GStreamer-specific options from argc/argv), build the capture
// pipeline, then hand control to the ROS event loop.
int main (int argc, char **argv)
{
  ros::init(argc, argv, "audio_capture");
  gst_init(&argc, &argv);

  audio_transport::RosGstCapture server;
  ros::spin();
  return 0;
}
audio_player.cpp
#include <gst/gst.h>
#include <gst/app/gstappsrc.h>
#include <ros/ros.h>
#include <boost/thread.hpp>
#include "audio_common_msgs/AudioData.h"
namespace audio_transport
{
  // Subscribes to the "audio" topic and feeds the received bytes into a
  // GStreamer appsrc; playback goes through decodebin -> audioconvert ->
  // autoaudiosink, or straight into a filesink.
  class RosGstPlay
  {
  public:
    RosGstPlay()
    {
      GstPad *audiopad;

      std::string dst_type;
      std::string device;
      bool do_timestamp;

      // Destination: "alsasink" plays the audio; any other value is treated
      // as a filesink location.
      ros::param::param<std::string>("~dst", dst_type, "alsasink");
      ros::param::param<std::string>("~device", device, std::string());
      ros::param::param<bool>("~do_timestamp", do_timestamp, true);

      _sub = _nh.subscribe("audio", 10, &RosGstPlay::onAudio, this);

      // Standard GStreamer boilerplate: main loop first, then the pipeline.
      _loop = g_main_loop_new(NULL, false);
      _pipeline = gst_pipeline_new("app_pipeline");

      _source = gst_element_factory_make("appsrc", "app_source");
      g_object_set(G_OBJECT(_source), "do-timestamp",
                   (do_timestamp) ? TRUE : FALSE, NULL);
      gst_bin_add(GST_BIN(_pipeline), _source);

      if (dst_type == "alsasink")
      {
        _decoder = gst_element_factory_make("decodebin", "decoder");
        // decodebin creates its source pads dynamically once it has sniffed
        // the stream; link them in the "pad-added" callback.
        g_signal_connect(_decoder, "pad-added", G_CALLBACK(cb_newpad), this);
        gst_bin_add(GST_BIN(_pipeline), _decoder);
        gst_element_link(_source, _decoder);

        _audio = gst_bin_new("audiobin");
        _convert = gst_element_factory_make("audioconvert", "convert");
        // Expose audioconvert's sink pad as a ghost pad so decodebin can
        // link to the bin as a whole.
        audiopad = gst_element_get_static_pad(_convert, "sink");
        _sink = gst_element_factory_make("autoaudiosink", "sink");
        if (!device.empty())
        {
          // Select which output device autoaudiosink should use.
          g_object_set(G_OBJECT(_sink), "device", device.c_str(), NULL);
        }
        gst_bin_add_many(GST_BIN(_audio), _convert, _sink, NULL);
        gst_element_link(_convert, _sink);
        gst_element_add_pad(_audio, gst_ghost_pad_new("sink", audiopad));
        gst_object_unref(audiopad);

        gst_bin_add(GST_BIN(_pipeline), _audio);
      }
      else
      {
        _sink = gst_element_factory_make("filesink", "sink");
        g_object_set(G_OBJECT(_sink), "location", dst_type.c_str(), NULL);
        gst_bin_add(GST_BIN(_pipeline), _sink);
        gst_element_link(_source, _sink);
      }

      gst_element_set_state(GST_ELEMENT(_pipeline), GST_STATE_PLAYING);

      // Run the glib main loop on its own thread so ros::spin() keeps the
      // calling thread.
      _gst_thread = boost::thread(boost::bind(g_main_loop_run, _loop));
    }

    ~RosGstPlay()
    {
      g_main_loop_quit(_loop);
      // Join the glib thread before tearing the loop/pipeline down,
      // mirroring the capture node's shutdown sequence.
      if (_gst_thread.joinable())
        _gst_thread.join();
      gst_element_set_state(_pipeline, GST_STATE_NULL);
      gst_object_unref(_pipeline);
      g_main_loop_unref(_loop);
    }

  private:
    // Topic callback: wrap the message bytes in a GstBuffer and push it into
    // appsrc via the "push-buffer" action signal.
    void onAudio(const audio_common_msgs::AudioDataConstPtr &msg)
    {
      if (msg->data.empty())
        return;  // &msg->data[0] is undefined on an empty vector

      GstBuffer *buffer = gst_buffer_new_and_alloc(msg->data.size());
      gst_buffer_fill(buffer, 0, &msg->data[0], msg->data.size());

      GstFlowReturn ret;
      g_signal_emit_by_name(_source, "push-buffer", buffer, &ret);
      // Unlike gst_app_src_push_buffer(), the "push-buffer" action signal
      // does NOT take ownership of the buffer; without this unref every
      // message leaks one buffer.
      gst_buffer_unref(buffer);
    }

    // decodebin "pad-added" callback: link the newly exposed pad to the
    // audio bin, but only for audio streams and only once.
    static void cb_newpad(GstElement *decodebin, GstPad *pad,
                          gpointer data)
    {
      RosGstPlay *client = reinterpret_cast<RosGstPlay*>(data);

      GstCaps *caps;
      GstStructure *str;
      GstPad *audiopad;

      /* only link once */
      audiopad = gst_element_get_static_pad(client->_audio, "sink");
      if (GST_PAD_IS_LINKED(audiopad))
      {
        gst_object_unref(audiopad);
        return;
      }

      /* check media type */
      caps = gst_pad_query_caps(pad, NULL);
      str = gst_caps_get_structure(caps, 0);
      if (!g_strrstr(gst_structure_get_name(str), "audio"))
      {
        gst_caps_unref(caps);
        gst_object_unref(audiopad);
        return;
      }
      gst_caps_unref(caps);

      /* link'n'play: connect decodebin's src pad to the audio bin's sink */
      gst_pad_link(pad, audiopad);
      gst_object_unref(audiopad);
    }

    ros::NodeHandle _nh;
    ros::Subscriber _sub;

    boost::thread _gst_thread;

    GstElement *_pipeline, *_source, *_sink, *_decoder, *_convert, *_audio;
    GstElement *_playbin;
    GMainLoop *_loop;
  };
}
// Entry point for the playback node: initialise ROS and GStreamer, construct
// the player (which builds and starts the pipeline), then spin on ROS events.
int main (int argc, char **argv)
{
  ros::init(argc, argv, "audio_play");
  gst_init(&argc, &argv);

  audio_transport::RosGstPlay player;

  ros::spin();
  return 0;
}