ROS Melodic Source Code Analysis: connect_manager

1. First, analyzing transport

transport encapsulates TCP and UDP communication: it wraps the basic socket operations and adds a set of callbacks, and when an event fires, the corresponding callback is invoked.

First, the Transport base class:

File: src/ros_comm/roscpp/include/ros/transport/transport.h

56: class ROSCPP_DECL Transport : public boost::enable_shared_from_this<Transport>
57: {
58: public:
        // read/write functions
68:   virtual int32_t read(uint8_t* buffer, uint32_t size) = 0;
75:   virtual int32_t write(uint8_t* buffer, uint32_t size) = 0;
        // read/write control
80:   virtual void enableWrite() = 0;
84:   virtual void disableWrite() = 0;
89:   virtual void enableRead() = 0;
93:   virtual void disableRead() = 0;
98:   virtual void close() = 0;

        // callbacks: setters for the disconnect, read, and write callbacks
106:   typedef boost::function<void(const TransportPtr&)> Callback;
110:   void setDisconnectCallback(const Callback& cb) { disconnect_cb_ = cb; }
114:   void setReadCallback(const Callback& cb) { read_cb_ = cb; }
118:   void setWriteCallback(const Callback& cb) { write_cb_ = cb; }


128:   virtual bool requiresHeader() {return true;}

133:   virtual void parseHeader(const Header& header) { (void)header; }
134: 
135: protected:
136:   Callback disconnect_cb_;
137:   Callback read_cb_;
138:   Callback write_cb_;
149: 
150: private:
151:   bool only_localhost_allowed_;
152:   std::vector<std::string> allowed_hosts_;
153: };

The Transport base class declares pure virtual methods for reading/writing and for read/write control, which subclasses must implement. It also defines setters for the disconnect, read, and write callbacks.
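
This callback-registration pattern is easy to see in isolation. Below is a minimal, self-contained sketch of the same idea (demo types of my own, not roscpp code): the owner registers a boost::function, and the transport fires it when the event loop reports activity.

#include <boost/enable_shared_from_this.hpp>
#include <boost/function.hpp>
#include <boost/make_shared.hpp>
#include <boost/shared_ptr.hpp>
#include <iostream>

class DemoTransport;
typedef boost::shared_ptr<DemoTransport> DemoTransportPtr;

class DemoTransport : public boost::enable_shared_from_this<DemoTransport>
{
public:
  typedef boost::function<void(const DemoTransportPtr&)> Callback;

  void setReadCallback(const Callback& cb) { read_cb_ = cb; }

  // Pretend the poll loop just reported POLLIN on our socket.
  void simulateReadable()
  {
    if (read_cb_)
      read_cb_(shared_from_this());
  }

private:
  Callback read_cb_;
};

void onReadable(const DemoTransportPtr&)
{
  std::cout << "socket is readable, dispatching to the owner" << std::endl;
}

int main()
{
  DemoTransportPtr t = boost::make_shared<DemoTransport>();
  t->setReadCallback(onReadable);
  t->simulateReadable();  // prints once; with no callback set, nothing happens
  return 0;
}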

Next, the TransportTCP class declaration:

File: src/ros_comm/roscpp/include/ros/transport/transport_tcp.h

57: class ROSCPP_DECL TransportTCP : public Transport
58: {
59: public:
71:   TransportTCP(PollSet* poll_set, int flags = 0);

80:   bool connect(const std::string& host, int port);

87:   typedef boost::function<void(const TransportTCPPtr&)> AcceptCallback;

94:   bool listen(int port, int backlog, const AcceptCallback& accept_cb);

98:   TransportTCPPtr accept();


        // overrides of the base-class methods
112:   virtual int32_t read(uint8_t* buffer, uint32_t size);
113:   virtual int32_t write(uint8_t* buffer, uint32_t size);
115:   virtual void enableWrite();
116:   virtual void disableWrite();
117:   virtual void enableRead();
118:   virtual void disableRead();

123: 
124:   virtual void parseHeader(const Header& header);
125: 
126:   virtual const char* getType() { return "TCPROS"; }
127: 
128: private:

132:   bool initializeSocket();

141:   bool setSocket(int sock);
142: 
143:   void socketUpdate(int events);
144: 
145:   socket_fd_t sock_;
146:   bool closed_;
147:   boost::recursive_mutex close_mutex_;
148: 
149:   bool expecting_read_;
150:   bool expecting_write_;
151: 
152:   bool is_server_;
153:   sockaddr_storage server_address_;
154:   socklen_t sa_len_;
155:   sockaddr_storage local_address_;
156:   socklen_t la_len_;
157: 
158:   int server_port_;
159:   int local_port_;
160:   AcceptCallback accept_cb_;
161: 
162:   std::string cached_remote_host_;
163: 
164:   PollSet* poll_set_;  // poll_set was covered in an earlier article
165:   int flags_;
166: 
167:   std::string connected_host_;
168:   int connected_port_;
169: };

Next, the TransportTCP implementation:

File: src/ros_comm/roscpp/src/libros/transport/transport_tcp.cpp

54: TransportTCP::TransportTCP(PollSet* poll_set, int flags)
55: : sock_(ROS_INVALID_SOCKET)
56: , closed_(false)
57: , expecting_read_(false)
58: , expecting_write_(false)
59: , is_server_(false)
60: , server_port_(-1)
61: , local_port_(-1)
62: , poll_set_(poll_set)
63: , flags_(flags)
64: {
65: 
66: }
72: 
73: bool TransportTCP::setSocket(int sock)
74: {
75:   sock_ = sock;
76:   return initializeSocket();
77: }

93: 
94: bool TransportTCP::initializeSocket()
95: {
138: 
140:   if (poll_set_)
141:   {
143:     poll_set_->addSocket(sock_, boost::bind(&TransportTCP::socketUpdate, this, _1), shared_from_this());
148:   }
154: 
155:   return true;
156: }
228: 

663: void TransportTCP::socketUpdate(int events)
664: {
665:   {
672:     // Handle read events before err/hup/nval, since there may be data left on the wire
673:     if ((events & POLLIN) && expecting_read_)
674:     {
675:       if (is_server_)
676:       {
677:         // Should not block here, because poll() said that it's ready
678:         // for reading
679:         TransportTCPPtr transport = accept();
680:         if (transport)
681:         {
                // invoke the accept callback
682:           ROS_ASSERT(accept_cb_);
683:           accept_cb_(transport);
684:         }
685:       }
686:       else
687:       {
                // invoke the read callback
688:         if (read_cb_)
689:         {
690:           read_cb_(shared_from_this());
691:         }
692:       }
693:     }
694:     // invoke the write callback
695:     if ((events & POLLOUT) && expecting_write_)
696:     {
697:       if (write_cb_)
698:       {
699:         write_cb_(shared_from_this());
700:       }
701:     }
702:   }
703: 
727: }

We can see the chain TransportTCP::setSocket –> initializeSocket() –> poll_set_->addSocket(sock_, boost::bind(&TransportTCP::socketUpdate, this, _1), shared_from_this());
This way, the poll_manager main loop watches sock_ for changes; when an event occurs it calls TransportTCP::socketUpdate, which then invokes the accept, read, or write callback.
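
To make the addSocket registration concrete, here is a minimal sketch of what a poll-based dispatcher loop conceptually does (plain POSIX poll() with a hypothetical addSocket/pollOnce pair, not the real PollSet API): poll the registered fds and hand each returned event mask to its per-socket update function, exactly the role socketUpdate(events) plays above.

#include <poll.h>
#include <boost/function.hpp>
#include <map>
#include <vector>

// Hypothetical stand-in for PollSet: each fd maps to an update function that
// receives the revents mask, like TransportTCP::socketUpdate(int events).
typedef boost::function<void(int)> SocketUpdateFunc;
static std::map<int, SocketUpdateFunc> g_handlers;

void addSocket(int fd, const SocketUpdateFunc& update)
{
  g_handlers[fd] = update;
}

// One turn of the big loop: poll every registered fd and dispatch.
void pollOnce(int timeout_ms)
{
  std::vector<pollfd> fds;
  for (std::map<int, SocketUpdateFunc>::iterator it = g_handlers.begin();
       it != g_handlers.end(); ++it)
  {
    pollfd p;
    p.fd = it->first;
    p.events = POLLIN | POLLOUT;
    p.revents = 0;
    fds.push_back(p);
  }

  if (fds.empty())
    return;

  if (poll(&fds[0], fds.size(), timeout_ms) > 0)
  {
    for (size_t i = 0; i < fds.size(); ++i)
      if (fds[i].revents)
        g_handlers[fds[i].fd](fds[i].revents);  // --> socketUpdate(events)
  }
}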

229: bool TransportTCP::connect(const std::string& host, int port)
230: {
234:   sock_ = socket(s_use_ipv6_ ? AF_INET6 : AF_INET, SOCK_STREAM, 0);
235:   connected_host_ = host;
236:   connected_port_ = port;

329:   int ret = ::connect(sock_, (sockaddr*) &sas, sas_len);
369: }
370: 
371: bool TransportTCP::listen(int port, int backlog, const AcceptCallback& accept_cb)
372: {
373:   is_server_ = true;
374:   accept_cb_ = accept_cb;
375: 
389:     sock_ = socket(AF_INET, SOCK_STREAM, 0);

406:   if (bind(sock_, (sockaddr *)&server_address_, sa_len_) < 0)
407:   {
408:     ROS_ERROR("bind() failed with error [%s]", last_socket_error_string());
409:     return false;
410:   }
411: 
412:   ::listen(sock_, backlog);
413:   getsockname(sock_, (sockaddr *)&server_address_, &sa_len_);
414: 
424: 
425:   if (!initializeSocket())

434: 
435:   return true;
436: }

TransportTCP::connect calls socket and ::connect, which shows it is the interface a TCP client calls.
TransportTCP::listen performs the socket, bind, and ::listen operations, which shows it is the interface a TCP server calls.
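
For reference, the underlying POSIX calls split across the two roles exactly this way. A minimal sketch (hypothetical helper functions, error handling omitted):

#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <cstring>

// Client role, as in TransportTCP::connect: socket() then ::connect().
int demo_tcp_client(const char* host, int port)
{
  int s = socket(AF_INET, SOCK_STREAM, 0);
  sockaddr_in sin;
  std::memset(&sin, 0, sizeof(sin));
  sin.sin_family = AF_INET;
  sin.sin_port = htons(port);
  inet_pton(AF_INET, host, &sin.sin_addr);
  ::connect(s, (sockaddr*)&sin, sizeof(sin));
  return s;
}

// Server role, as in TransportTCP::listen: socket(), bind(), ::listen();
// accept() then runs when poll reports POLLIN on this listening socket.
int demo_tcp_server(int port, int backlog)
{
  int s = socket(AF_INET, SOCK_STREAM, 0);
  sockaddr_in sin;
  std::memset(&sin, 0, sizeof(sin));
  sin.sin_family = AF_INET;
  sin.sin_addr.s_addr = htonl(INADDR_ANY);
  sin.sin_port = htons(port);
  bind(s, (sockaddr*)&sin, sizeof(sin));
  ::listen(s, backlog);
  return s;
}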

484: int32_t TransportTCP::read(uint8_t* buffer, uint32_t size)
485: {
497: 
498:   // never read more than INT_MAX since this is the maximum we can report back with the current return type
499:   uint32_t read_size = std::min(size, static_cast<uint32_t>(INT_MAX));
500:   int num_bytes = ::recv(sock_, reinterpret_cast<char*>(buffer), read_size, 0);
519: 
520:   return num_bytes;
521: }
522: 
523: int32_t TransportTCP::write(uint8_t* buffer, uint32_t size)
524: {

538:   uint32_t writesize = std::min(size, static_cast<uint32_t>(INT_MAX));
539:   int num_bytes = ::send(sock_, reinterpret_cast<const char*>(buffer), writesize, 0);
553:   return num_bytes;
554: }

These define the basic read/write interfaces.

555: 
556: void TransportTCP::enableRead()
557: {
569:   if (!expecting_read_)
570:   {
571:     poll_set_->addEvents(sock_, POLLIN);
572:     expecting_read_ = true;
573:   }
574: }
575: 
576: void TransportTCP::disableRead()
577: {
589:   if (expecting_read_)
590:   {
591:     poll_set_->delEvents(sock_, POLLIN);
592:     expecting_read_ = false;
593:   }
594: }
595: 
596: void TransportTCP::enableWrite()
597: {
609:   if (!expecting_write_)
610:   {
611:     poll_set_->addEvents(sock_, POLLOUT);
612:     expecting_write_ = true;
613:   }
614: }
615: 
616: void TransportTCP::disableWrite()
617: {
628: 
629:   if (expecting_write_)
630:   {
631:     poll_set_->delEvents(sock_, POLLOUT);
632:     expecting_write_ = false;
633:   }
634: }
635: 

The code above defines the read/write control interfaces.

636: TransportTCPPtr TransportTCP::accept()
637: {
638:   ROS_ASSERT(is_server_);
639: 
640:   sockaddr client_address;
641:   socklen_t len = sizeof(client_address);
642:   int new_sock = ::accept(sock_, (sockaddr *)&client_address, &len);

647:     TransportTCPPtr transport(boost::make_shared<TransportTCP>(poll_set_, flags_));
648:     if (!transport->setSocket(new_sock))
661: }
662: 

The above defines the accept interface.

From the TransportTCP declaration and implementation we can see it is a basic wrapper over the TCP interfaces: when an event occurs, the corresponding callback is invoked.

Next, the TransportUDP declaration:

File: src/ros_comm/roscpp/include/ros/transport/transport_udp.h

57: typedef struct TransportUDPHeader {
58:   uint32_t connection_id_;
59:   uint8_t op_;
60:   uint8_t message_id_;
61:   uint16_t block_;
62: } TransportUDPHeader;
63: 
64: /**
65:  * \brief UDPROS transport
66:  */
67: class ROSCPP_DECL TransportUDP : public Transport
68: {
69: public:

75:   TransportUDP(PollSet* poll_set, int flags = 0, int max_datagram_size = 0);

84:   bool connect(const std::string& host, int port, int conn_id);


95:   bool createIncoming(int port, bool is_server);
99:   TransportUDPPtr createOutgoing(std::string host, int port, int conn_id, int max_datagram_size);

104: 
105:   // overrides from Transport
106:   virtual int32_t read(uint8_t* buffer, uint32_t size);
107:   virtual int32_t write(uint8_t* buffer, uint32_t size);
108: 
109:   virtual void enableWrite();
110:   virtual void disableWrite();
111:   virtual void enableRead();
112:   virtual void disableRead();
113: 
114:   virtual void close();
115: 
117: 
118:   virtual bool requiresHeader() {return false;}
119: 
120:   virtual const char* getType() {return "UDPROS";}
121: 
122:   int getMaxDatagramSize() const {return max_datagram_size_;}
123: 
124: private:

128:   bool initializeSocket();

135:   bool setSocket(int sock);
136: 
137:   void socketUpdate(int events);
138: 
139:   socket_fd_t sock_;
140:   bool closed_;
141:   boost::mutex close_mutex_;
142: 
143:   bool expecting_read_;
144:   bool expecting_write_;
145: 
146:   bool is_server_;
147:   sockaddr_in server_address_;
148:   sockaddr_in local_address_;
149:   int server_port_;
150:   int local_port_;
151: 
152:   std::string cached_remote_host_;
153: 
154:   PollSet* poll_set_;
155:   int flags_;
156: 
157:   uint32_t connection_id_;
158:   uint8_t current_message_id_;
159:   uint16_t total_blocks_;
160:   uint16_t last_block_;
161: 
162:   uint32_t max_datagram_size_;
163: 
164:   uint8_t* data_buffer_;
165:   uint8_t* data_start_;
166:   uint32_t data_filled_;
167: 
168:   uint8_t* reorder_buffer_;
169:   uint8_t* reorder_start_;
170:   TransportUDPHeader reorder_header_;
171:   uint32_t reorder_bytes_;
172: };

The above declares the UDP interface; the implementation is as follows:

File: src/ros_comm/roscpp/src/libros/transport/transport_udp.cpp

62: TransportUDP::TransportUDP(PollSet* poll_set, int flags, int max_datagram_size)
63: : sock_(-1)
64: , closed_(false)
65: , expecting_read_(false)
66: , expecting_write_(false)
67: , is_server_(false)
68: , server_address_{}
69: , local_address_{}
70: , server_port_(-1)
71: , local_port_(-1)
72: , poll_set_(poll_set)
73: , flags_(flags)
74: , connection_id_(0)
75: , current_message_id_(0)
76: , total_blocks_(0)
77: , last_block_(0)
78: , max_datagram_size_(max_datagram_size)
79: , data_filled_(0)
80: , reorder_buffer_(0)
81: , reorder_bytes_(0)
82: {
83:   // This may eventually be machine dependent
84:   if (max_datagram_size_ == 0)
85:     max_datagram_size_ = 1500;
86:   reorder_buffer_ = new uint8_t[max_datagram_size_];
87:   reorder_start_ = reorder_buffer_;
88:   data_buffer_ = new uint8_t[max_datagram_size_];
89:   data_start_ = data_buffer_;
90: } 
98: 
99: bool TransportUDP::setSocket(int sock)
100: {
101:   sock_ = sock;
102:   return initializeSocket();
103: }
280: bool TransportUDP::initializeSocket()
281: {

299:   if (poll_set_)
300:   {
301:     poll_set_->addSocket(sock_, boost::bind(&TransportUDP::socketUpdate, this, _1), shared_from_this());
302:   }
303: 
305: }
105: void TransportUDP::socketUpdate(int events)
106: {

116:   if((events & POLLERR) ||
117:      (events & POLLHUP) ||
118:      (events & POLLNVAL))
119:   {
120:     ROSCPP_LOG_DEBUG("Socket %d closed with (ERR|HUP|NVAL) events %d", sock_, events);
121:     close();
122:   }
123:   else
124:   {
125:     if ((events & POLLIN) && expecting_read_)
126:     {
            // invoke the read callback
127:       if (read_cb_)
128:       {
129:         read_cb_(shared_from_this());
130:       }
131:     }
132: 
133:     if ((events & POLLOUT) && expecting_write_)
134:     {
            // invoke the write callback
135:       if (write_cb_)
136:       {
137:         write_cb_(shared_from_this());
138:       }
139:     }
140:   }
142: }

The code above is essentially the same as the TCP version.

150: 
151: bool TransportUDP::connect(const std::string& host, int port, int connection_id)
152: {
212: 
213:   if (::connect(sock_, (sockaddr *)&sin, sizeof(sin)))

232:   if (!initializeSocket())

240: }

694: TransportUDPPtr TransportUDP::createOutgoing(std::string host, int port, int connection_id, int max_datagram_size)
695: {
696:   ROS_ASSERT(is_server_);
697:   
698:   TransportUDPPtr transport(boost::make_shared<TransportUDP>(poll_set_, flags_, max_datagram_size));
699:   if (!transport->connect(host, port, connection_id))

704:   return transport;
705: 
706: }
707: 
241: 
242: bool TransportUDP::createIncoming(int port, bool is_server)
243: {
244:   is_server_ = is_server;
245: 
246:   sock_ = socket(AF_INET, SOCK_DGRAM, 0);

259:   if (bind(sock_, (sockaddr *)&server_address_, sizeof(server_address_)) < 0)

270:   if (!initializeSocket())

275:   enableRead();
276: 
277:   return true;
278: }

The call chain above (–> means calls) is: TransportUDP::createOutgoing –> TransportUDP::connect –> ::connect, which shows createOutgoing is the interface a UDP client calls.

TransportUDP::createIncoming calls socket and bind, which shows createIncoming is the interface a UDP server calls.
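
The UDP split is analogous at the socket level. A minimal sketch (hypothetical helper functions, error handling omitted): a connected datagram socket for the outgoing role, a bound one for the incoming role.

#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <cstring>

// Outgoing role, as in TransportUDP::connect: a connected datagram socket can
// then use send()/writev() without naming the peer on every call.
int demo_udp_outgoing(const char* host, int port)
{
  int s = socket(AF_INET, SOCK_DGRAM, 0);
  sockaddr_in sin;
  std::memset(&sin, 0, sizeof(sin));
  sin.sin_family = AF_INET;
  sin.sin_port = htons(port);
  inet_pton(AF_INET, host, &sin.sin_addr);
  ::connect(s, (sockaddr*)&sin, sizeof(sin));
  return s;
}

// Incoming role, as in TransportUDP::createIncoming: bind a local port
// (port 0 lets the kernel choose; getsockname() can report the result).
int demo_udp_incoming(int port)
{
  int s = socket(AF_INET, SOCK_DGRAM, 0);
  sockaddr_in sin;
  std::memset(&sin, 0, sizeof(sin));
  sin.sin_family = AF_INET;
  sin.sin_addr.s_addr = htonl(INADDR_ANY);
  sin.sin_port = htons(port);
  bind(s, (sockaddr*)&sin, sizeof(sin));
  return s;
}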

351: int32_t TransportUDP::read(uint8_t* buffer, uint32_t size)
352: {
353:   {
354:     boost::mutex::scoped_lock lock(close_mutex_);
355:     if (closed_)
356:     {
357:       ROSCPP_LOG_DEBUG("Tried to read on a closed socket [%d]", sock_);
358:       return -1;
359:     }
360:   }
361: 
362:   ROS_ASSERT((int32_t)size > 0);
363: 
364:   uint32_t bytes_read = 0;
365: 
366:   while (bytes_read < size)
367:   {
368:     TransportUDPHeader header;
369: 
370:     // Get the data either from the reorder buffer or the socket
371:     // copy_bytes will contain the read size.
372:     // from_previous is true if the data belongs to the previous UDP datagram.
373:     uint32_t copy_bytes = 0;
374:     bool from_previous = false;
375:     if (reorder_bytes_)
376:     {
377:       if (reorder_start_ != reorder_buffer_)
378:       {
379:         from_previous = true;
380:       }
381: 
382:       copy_bytes = std::min(size - bytes_read, reorder_bytes_);
383:       header = reorder_header_;
384:       memcpy(buffer + bytes_read, reorder_start_, copy_bytes);
385:       reorder_bytes_ -= copy_bytes;
386:       reorder_start_ += copy_bytes;
387:     }
388:     else
389:     {
390:       if (data_filled_ == 0)
391:       {
408:         ssize_t num_bytes;
409:         struct iovec iov[2];
410:         iov[0].iov_base = &header;
411:         iov[0].iov_len = sizeof(header);
412:         iov[1].iov_base = data_buffer_;
413:         iov[1].iov_len = max_datagram_size_ - sizeof(header);
414:         // Read a datagram with header
415:         num_bytes = readv(sock_, iov, 2);
443: 
444: 		num_bytes -= sizeof(header);
445:         data_filled_ = num_bytes;
446:         data_start_ = data_buffer_;
447:       }
448:       else
449:       {
450:         from_previous = true;
451:       }
452: 
453:       copy_bytes = std::min(size - bytes_read, data_filled_);
454:       // Copy from the data buffer, whether it has data left in it from a previous datagram or
455:       // was just filled by readv()
456:       memcpy(buffer + bytes_read, data_start_, copy_bytes);
457:       data_filled_ -= copy_bytes;
458:       data_start_ += copy_bytes;
459:     }
460: 
461: 
462:     if (from_previous)
463:     {
464:       // We are simply reading data from the last UDP datagram, nothing to
465:       // parse
466:       bytes_read += copy_bytes;
467:     }
468:     else
469:     {
470:       // This datagram is new, process header
471:       switch (header.op_)
472:       {
473:         case ROS_UDP_DATA0:
474:           if (current_message_id_)
475:           {
476:             ROS_DEBUG("Received new message [%d:%d], while still working on [%d] (block %d of %d)", header.message_id_, header.block_, current_message_id_, last_block_ + 1, total_blocks_);
477:             reorder_header_ = header;
478: 
479:             // Copy the entire data buffer to the reorder buffer, as we will
480:             // need to replay this UDP datagram in the next call.
481:             reorder_bytes_ = data_filled_ + (data_start_ - data_buffer_);
482:             memcpy(reorder_buffer_, data_buffer_, reorder_bytes_);
483:             reorder_start_ = reorder_buffer_;
484:             current_message_id_ = 0;
485:             total_blocks_ = 0;
486:             last_block_ = 0;
487: 
488:             data_filled_ = 0;
489:             data_start_ = data_buffer_;
490:             return -1;
491:           }
492:           total_blocks_ = header.block_;
493:           last_block_ = 0;
494:           current_message_id_ = header.message_id_;
495:           break;
496:         case ROS_UDP_DATAN:
497:           if (header.message_id_ != current_message_id_)
498:           {
499:             ROS_DEBUG("Message Id mismatch: %d != %d", header.message_id_, current_message_id_);
500:             data_filled_ = 0; // discard datagram
501:             return 0;
502:           }
503:           if (header.block_ != last_block_ + 1)
504:           {
505:             ROS_DEBUG("Expected block %d, received %d", last_block_ + 1, header.block_);
506:             data_filled_ = 0; // discard datagram
507:             return 0;
508:           }
509:           last_block_ = header.block_;
510: 
511:           break;
512:         default:
513:           ROS_ERROR("Unexpected UDP header OP [%d]", header.op_);
514:           return -1;
515:       }
516: 
517:       bytes_read += copy_bytes;
518: 
519:       if (last_block_ == (total_blocks_ - 1))
520:       {
521:         current_message_id_ = 0;
522:         break;
523:       }
524:     }
525:   }
526: 
527:   return bytes_read;
528: }
529: 
530: int32_t TransportUDP::write(uint8_t* buffer, uint32_t size)
531: {

550:   while (bytes_sent < size)
551:   {
558:     ssize_t num_bytes = writev(sock_, iov, 2);
612:     num_bytes -= sizeof(header);
614:     bytes_sent += num_bytes;
615:   }
616: 
617:   return bytes_sent;
618: }

The read function's logic is complex, but its core call is readv(sock_, iov, 2); for the full datagram reassembly handling (the DATA0/DATAN blocks and the reorder buffer), refer to the source.
The write function is simpler; its core call is writev(sock_, iov, 2).
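
The readv/writev pair is scatter/gather I/O: the fixed-size header and the payload live in separate buffers but travel in a single datagram. A standalone sketch of the same technique, using a demo struct shaped like the TransportUDPHeader shown earlier (not roscpp code):

#include <sys/types.h>
#include <sys/uio.h>
#include <stdint.h>

// Demo struct with the same shape as TransportUDPHeader above.
struct DemoHeader
{
  uint32_t connection_id_;
  uint8_t  op_;
  uint8_t  message_id_;
  uint16_t block_;
};

// Send one datagram: header and payload gathered from two separate buffers.
ssize_t send_datagram(int sock, const DemoHeader& h, const uint8_t* data, size_t len)
{
  iovec iov[2];
  iov[0].iov_base = const_cast<DemoHeader*>(&h);
  iov[0].iov_len  = sizeof(h);
  iov[1].iov_base = const_cast<uint8_t*>(data);
  iov[1].iov_len  = len;
  return writev(sock, iov, 2);  // one datagram, two source buffers
}

// Receive one datagram: header scattered into h, the rest into data.
ssize_t recv_datagram(int sock, DemoHeader& h, uint8_t* data, size_t max_len)
{
  iovec iov[2];
  iov[0].iov_base = &h;
  iov[0].iov_len  = sizeof(h);
  iov[1].iov_base = data;
  iov[1].iov_len  = max_len;
  return readv(sock, iov, 2);
}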

619: 
620: void TransportUDP::enableRead()
621: {
630: 
631:   if (!expecting_read_)
632:   {
633:     poll_set_->addEvents(sock_, POLLIN);
634:     expecting_read_ = true;
635:   }
636: }
637: 
638: void TransportUDP::disableRead()
639: {
651:   if (expecting_read_)
652:   {
653:     poll_set_->delEvents(sock_, POLLIN);
654:     expecting_read_ = false;
655:   }
656: }
657: 
658: void TransportUDP::enableWrite()
659: {
668: 
669:   if (!expecting_write_)
670:   {
671:     poll_set_->addEvents(sock_, POLLOUT);
672:     expecting_write_ = true;
673:   }
674: }
675: 
676: void TransportUDP::disableWrite()
677: {
687:   if (expecting_write_)
688:   {
689:     poll_set_->delEvents(sock_, POLLOUT);
690:     expecting_write_ = false;
691:   }
692: }

The above defines the read/write control.
TransportUDP wraps the basic UDP operations; when an event occurs, the corresponding callback is invoked.
A question naturally arises: transport goes to all this trouble to define the TCP/UDP primitives and a pile of callbacks, yet we have not seen who uses them. Next we look at Connection, which uses exactly these interfaces.

2. Analyzing Connection

File: src/ros_comm/roscpp/include/ros/connection.h

55: class Transport;
56: typedef boost::shared_ptr<Transport> TransportPtr;
57: class Connection;
58: typedef boost::shared_ptr<Connection> ConnectionPtr;
59: typedef boost::function<void(const ConnectionPtr&, const boost::shared_array<uint8_t>&, uint32_t, bool)> ReadFinishedFunc;
60: typedef boost::function<void(const ConnectionPtr&)> WriteFinishedFunc;
61: 
62: typedef boost::function<bool(const ConnectionPtr&, const Header&)> HeaderReceivedFunc;

70: class ROSCPP_DECL Connection : public boost::enable_shared_from_this<Connection>
71: {
72: public:

        // Connection's initialize method takes a TransportPtr& reference
86:   void initialize(const TransportPtr& transport, bool is_server, const HeaderReceivedFunc& header_func);

        // declares the writeHeader, read, and write interfaces; note each takes a callback
113:   void writeHeader(const M_string& key_vals, const WriteFinishedFunc& finished_callback);
128:   void read(uint32_t size, const ReadFinishedFunc& finished_callback);
145:   void write(const boost::shared_array<uint8_t>& buffer, uint32_t size, const WriteFinishedFunc& finished_callback, bool immediate = true);
146: 
        // declares the actions to run on drop
147:   typedef boost::signals2::signal<void(const ConnectionPtr&, DropReason reason)> DropSignal;
148:   typedef boost::function<void(const ConnectionPtr&, DropReason reason)> DropFunc;
152:   boost::signals2::connection addDropListener(const DropFunc& slot);
153:   void removeDropListener(const boost::signals2::connection& c);

158:   void setHeaderReceivedCallback(const HeaderReceivedFunc& func);

178: private:
        // a series of event callbacks
182:   void onReadable(const TransportPtr& transport);
186:   void onWriteable(const TransportPtr& transport);
191:   void onDisconnect(const TransportPtr& transport);
194:   void onHeaderWritten(const ConnectionPtr& conn);
195:   void onErrorHeaderWritten(const ConnectionPtr& conn);
196:   void onHeaderLengthRead(const ConnectionPtr& conn, const boost::shared_array<uint8_t>& buffer, uint32_t size, bool success);
197:   void onHeaderRead(const ConnectionPtr& conn, const boost::shared_array<uint8_t>& buffer, uint32_t size, bool success);

        // read from / write to the Transport
203:   void readTransport();
207:   void writeTransport();
208: 
209:   /// Are we a server?  Servers wait for clients to send a header and then send a header in response.
210:   bool is_server_;
211:   /// Have we dropped?
212:   bool dropped_;
213:   /// Incoming header
214:   Header header_;
215:   /// Transport associated with us
216:   TransportPtr transport_;
217:   /// Function that handles the incoming header
218:   HeaderReceivedFunc header_func_;
219: 
220:   /// Read buffer that ends up being passed to the read callback
221:   boost::shared_array<uint8_t> read_buffer_;
222:   /// Amount of data currently in the read buffer, in bytes
223:   uint32_t read_filled_;
224:   /// Size of the read buffer, in bytes
225:   uint32_t read_size_;
226:   /// Function to call when the read is finished
227:   ReadFinishedFunc read_callback_;
228:   /// Mutex used for protecting reading.  Recursive because a read can immediately cause another read through the callback.
229:   boost::recursive_mutex read_mutex_;
230:   /// Flag telling us if we're in the middle of a read (mostly to avoid recursive deadlocking)
231:   bool reading_;
232:   /// flag telling us if there is a read callback
233:   /// 32-bit loads and stores are atomic on x86 and PPC... TODO: use a cross-platform atomic operations library
234:   /// to ensure this is done atomically
235:   volatile uint32_t has_read_callback_;
236: 
237:   /// Buffer to write from
238:   boost::shared_array<uint8_t> write_buffer_;
239:   /// Amount of data we've written from the write buffer
240:   uint32_t write_sent_;
241:   /// Size of the write buffer
242:   uint32_t write_size_;
243:   /// Function to call when the current write is finished
244:   WriteFinishedFunc write_callback_;
245:   boost::mutex write_callback_mutex_;
246:   /// Mutex used for protecting writing.  Recursive because a write can immediately cause another write through the callback
247:   boost::recursive_mutex write_mutex_;
248:   /// Flag telling us if we're in the middle of a write (mostly used to avoid recursive deadlocking)
249:   bool writing_;
250:   /// flag telling us if there is a write callback
251:   /// 32-bit loads and stores are atomic on x86 and PPC... TODO: use a cross-platform atomic operations library
252:   /// to ensure this is done atomically
253:   volatile uint32_t has_write_callback_;
254: 
255:   /// Function to call when the outgoing header has finished writing
256:   WriteFinishedFunc header_written_callback_;
257: 
258:   /// Signal raised when this connection is dropped
259:   DropSignal drop_signal_;
260: 
261:   /// Synchronizes drop() calls
262:   boost::recursive_mutex drop_mutex_;
263: 
264:   /// If we're sending a header error we disable most other calls
265:   bool sending_header_error_;
266: };
267: typedef boost::shared_ptr<Connection> ConnectionPtr;

In the declaration above, Connection's initialize takes a TransportPtr& reference, so TCP and UDP can be handled polymorphically.
The writeHeader, read, and write interfaces are declared; note that each takes a callback.
A series of event callbacks is defined; these are passed as callbacks into the writeHeader/read/write interfaces and executed there.
The actions to run on drop are declared.

Next, the Connection implementation:

void Connection::initialize(const TransportPtr& transport, bool is_server, const HeaderReceivedFunc& header_func)
{

  transport_ = transport;
  header_func_ = header_func;
  is_server_ = is_server;

  transport_->setReadCallback(boost::bind(&Connection::onReadable, this, _1));
  transport_->setWriteCallback(boost::bind(&Connection::onWriteable, this, _1));
  transport_->setDisconnectCallback(boost::bind(&Connection::onDisconnect, this, _1));

  if (header_func)
  {
    read(4, boost::bind(&Connection::onHeaderLengthRead, this, _1, _2, _3, _4));
  }
}

264: void Connection::read(uint32_t size, const ReadFinishedFunc& callback)
265: {
271:   {
272:     boost::recursive_mutex::scoped_lock lock(read_mutex_);
275:     // save the callback into read_callback_
276:     read_callback_ = callback;
277:     read_buffer_ = boost::shared_array<uint8_t>(new uint8_t[size]);
278:     read_size_ = size;
279:     read_filled_ = 0;
280:     has_read_callback_ = 1;
281:   }
282: 
283:   transport_->enableRead();
284: 
285:   // read immediately if possible
286:   readTransport();
287: }

289: void Connection::write(const boost::shared_array<uint8_t>& buffer, uint32_t size, const WriteFinishedFunc& callback, bool immediate)
290: {
291:   if (dropped_ || sending_header_error_)
292:   {
293:     return;
294:   }
295: 
296:   {
297:     boost::mutex::scoped_lock lock(write_callback_mutex_);
298: 
300:    // save the callback into write_callback_
301:     write_callback_ = callback;
302:     write_buffer_ = buffer;
303:     write_size_ = size;
304:     write_sent_ = 0;
305:     has_write_callback_ = 1;
306:   }
307: 
308:   transport_->enableWrite();
309: 
310:   if (immediate)
311:   {
312:     // write immediately if possible
313:     writeTransport();
314:   }
315: }

void Connection::onReadable(const TransportPtr& transport)
{
  (void)transport;
  ROS_ASSERT(transport == transport_);

  readTransport();
}

void Connection::readTransport()
{
  // (locking and error handling elided)
  while (!dropped_ && has_read_callback_)
  {
    uint32_t to_read = read_size_ - read_filled_;
    if (to_read > 0)
    {
      int32_t bytes_read = transport_->read(read_buffer_.get() + read_filled_, to_read);
      ROS_DEBUG_NAMED("superdebug", "Connection read %d bytes", bytes_read);

      read_filled_ += bytes_read;
    }

    if (read_filled_ == read_size_ && !dropped_)
    {
      // take local copies and clear the members before invoking,
      // in case the callback issues another read()
      ReadFinishedFunc callback = read_callback_;
      boost::shared_array<uint8_t> buffer = read_buffer_;
      uint32_t size = read_size_;
      has_read_callback_ = 0;

      callback(shared_from_this(), buffer, size, true);
    }
  }

  if (!has_read_callback_)
  {
    transport_->disableRead();
  }
}

192: void Connection::writeTransport()
193: {
195: 
203: 
204:   while (has_write_callback_ && can_write_more && !dropped_)
205:   {
206:     uint32_t to_write = write_size_ - write_sent_;
208:     int32_t bytes_sent = transport_->write(write_buffer_.get() + write_sent_, to_write);
210: 
216: 
217:     write_sent_ += bytes_sent;
223: 
224:     if (write_sent_ == write_size_ && !dropped_)
225:     {
226:       WriteFinishedFunc callback;
227: 
228:       {
229:         boost::mutex::scoped_lock lock(write_callback_mutex_);
230:         ROS_ASSERT(has_write_callback_);
231:         // Store off a copy of the callback in case another write() call happens in it
232:         callback = write_callback_;
233:         write_callback_ = WriteFinishedFunc();
234:         write_buffer_ = boost::shared_array<uint8_t>();
235:         write_sent_ = 0;
236:         write_size_ = 0;
237:         has_write_callback_ = 0;
238:       }
239: 
240:       ROS_DEBUG_NAMED("superdebug", "Calling write callback");
241:       callback(shared_from_this());
242:     }
243:   }

}

void Connection::onWriteable(const TransportPtr& transport)
{
  (void)transport;
  ROS_ASSERT(transport == transport_);

  writeTransport();
}

void Connection::onDisconnect(const TransportPtr& transport)
{
  (void)transport;
  ROS_ASSERT(transport == transport_);

  drop(TransportDisconnect);
}

In initialize above, several registrations happen; let's trace the call chains (–> means calls). This also answers the question left open in Part 1.
1. transport_->setReadCallback(boost::bind(&Connection::onReadable, this, _1));
When the underlying socket has data to read –> Connection::onReadable –> Connection::readTransport –> read_callback_
(this callable is registered through other interfaces).

2. transport_->setWriteCallback(boost::bind(&Connection::onWriteable, this, _1));
When the underlying socket becomes writable –> Connection::onWriteable –> writeTransport() –> write_callback_
(this callable is registered through other interfaces).

3. transport_->setDisconnectCallback(boost::bind(&Connection::onDisconnect, this, _1));
On disconnect –> Connection::onDisconnect –> Connection::drop –> the listeners registered via Connection::addDropListener (analyzed later).

4. if (header_func)
{
read(4, boost::bind(&Connection::onHeaderLengthRead, this, _1, _2, _3, _4));
}
When header_func is set (the TCP case, analyzed later) –> Connection::onHeaderLengthRead –> the follow-up handlers, analyzed below.

boost::signals2::connection Connection::addDropListener(const DropFunc& slot)
{
  boost::recursive_mutex::scoped_lock lock(drop_mutex_);
  return drop_signal_.connect(slot);
}

void Connection::removeDropListener(const boost::signals2::connection& c)
{
  boost::recursive_mutex::scoped_lock lock(drop_mutex_);
  c.disconnect();
}

void Connection::drop(DropReason reason)
{
    transport_->close();
    drop_signal_(shared_from_this(), reason);
}

addDropListener and removeDropListener manage the listeners invoked when the connection drops: Connection::drop –> drop_signal_(shared_from_this(), reason) –> each slot registered via Connection::addDropListener(const DropFunc& slot).
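
drop_signal_ is a boost::signals2::signal, which is what makes multiple independent listeners possible. A minimal standalone sketch of the mechanism (demo code, not roscpp):

#include <boost/signals2.hpp>
#include <iostream>

void onDropped(int reason)
{
  std::cout << "connection dropped, reason=" << reason << std::endl;
}

int main()
{
  boost::signals2::signal<void(int)> drop_signal;  // like drop_signal_

  // Like Connection::addDropListener: connect() hands back a handle.
  boost::signals2::connection c = drop_signal.connect(&onDropped);

  drop_signal(0);   // like drop(): every connected slot runs -> prints once

  c.disconnect();   // like removeDropListener
  drop_signal(1);   // nothing prints: the slot has been removed
  return 0;
}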

void Connection::writeHeader(const M_string& key_vals, const WriteFinishedFunc& finished_callback)
{
  ROS_ASSERT(!header_written_callback_);
  header_written_callback_ = finished_callback;

  if (!transport_->requiresHeader())
  {
    onHeaderWritten(shared_from_this());
    return;
  }

  write(full_msg, msg_len, boost::bind(&Connection::onHeaderWritten, this, _1), false);
}

void Connection::onHeaderWritten(const ConnectionPtr& conn)
{
  ROS_ASSERT(conn.get() == this);
  ROS_ASSERT(header_written_callback_);

  header_written_callback_(conn);
  header_written_callback_ = WriteFinishedFunc();
}

The above defines writeHeader –> write (whose completion triggers) –> onHeaderWritten –> header_written_callback_, i.e. finished_callback. In this way the finished_callback passed to Connection::writeHeader(const M_string& key_vals, const WriteFinishedFunc& finished_callback) ends up being called.

void Connection::setHeaderReceivedCallback(const HeaderReceivedFunc& func)
{
  header_func_ = func;

  if (transport_->requiresHeader())
    read(4, boost::bind(&Connection::onHeaderLengthRead, this, _1, _2, _3, _4));
}

void Connection::onHeaderLengthRead(const ConnectionPtr& conn, const boost::shared_array<uint8_t>& buffer, uint32_t size, bool success)
{

  read(len, boost::bind(&Connection::onHeaderRead, this, _1, _2, _3, _4));
}

void Connection::onHeaderRead(const ConnectionPtr& conn, const boost::shared_array<uint8_t>& buffer, uint32_t size, bool success)
{
      transport_->parseHeader(header_);

      header_func_(conn, header_);
}

The above defines setHeaderReceivedCallback (which saves header_func_ = func) –> read (invoked on completion) –> onHeaderLengthRead –> onHeaderRead –> header_func_ (i.e. func). In this way the func passed to Connection::setHeaderReceivedCallback(const HeaderReceivedFunc& func) is called once the header has been fully read.
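
This 4-byte-length-then-body sequence is classic length-prefixed framing; roscpp drives it asynchronously through the read callbacks. For clarity, a blocking standalone sketch of the same framing (demo code; assumes a little-endian host, matching the TCPROS wire format):

#include <unistd.h>
#include <stdint.h>
#include <vector>

// Read exactly n bytes from a blocking fd; false on EOF or error.
static bool read_exact(int fd, uint8_t* buf, size_t n)
{
  size_t got = 0;
  while (got < n)
  {
    ssize_t r = read(fd, buf + got, n - got);
    if (r <= 0)
      return false;
    got += static_cast<size_t>(r);
  }
  return true;
}

// Synchronous equivalent of read(4, onHeaderLengthRead) followed by
// read(len, onHeaderRead).
bool read_frame(int fd, std::vector<uint8_t>& body)
{
  uint32_t len = 0;
  if (!read_exact(fd, reinterpret_cast<uint8_t*>(&len), 4))
    return false;
  body.resize(len);
  return len == 0 || read_exact(fd, &body[0], len);
}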

From the above we can see that Connection is a further wrapper around Transport. So far we have seen:
1. transport_->setReadCallback(boost::bind(&Connection::onReadable, this, _1));
When the underlying socket has data to read –> Connection::onReadable –> Connection::readTransport –> read_callback_
(registered through other interfaces).

2. transport_->setWriteCallback(boost::bind(&Connection::onWriteable, this, _1));
When the underlying socket becomes writable –> Connection::onWriteable –> writeTransport() –> write_callback_
(registered through other interfaces).

3. addDropListener / removeDropListener manage the drop listeners: Connection::drop –> drop_signal_(shared_from_this(), reason) –> each slot registered via Connection::addDropListener(const DropFunc& slot).

4. setHeaderReceivedCallback (saves header_func_ = func) –> read (invoked on completion) –> onHeaderLengthRead –> onHeaderRead –> header_func_ (i.e. func), so the func passed to Connection::setHeaderReceivedCallback(const HeaderReceivedFunc& func) is called once the header is read.

So who calls these registration interfaces? Next we look for the answer in ConnectionManager.

3. Analyzing ConnectionManager

先看ConnectionManager声明:

File: src/ros_comm/roscpp/include/ros/connection_manager.h

38: class PollManager;
39: typedef boost::shared_ptr<PollManager> PollManagerPtr;
40: 
41: class ConnectionManager;
42: typedef boost::shared_ptr<ConnectionManager> ConnectionManagerPtr;
43: 
44: class ROSCPP_DECL ConnectionManager
45: {
46: public:
        // singleton pattern
47:   static const ConnectionManagerPtr& instance();
48: 

60:   void addConnection(const ConnectionPtr& connection);
61: 
62:   void clear(Connection::DropReason reason);
63: 
64:   uint32_t getTCPPort();
65:   uint32_t getUDPPort();
66: 
67:   const TransportTCPPtr& getTCPServerTransport() { return tcpserver_transport_; }
68:   const TransportUDPPtr& getUDPServerTransport() { return udpserver_transport_; }
69: 
70:   void udprosIncomingConnection(const TransportUDPPtr& transport, Header& header);
71: 
        // the main event is here
72:   void start();

73:   void shutdown();
74: 
75: private:
76:   void onConnectionDropped(const ConnectionPtr& conn);
80:   void removeDroppedConnections();
81: 
82:   bool onConnectionHeaderReceived(const ConnectionPtr& conn, const Header& header);
83:   void tcprosAcceptConnection(const TransportTCPPtr& transport);
84: 
85:   PollManagerPtr poll_manager_;
86: 
87:   S_Connection connections_;
88:   V_Connection dropped_connections_;
89:   boost::mutex connections_mutex_;
90:   boost::mutex dropped_connections_mutex_;
91: 
92:   // The connection ID counter, used to assign unique ID to each inbound or
93:   // outbound connection.  Access via getNewConnectionID()
94:   uint32_t connection_id_counter_;
95:   boost::mutex connection_id_counter_mutex_;
96: 
97:   boost::signals2::connection poll_conn_;
98: 
99:   TransportTCPPtr tcpserver_transport_;
100:   TransportUDPPtr udpserver_transport_;
101: 
102:   const static int MAX_TCPROS_CONN_QUEUE = 100; // magic
103: };

The implementation file is analyzed below:

File: src/ros_comm/roscpp/src/libros/connection_manager.cpp

43: const ConnectionManagerPtr& ConnectionManager::instance()
44: {
45:   static ConnectionManagerPtr connection_manager = boost::make_shared<ConnectionManager>();
46:   return connection_manager;
47: }

58: 
59: void ConnectionManager::start()
60: {
        // register removeDroppedConnections with poll_manager; it runs every poll loop iteration
61:   poll_manager_ = PollManager::instance();
62:   poll_conn_ = poll_manager_->addPollThreadListener(boost::bind(&ConnectionManager::removeDroppedConnections, 
63: 								this));
64: 
        // start the TCP server transport
66:   tcpserver_transport_ = boost::make_shared<TransportTCP>(&poll_manager_->getPollSet());
67:   if (!tcpserver_transport_->listen(network::getTCPROSPort(), 
68: 				    MAX_TCPROS_CONN_QUEUE, 
69: 				    boost::bind(&ConnectionManager::tcprosAcceptConnection, this, _1)))
70:   {
71:     ROS_FATAL("Listen on port [%d] failed", network::getTCPROSPort());
72:     ROS_BREAK();
73:   }
74: 
75:   // start the UDP server transport
76:   udpserver_transport_ = boost::make_shared<TransportUDP>(&poll_manager_->getPollSet());
77:   if (!udpserver_transport_->createIncoming(0, true))
78:   {
79:     ROS_FATAL("Listen failed");
80:     ROS_BREAK();
81:   }
82: }

156: void ConnectionManager::removeDroppedConnections()
157: {
158:   V_Connection local_dropped;
159:   {
160:     boost::mutex::scoped_lock dropped_lock(dropped_connections_mutex_);
161:     dropped_connections_.swap(local_dropped);
162:   }
163: 
164:   boost::mutex::scoped_lock conn_lock(connections_mutex_);
165: 
166:   V_Connection::iterator conn_it = local_dropped.begin();
167:   V_Connection::iterator conn_end = local_dropped.end();
168:   for (;conn_it != conn_end; ++conn_it)
169:   {
170:     const ConnectionPtr& conn = *conn_it;
171:     connections_.erase(conn);
172:   }
173: }

In ConnectionManager::start():
1. removeDroppedConnections is added as a poll_manager listener, implementing cleanup of dropped connections.
2. The TCP server is started, with ConnectionManager::tcprosAcceptConnection bound as the accept callback (tcprosAcceptConnection is analyzed below).
3. The UDP server is started.

140: void ConnectionManager::addConnection(const ConnectionPtr& conn)
141: {
142:   boost::mutex::scoped_lock lock(connections_mutex_);
143: 
144:   // insert into connections_
145:   connections_.insert(conn);
146:   // register a drop listener
147:   conn->addDropListener(boost::bind(&ConnectionManager::onConnectionDropped, this, _1));
148: }
149: 
150: void ConnectionManager::onConnectionDropped(const ConnectionPtr& conn)
151: {
152:   boost::mutex::scoped_lock lock(dropped_connections_mutex_);
153:   dropped_connections_.push_back(conn);
154: }
155: 

ConnectionManager::addConnection (the public interface) –> registers ConnectionManager::onConnectionDropped (invoked when the connection drops) –> dropped_connections_.push_back(conn) –> later cleaned out by removeDroppedConnections, which runs on every poll loop iteration.
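
Note the idiom inside removeDroppedConnections: the shared vector is swapped into a local one while holding the mutex, then processed afterwards. A minimal sketch of this swap-then-process pattern (demo code with int standing in for ConnectionPtr):

#include <boost/thread/mutex.hpp>
#include <vector>

// Stand-ins for dropped_connections_ and its mutex.
static std::vector<int> g_dropped;
static boost::mutex g_dropped_mutex;

void onDropped(int conn)           // like onConnectionDropped
{
  boost::mutex::scoped_lock lock(g_dropped_mutex);
  g_dropped.push_back(conn);
}

void removeDropped()               // like removeDroppedConnections
{
  std::vector<int> local;
  {
    boost::mutex::scoped_lock lock(g_dropped_mutex);
    g_dropped.swap(local);         // O(1): steal the contents under the lock
  }
  // Process `local` without holding the mutex, so drop callbacks fired
  // meanwhile can still push into g_dropped without blocking.
  for (size_t i = 0; i < local.size(); ++i)
  {
    // erase local[i] from the main connection set here ...
  }
}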

174: 
175: void ConnectionManager::udprosIncomingConnection(const TransportUDPPtr& transport, Header& header)
176: {
177:   std::string client_uri = ""; // TODO: transport->getClientURI();
178:   ROSCPP_LOG_DEBUG("UDPROS received a connection from [%s]", client_uri.c_str());
179: 
180:   ConnectionPtr conn(boost::make_shared<Connection>());
181:   addConnection(conn);
182: 
183:   conn->initialize(transport, true, NULL);
184:   onConnectionHeaderReceived(conn, header);
185: }
186: 
187: void ConnectionManager::tcprosAcceptConnection(const TransportTCPPtr& transport)
188: {
189:   std::string client_uri = transport->getClientURI();
190:   ROSCPP_LOG_DEBUG("TCPROS received a connection from [%s]", client_uri.c_str());
191: 
192:   ConnectionPtr conn(boost::make_shared<Connection>());
193:   addConnection(conn);
194: 
195:   conn->initialize(transport, true, boost::bind(&ConnectionManager::onConnectionHeaderReceived, this, _1, _2));
196: }
197: 
198: bool ConnectionManager::onConnectionHeaderReceived(const ConnectionPtr& conn, const Header& header)
199: {
200:   bool ret = false;
201:   std::string val;
202:   if (header.getValue("topic", val))
203:   {
204:     ROSCPP_CONN_LOG_DEBUG("Connection: Creating TransportSubscriberLink for topic [%s] connected to [%s]", 
205: 		     val.c_str(), conn->getRemoteString().c_str());
206: 
207:     // create the sub_link
208:     TransportSubscriberLinkPtr sub_link(boost::make_shared<TransportSubscriberLink>());
209:     sub_link->initialize(conn);
210:     ret = sub_link->handleHeader(header);
211:   }
212:   else if (header.getValue("service", val))
213:   {
214:     ROSCPP_LOG_DEBUG("Connection: Creating ServiceClientLink for service [%s] connected to [%s]", 
215: 		     val.c_str(), conn->getRemoteString().c_str());
216: 
217:     ServiceClientLinkPtr link(boost::make_shared<ServiceClientLink>());
218:     link->initialize(conn);
219:     ret = link->handleHeader(header);
220:   }
221:   else
222:   {
223:   	ROSCPP_LOG_DEBUG("Got a connection for a type other than 'topic' or 'service' from [%s].  Fail.", 
224: 			 conn->getRemoteString().c_str());
225:     return false;
226:   }
227: 
228:   return ret;
229: }

udprosIncomingConnection –> addConnection, then calls onConnectionHeaderReceived directly.
tcprosAcceptConnection –> addConnection, and registers onConnectionHeaderReceived as the connection's header callback.
onConnectionHeaderReceived distinguishes topic from service and dispatches accordingly.

4. Analyzing transport_subscriber_link and transport_publisher_link

The name transport_subscriber_link is misleading: it literally reads "subscriber link", but it is the publication (publisher) side's wrapper for a connection to one subscriber.
The name transport_publisher_link is equally misleading: it literally reads "publisher link", but it is the subscription (subscriber) side's wrapper for a connection to one publisher.

File: src/ros_comm/roscpp/include/ros/transport_subscriber_link.h
41: class ROSCPP_DECL TransportSubscriberLink : public SubscriberLink
42: {
43: public:
44:   TransportSubscriberLink();
45:   virtual ~TransportSubscriberLink();
46: 
47:   //
48:   bool initialize(const ConnectionPtr& connection);
        // handle the incoming header
49:   bool handleHeader(const Header& header);
50: 
51:   const ConnectionPtr& getConnection() { return connection_; }
52: 
        // enqueue a serialized message
53:   virtual void enqueueMessage(const SerializedMessage& m, bool ser, bool nocopy);
54:   virtual void drop();
55:   virtual std::string getTransportType();
56:   virtual std::string getTransportInfo();
57: 
58: private:
59:   void onConnectionDropped(const ConnectionPtr& conn);
60: 
61:   void onHeaderWritten(const ConnectionPtr& conn);
62:   void onMessageWritten(const ConnectionPtr& conn);
63:   void startMessageWrite(bool immediate_write);
64: 
65:   bool writing_message_;
66:   bool header_written_;
67: 
68:   ConnectionPtr connection_;
69:   boost::signals2::connection dropped_conn_;
70: 
71:   std::queue<SerializedMessage> outbox_;
72:   boost::mutex outbox_mutex_;
73:   bool queue_full_;
74: };
75: typedef boost::shared_ptr<TransportSubscriberLink> TransportSubscriberLinkPtr;

Above is TransportSubscriberLink. Since it wraps the publisher side, it has handleHeader to process the header coming from a subscriber and enqueueMessage to queue serialized outgoing messages.

File: src/ros_comm/roscpp/src/libros/transport_subscriber_link.cpp

57: bool TransportSubscriberLink::initialize(const ConnectionPtr& connection)
58: {
59:   connection_ = connection;
60:   dropped_conn_ = connection_->addDropListener(boost::bind(&TransportSubscriberLink::onConnectionDropped, this, _1));
61: 
62:   return true;
63: }

122: void TransportSubscriberLink::onConnectionDropped(const ConnectionPtr& conn)
123: {
124:   (void)conn;
125:   ROS_ASSERT(conn == connection_);
126: 
127:   PublicationPtr parent = parent_.lock();
128: 
129:   if (parent)
130:   {
131:     ROSCPP_CONN_LOG_DEBUG("Connection to subscriber [%s] to topic [%s] dropped", connection_->getRemoteString().c_str(), topic_.c_str());
132: 
133:     parent->removeSubscriberLink(shared_from_this());
134:   }
135: }
64: 

The call chain is as follows:
initialize –> connection_->addDropListener registers onConnectionDropped, so when a drop event occurs –> TransportSubscriberLink::onConnectionDropped.

65: bool TransportSubscriberLink::handleHeader(const Header& header)
66: {

81:   PublicationPtr pt = TopicManager::instance()->lookupPublication(topic);
101: 
102:   destination_caller_id_ = client_callerid;
103:   connection_id_ = ConnectionManager::instance()->getNewConnectionID();
104:   topic_ = pt->getName();
105:   parent_ = PublicationWPtr(pt);
106: 
107:   // Send back a success, with info
108:   M_string m;
109:   m["type"] = pt->getDataType();
110:   m["md5sum"] = pt->getMD5Sum();
111:   m["message_definition"] = pt->getMessageDefinition();
112:   m["callerid"] = this_node::getName();
113:   m["latching"] = pt->isLatching() ? "1" : "0";
114:   m["topic"] = topic_;
115:   connection_->writeHeader(m, boost::bind(&TransportSubscriberLink::onHeaderWritten, this, _1));
116: 
117:   pt->addSubscriberLink(shared_from_this());
118: 
119:   return true;
120: }
136: 
137: void TransportSubscriberLink::onHeaderWritten(const ConnectionPtr& conn)
138: {
139:   (void)conn;
140:   header_written_ = true;
141:   startMessageWrite(true);
142: }
143: 
144: void TransportSubscriberLink::onMessageWritten(const ConnectionPtr& conn)
145: {
146:   (void)conn;
147:   writing_message_ = false;
148:   startMessageWrite(true);
149: }
150: 
151: void TransportSubscriberLink::startMessageWrite(bool immediate_write)
152: {
153:   boost::shared_array<uint8_t> dummy;
154:   SerializedMessage m(dummy, (uint32_t)0);
155: 
156:   {
157:     boost::mutex::scoped_lock lock(outbox_mutex_);
158:     if (writing_message_ || !header_written_)
159:     {
160:       return;
161:     }
162: 
163:     if (!outbox_.empty())
164:     {
165:       writing_message_ = true;
166:       m = outbox_.front();
167:       outbox_.pop();
168:     }
169:   }
170: 
171:   if (m.num_bytes > 0)
172:   {
173:     connection_->write(m.buf, m.num_bytes, boost::bind(&TransportSubscriberLink::onMessageWritten, this, _1), immediate_write);
174:   }
175: }

1. Combined with ConnectionManager: ConnectionManager::onConnectionHeaderReceived –> handleHeader –> (once the reply header is written) onHeaderWritten –> startMessageWrite; this chain kicks off message writing.

2. startMessageWrite pops a message from outbox_ and calls connection_->write; when that write completes, onMessageWritten fires and calls startMessageWrite again. This loop keeps writing to the socket as long as outbox_ has data.

177: void TransportSubscriberLink::enqueueMessage(const SerializedMessage& m, bool ser, bool nocopy)
178: {
184: 
185:   {
186:     boost::mutex::scoped_lock lock(outbox_mutex_);
187: 
188:     int max_queue = 0;
189:     if (PublicationPtr parent = parent_.lock())

196:     if (max_queue > 0 && (int)outbox_.size() >= max_queue)
197:     {
205:       outbox_.pop(); // toss out the oldest thing in the queue to make room for us
206:       queue_full_ = true;
207:     }
208:     else
209:     {
210:       queue_full_ = false;
211:     }
212: 
213:     outbox_.push(m);
214:   }
215: 
216:   startMessageWrite(false);
217: 
218:   stats_.messages_sent_++;
219:   stats_.bytes_sent_ += m.num_bytes;
220:   stats_.message_data_sent_ += m.num_bytes;
221: }
222: 

TransportSubscriberLink::enqueueMessage –> startMessageWrite –> ...
TransportSubscriberLink::enqueueMessage is the external interface; when a node calls publish, the call eventually reaches this interface (analyzed in a later article).
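
Putting the pieces together, the publisher side is an outbox drained by write-completion callbacks. A condensed standalone sketch of that shape (demo code; the write completion is faked as immediate, whereas roscpp completes it via the transport callbacks):

#include <boost/thread/mutex.hpp>
#include <queue>
#include <string>

// Stand-ins for outbox_, outbox_mutex_ and writing_message_.
static std::queue<std::string> g_outbox;
static boost::mutex g_outbox_mutex;
static bool g_writing_message = false;

void onMessageWritten();  // forward declaration

// Like startMessageWrite: if idle, pop one message and start writing it.
void startMessageWrite()
{
  std::string m;
  {
    boost::mutex::scoped_lock lock(g_outbox_mutex);
    if (g_writing_message || g_outbox.empty())
      return;
    g_writing_message = true;
    m = g_outbox.front();
    g_outbox.pop();
  }
  // In roscpp this is connection_->write(..., onMessageWritten);
  // here the completion is faked as immediate.
  onMessageWritten();
}

// Like onMessageWritten: the previous write finished, try the next message.
void onMessageWritten()
{
  {
    boost::mutex::scoped_lock lock(g_outbox_mutex);
    g_writing_message = false;
  }
  startMessageWrite();
}

// Like enqueueMessage: queue the message and kick off a write if none is in flight.
void enqueueMessage(const std::string& m)
{
  {
    boost::mutex::scoped_lock lock(g_outbox_mutex);
    g_outbox.push(m);
  }
  startMessageWrite();
}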

File: src/ros_comm/roscpp/include/ros/transport_publisher_link.h
51: class ROSCPP_DECL TransportPublisherLink : public PublisherLink
52: {
53: public:
 
65: 
66: private:
67:   void onConnectionDropped(const ConnectionPtr& conn, Connection::DropReason reason);
68:   bool onHeaderReceived(const ConnectionPtr& conn, const Header& header);
69: 
70:   /**
71:    * \brief Handles handing off a received message to the subscription, where it will be deserialized and called back
72:    */
73:   virtual void handleMessage(const SerializedMessage& m, bool ser, bool nocopy);
74: 
75:   void onHeaderWritten(const ConnectionPtr& conn);
76:   void onMessageLength(const ConnectionPtr& conn, const boost::shared_array<uint8_t>& buffer, uint32_t size, bool success);
77:   void onMessage(const ConnectionPtr& conn, const boost::shared_array<uint8_t>& buffer, uint32_t size, bool success);
78: 
79:   void onRetryTimer(const ros::SteadyTimerEvent&);
80: 
81:   ConnectionPtr connection_;
82: 
83:   int32_t retry_timer_handle_;
84:   bool needs_retry_;
85:   WallDuration retry_period_;
86:   SteadyTime next_retry_;
87:   bool dropping_;
88: };
89: typedef boost::shared_ptr<TransportPublisherLink> TransportPublisherLinkPtr;
90: 

Above is TransportPublisherLink. Since it wraps the subscriber side, it has onHeaderReceived to handle header arrival and handleMessage to process received messages.

File: src/ros_comm/roscpp/src/libros/transport_publisher_link.cpp

51: #include <boost/bind.hpp>
52: 
53: #include <sstream>
54: 
55: namespace ros
56: {
57: 
58: TransportPublisherLink::TransportPublisherLink(const SubscriptionPtr& parent, const std::string& xmlrpc_uri, const TransportHints& transport_hints)
59: : PublisherLink(parent, xmlrpc_uri, transport_hints)
60: , retry_timer_handle_(-1)
61: , needs_retry_(false)
62: , dropping_(false)
63: {
64: }

77: 
78: bool TransportPublisherLink::initialize(const ConnectionPtr& connection)
79: {
80:   connection_ = connection;

85:   connection_->addDropListener(Connection::DropSignal::slot_type(&TransportPublisherLink::onConnectionDropped, this, _1, _2).track(shared_from_this()));
86: 
87:   if (connection_->getTransport()->requiresHeader())
88:   {
89:     connection_->setHeaderReceivedCallback(boost::bind(&TransportPublisherLink::onHeaderReceived, this, _1, _2));
90: 
91:     SubscriptionPtr parent = parent_.lock();
92:     if (!parent)
93:     {
94:       return false;
95:     }
96: 
97:     M_string header;
98:     header["topic"] = parent->getName();
99:     header["md5sum"] = parent->md5sum();
100:     header["callerid"] = this_node::getName();
101:     header["type"] = parent->datatype();
102:     header["tcp_nodelay"] = transport_hints_.getTCPNoDelay() ? "1" : "0";
103:     connection_->writeHeader(header, boost::bind(&TransportPublisherLink::onHeaderWritten, this, _1));
104:   }
105:   else
106:   {
107:     connection_->read(4, boost::bind(&TransportPublisherLink::onMessageLength, this, _1, _2, _3, _4));
108:   }
109: 
110:   return true;
111: }
112: 
123: 
124: void TransportPublisherLink::onHeaderWritten(const ConnectionPtr& conn)
125: {
126:   (void)conn;
127:   // Do nothing
128: }
129: 

130: bool TransportPublisherLink::onHeaderReceived(const ConnectionPtr& conn, const Header& header)
131: {
146: 
147:   connection_->read(4, boost::bind(&TransportPublisherLink::onMessageLength, this, _1, _2, _3, _4));
148: 
150: }
151: 
152: void TransportPublisherLink::onMessageLength(const ConnectionPtr& conn, const boost::shared_array<uint8_t>& buffer, uint32_t size, bool success)
153: {
154:   (void)conn;
155:   (void)size;
156:   if (retry_timer_handle_ != -1)
157:   {
158:     getInternalTimerManager()->remove(retry_timer_handle_);
159:     retry_timer_handle_ = -1;
160:   }
161: 
162:   if (!success)
163:   {
164:     if (connection_)
165:       connection_->read(4, boost::bind(&TransportPublisherLink::onMessageLength, this, _1, _2, _3, _4));
166:     return;
167:   }
168: 
169:   ROS_ASSERT(conn == connection_);
170:   ROS_ASSERT(size == 4);
171: 
172:   uint32_t len = *((uint32_t*)buffer.get());
173: 
174:   if (len > 1000000000)
175:   {
176:     ROS_ERROR("a message of over a gigabyte was " \
177:                 "predicted in tcpros. that seems highly " \
178:                 "unlikely, so I'll assume protocol " \
179:                 "synchronization is lost.");
180:     drop();
181:     return;
182:   }
183: 
184:   connection_->read(len, boost::bind(&TransportPublisherLink::onMessage, this, _1, _2, _3, _4));
185: }
186: 
187: void TransportPublisherLink::onMessage(const ConnectionPtr& conn, const boost::shared_array<uint8_t>& buffer, uint32_t size, bool success)
188: {
189:   if (!success && !conn)
190:     return;
191: 
192:   ROS_ASSERT(conn == connection_);
193: 
194:   if (success)
195:   {
196:     handleMessage(SerializedMessage(buffer, size), true, false);
197:   }
198: 
199:   if (success || !connection_->getTransport()->requiresHeader())
200:   {
201:     connection_->read(4, boost::bind(&TransportPublisherLink::onMessageLength, this, _1, _2, _3, _4));
202:   }
203: }

299: void TransportPublisherLink::handleMessage(const SerializedMessage& m, bool ser, bool nocopy)
300: {
301:   stats_.bytes_received_ += m.num_bytes;
302:   stats_.messages_received_++;
303: 
304:   SubscriptionPtr parent = parent_.lock();
305: 
306:   if (parent)
307:   {
308:     stats_.drops_ += parent->handleMessage(m, ser, nocopy, getConnection()->getHeader().getValues(), shared_from_this());
309:   }
310: }

TransportPublisherLink::onHeaderReceived –> TransportPublisherLink::onMessageLength –> TransportPublisherLink::onMessage –> TransportPublisherLink::handleMessage –> Subscription::handleMessage
This completes data reception on the subscribing node and hands the message to Subscription::handleMessage, analyzed in a later article.

204: 
205: void TransportPublisherLink::onRetryTimer(const ros::SteadyTimerEvent&)
206: {
207:   if (dropping_)
208:   {
209:     return;
210:   }
211: 
212:   if (needs_retry_ && SteadyTime::now() > next_retry_)
213:   {
214:     retry_period_ = std::min(retry_period_ * 2, WallDuration(20));
215:     needs_retry_ = false;
216:     SubscriptionPtr parent = parent_.lock();
217:     // TODO: support retry on more than just TCP
218:     // For now, since UDP does not have a heartbeat, we do not attempt to retry
219:     // UDP connections since an error there likely means some invalid operation has
220:     // happened.
221:     if (connection_->getTransport()->getType() == std::string("TCPROS"))
222:     {
223:       std::string topic = parent ? parent->getName() : "unknown";
224: 
225:       TransportTCPPtr old_transport = boost::dynamic_pointer_cast<TransportTCP>(connection_->getTransport());
226:       ROS_ASSERT(old_transport);
227:       const std::string& host = old_transport->getConnectedHost();
228:       int port = old_transport->getConnectedPort();
229: 
230:       ROSCPP_CONN_LOG_DEBUG("Retrying connection to [%s:%d] for topic [%s]", host.c_str(), port, topic.c_str());
231: 
232:       TransportTCPPtr transport(boost::make_shared<TransportTCP>(&PollManager::instance()->getPollSet()));
233:       if (transport->connect(host, port))
234:       {
235:         ConnectionPtr connection(boost::make_shared<Connection>());
236:         connection->initialize(transport, false, HeaderReceivedFunc());
237:         if (initialize(connection))
238:         {
239:           ConnectionManager::instance()->addConnection(connection);
240:         }
241:       }
242:       else
243:       {
244:         ROSCPP_CONN_LOG_DEBUG("connect() failed when retrying connection to [%s:%d] for topic [%s]", host.c_str(), port, topic.c_str());
245:       }
246:     }
247:     else if (parent)
248:     {
249:       parent->removePublisherLink(shared_from_this());
250:     }
251:   }
252: }
253: 
254: CallbackQueuePtr getInternalCallbackQueue();
255: 
256: void TransportPublisherLink::onConnectionDropped(const ConnectionPtr& conn, Connection::DropReason reason)
257: {
258:   (void)conn;
259:   if (dropping_)
260:   {
261:     return;
262:   }
263: 
264:   ROS_ASSERT(conn == connection_);
265: 
266:   SubscriptionPtr parent = parent_.lock();
267: 
268:   if (reason == Connection::TransportDisconnect)
269:   {
270:     std::string topic = parent ? parent->getName() : "unknown";
271: 
272:     ROSCPP_CONN_LOG_DEBUG("Connection to publisher [%s] to topic [%s] dropped", connection_->getTransport()->getTransportInfo().c_str(), topic.c_str());
273: 
274:     ROS_ASSERT(!needs_retry_);
275:     needs_retry_ = true;
276:     next_retry_ = SteadyTime::now() + retry_period_;
277: 
278:     if (retry_timer_handle_ == -1)
279:     {
280:       retry_period_ = WallDuration(0.1);
281:       next_retry_ = SteadyTime::now() + retry_period_;
282:       // shared_from_this() shared_ptr is used to ensure TransportPublisherLink is not
283:       // destroyed in the middle of onRetryTimer execution
284:       retry_timer_handle_ = getInternalTimerManager()->add(WallDuration(retry_period_),
285:           boost::bind(&TransportPublisherLink::onRetryTimer, this, _1), getInternalCallbackQueue().get(),
286:           shared_from_this(), false);
287:     }
288:     else
289:     {
290:       getInternalTimerManager()->setPeriod(retry_timer_handle_, retry_period_);
291:     }
292:   }
293:   else
294:   {
295:     drop();
296:   }
297: }
298: 

The retry and drop handling logic is listed above; we will not analyze it further here.

5. Relationship to other modules
1. How is ConnectionManager started?

File: src/ros_comm/roscpp/src/libros/init.cpp
293: void start()
294: {
328:   ConnectionManager::instance()->start();
}

In init.cpp, start() obtains the ConnectionManager instance and calls start() on it to launch the ConnectionManager.

2. TransportPublisherLink::handleMessage and TransportSubscriberLink::enqueueMessage are the external interfaces; for now we can regard them as connect_manager's overall public interface, to be analyzed in later articles.
