(三)基于BOOST ASIO实现的UDP通信(单播客户端/服务端与组播收发)
代码
//asio_socket.h
#pragma once
// Common Boost.Asio type aliases, helper macros, and tuning constants
// shared by the TCP/UDP socket wrappers in this project.
#include <queue>
#include <mutex>
#include <string>
#include <vector>
#include <atomic>
#include <map>
#include <unordered_map>
#include <boost/asio.hpp>
#include <boost/thread.hpp>
#include <assert.h>
#include <condition_variable>
#include <iostream>
#include "glog/init_glog.h"

// Type aliases (modern `using` form; names preserved for source compatibility).
using io_service_type      = boost::asio::io_service;
using io_service_ptr_type  = boost::shared_ptr<io_service_type>;
using tcp_resolver_type    = boost::asio::ip::tcp::resolver;
using udp_resolver_type    = boost::asio::ip::udp::resolver;
using tcp_socket_type      = boost::asio::ip::tcp::socket;
using udp_socket_type      = boost::asio::ip::udp::socket;
using tcp_socket_ptr_type  = boost::shared_ptr<tcp_socket_type>;
using udp_socket_ptr_type  = boost::shared_ptr<udp_socket_type>;
using tcp_endpoint_type    = boost::asio::ip::tcp::endpoint;
using udp_endpoint_type    = boost::asio::ip::udp::endpoint;
using address_type         = boost::asio::ip::address;
using address_v4_type      = boost::asio::ip::address_v4;
using address_v6_type      = boost::asio::ip::address_v6;
using work_type            = boost::asio::io_service::work;
using work_ptr_type        = boost::shared_ptr<work_type>;
using tcp_acceptor_type    = boost::asio::ip::tcp::acceptor;
using tcp_acceptor_ptr_type = boost::shared_ptr<tcp_acceptor_type>;
using dtimer_type          = boost::asio::deadline_timer;
using error_code_type      = boost::system::error_code;
using buffer_type          = std::vector<char>;

// Protocol shorthand macros.
#define TCP_V4 boost::asio::ip::tcp::v4()
#define TCP_V6 boost::asio::ip::tcp::v6()
#define UDP_V4 boost::asio::ip::udp::v4()
#define UDP_V6 boost::asio::ip::udp::v6()
#define TCP_V4_PROTOCOL boost::asio::ip::tcp::v4().protocol()
#define TCP_V6_PROTOCOL boost::asio::ip::tcp::v6().protocol()
#define UDP_V4_PROTOCOL boost::asio::ip::udp::v4().protocol()
#define UDP_V6_PROTOCOL boost::asio::ip::udp::v6().protocol()
// Interruption-point-aware sleeps (boost::this_thread::sleep honors
// boost::thread::interrupt()).
#define BOOST_SLEEP_S(x) (boost::this_thread::sleep(boost::posix_time::seconds(x)))
#define BOOST_SLEEP_MS(x) (boost::this_thread::sleep(boost::posix_time::milliseconds(x)))

// Queue-depth limits (messages) for the producer-consumer buffers.
constexpr size_t tcp_max_readbuf_size  = 2000;
constexpr size_t tcp_max_writebuf_size = 2000;
constexpr size_t udp_max_sendbuf_size  = 2000;
constexpr size_t udp_max_recvbuf_size  = 2000;
// Per-message / raw-buffer byte limits.
constexpr int tcp_max_readbuf_raw_size = 65536;
constexpr int tcp_max_msg_length = 65536;
constexpr int udp_max_msg_length = 65536;
// Maximum simultaneous client connections the TCP server accepts.
constexpr int tcp_max_connected_server_num = 2; // 64;
// Server-side read timeout (seconds): connections without a heartbeat
// for this long are dropped.
constexpr int tcp_server_read_timeout = 15;
constexpr int INFIT_TIME = 0x7FFFFFFF;
//asio_udp_partner.hpp
#include "asio_socket.h"
using std::string;
using std::vector;
// UdpPartner: a blocking-I/O UDP endpoint that can act as a unicast
// client, unicast server, multicast sender, or multicast receiver.
// Internally it runs a send thread and/or a receive thread around two
// producer-consumer queues guarded by mutex + condition_variable.
class UdpPartner
{
	enum WORK_MODE
	{
		WORK_MODE_SINGLECAST_CLIENT = 0,
		WORK_MODE_SINGLECAST_SERVER = 1,
		WORK_MODE_MULTICAST_RECEIVER = 2,
		WORK_MODE_MULTICAST_SENDER = 3,
	};
	// A message payload paired with its peer endpoint.
	typedef std::pair<string, udp_endpoint_type> UMSG;
private:
	udp_endpoint_type m_remote_ep;      // remote / multicast-group endpoint (set in ctor)
	udp_endpoint_type m_local_ep;       // local endpoint (set in ctor)
	io_service_type m_ios;
	udp_socket_type m_sock;
	std::mutex mut_send_buf;
	std::condition_variable cv_send_buf;
	std::queue<UMSG> m_send_to_buf;     // produced by push_send_buf(), consumed by send thread
	std::mutex mut_recv_buf;
	std::condition_variable cv_recv_buf;
	std::queue<UMSG> m_recv_from_buf;   // produced by recv thread, consumed by pop_recv_buf()
	std::atomic<bool> m_stop;
	boost::thread th_send;
	boost::thread th_recv;
	WORK_MODE m_work_mode;
public:
	// Multicast UDP.
	// @param multicast_ip     multicast group address
	// @param multicast_port   multicast port
	// @param multicast_sender true constructs a sender, false a receiver
	UdpPartner(const string & multicast_ip, int multicast_port, bool multicast_sender = true)
		: m_remote_ep(address_v4_type::from_string(multicast_ip), multicast_port)
		, m_local_ep(address_v4_type::from_string("0.0.0.0"), multicast_port)
		, m_sock(m_ios, boost::asio::ip::udp::v4())
		, m_stop(false)
	{
		if (multicast_sender)
		{
			// Multicast sender: no local bind, the system assigns a port.
			m_work_mode = WORK_MODE_MULTICAST_SENDER;
			th_send = boost::thread(boost::bind(&UdpPartner::thread_send_to_loop, this));
		}
		else
		{
			// Multicast receiver: must bind the multicast port to receive data.
			m_work_mode = WORK_MODE_MULTICAST_RECEIVER;
			// ---- multicast option setup BEGIN ----
			m_sock.set_option(boost::asio::ip::udp::socket::reuse_address(true));
			m_sock.bind(m_local_ep);
			m_sock.set_option(boost::asio::ip::multicast::join_group(m_remote_ep.address()));
			m_sock.set_option(boost::asio::ip::multicast::enable_loopback(true));
			// ---- multicast option setup END ----
			th_recv = boost::thread(boost::bind(&UdpPartner::thread_recv_from_loop, this));
		}
	}
	// Unicast client: udp sender.
	// @param remote_ip   peer UDP address
	// @param remote_port peer UDP port
	// @param local_port  local port to bind; 0 lets the system choose
	// @param connect_udp whether to connect() the socket to the peer
	UdpPartner(const string & remote_ip, int remote_port, int local_port = 0, bool connect_udp = true)
		: m_remote_ep(address_v4_type::from_string(remote_ip), remote_port)
		, m_local_ep(boost::asio::ip::udp::v4(), local_port)
		, m_sock(m_ios, boost::asio::ip::udp::v4())
		, m_stop(false)
		, m_work_mode(WORK_MODE_SINGLECAST_CLIENT)
	{
		if (local_port > 0)
		{
			m_sock.bind(m_local_ep);
		}
		if (connect_udp)
		{
			// A UDP connect() only records the peer address in the kernel
			// and surfaces immediately-knowable errors; it is nothing like
			// a TCP handshake. It may be called again to change the peer.
			error_code_type err;
			m_sock.connect(m_remote_ep, err);
			if (err)
			{
				LOG_INFO_GLOG << "udp connect failed:" << err.message();
			}
			else
			{
				LOG_INFO_GLOG << "udp connect success";
			}
			th_send = boost::thread(boost::bind(&UdpPartner::thread_send_loop, this));
			th_recv = boost::thread(boost::bind(&UdpPartner::thread_recv_loop, this));
		}
		else
		{
			th_recv = boost::thread(boost::bind(&UdpPartner::thread_recv_from_loop, this));
			th_send = boost::thread(boost::bind(&UdpPartner::thread_send_to_loop, this));
		}
	}
	// Unicast server: udp receiver.
	// @param local_port local port to bind
	UdpPartner(int local_port)
		: m_remote_ep(udp_endpoint_type())
		, m_local_ep(boost::asio::ip::udp::v4(), local_port)
		, m_sock(m_ios, m_local_ep)
		, m_stop(false)
		, m_work_mode(WORK_MODE_SINGLECAST_SERVER)
	{
		th_recv = boost::thread(boost::bind(&UdpPartner::thread_recv_from_loop, this));
		th_send = boost::thread(boost::bind(&UdpPartner::thread_send_to_loop, this));
	}
	~UdpPartner()
	{
		stop();
	}
	// Shut down both worker threads and drain the queues. Safe to call
	// more than once (the destructor calls it unconditionally).
	void stop()
	{
		// Idempotence guard: only the first caller runs the teardown,
		// so a manual stop() followed by the destructor is harmless.
		if (m_stop.exchange(true))
		{
			return;
		}
		error_code_type ec;
		if (m_work_mode == WORK_MODE_MULTICAST_RECEIVER)
		{
			// Non-throwing overload: the socket may already be unusable,
			// and stop() runs on the destructor path.
			m_sock.set_option(boost::asio::ip::multicast::leave_group(m_remote_ep.address()), ec);
		}
		// Closing the socket aborts threads blocked inside receive()/
		// receive_from()/send(); interrupt() alone cannot, because
		// blocking socket calls are not Boost.Thread interruption points.
		m_sock.close(ec);
		// Drain each queue with its own temporary (reusing one temporary
		// for both swaps would move leftover recv messages into the send
		// queue) and wake any waiters so they observe m_stop.
		{
			std::unique_lock<std::mutex> lk(mut_recv_buf);
			std::queue<UMSG> drained;
			std::swap(m_recv_from_buf, drained);
			cv_recv_buf.notify_all();
		}
		{
			std::unique_lock<std::mutex> lk(mut_send_buf);
			std::queue<UMSG> drained;
			std::swap(m_send_to_buf, drained);
			cv_send_buf.notify_all();
		}
		if (th_send.joinable())
		{
			th_send.interrupt();
			th_send.join();
		}
		if (th_recv.joinable())
		{
			th_recv.interrupt();
			th_recv.join();
		}
	}
	// Queue a message for sending. When the queue is full the oldest
	// message is dropped. `ep` is ignored in multicast mode (the group
	// endpoint is used) and by connected unicast sockets.
	void push_send_buf(const string & msg, const udp_endpoint_type & ep = udp_endpoint_type())
	{
		if (m_stop || m_work_mode == WORK_MODE_MULTICAST_RECEIVER)
		{
			return;
		}
		std::unique_lock<std::mutex> lk(mut_send_buf);
		if (m_send_to_buf.size() >= udp_max_sendbuf_size)
		{
			LOG_INFO_GLOG << "send buf overflow";
			m_send_to_buf.pop(); // drop oldest
		}
		m_send_to_buf.emplace(msg, ep);
		cv_send_buf.notify_one();
	}
	// Block until a received message is available (or stop() is called).
	// @return true and fills msg/ep on success; false on stop/invalid mode.
	bool pop_recv_buf(string & msg, udp_endpoint_type & ep)
	{
		if (m_stop || m_work_mode == WORK_MODE_MULTICAST_SENDER)
		{
			return false;
		}
		std::unique_lock<std::mutex> lk(mut_recv_buf);
		cv_recv_buf.wait(lk, [this](){ return (!m_recv_from_buf.empty() || m_stop); });
		if (m_stop)
		{
			return false;
		}
		msg = m_recv_from_buf.front().first;
		ep = m_recv_from_buf.front().second;
		m_recv_from_buf.pop();
		return true;
	}
private:
	// Send loop for a connected unicast socket.
	void thread_send_loop()
	{
		LOG_INFO_GLOG << "start thread_send_loop of [connected] udp";
		string msg;
		error_code_type err;
		while (!m_stop)
		{
			boost::this_thread::interruption_point();
			{
				std::unique_lock<std::mutex> lk(mut_send_buf);
				cv_send_buf.wait(lk, [this](){ return (!m_send_to_buf.empty() || m_stop); });
				if (m_stop)
				{
					return;
				}
				msg = m_send_to_buf.front().first;
				m_send_to_buf.pop();
			}
			size_t size = m_sock.send(boost::asio::buffer(msg), 0, err);
			(void)size;
			if (err)
			{
				// NOTE(review): any send error terminates the thread;
				// transient errors are not retried.
				LOG_WARN_GLOG << err.message();
				return;
			}
			LOG_INFO_GLOG << "[send to] " << m_remote_ep.address().to_string()
				<< ":" << m_remote_ep.port() << " >>" << msg;
		}
	}
	// Receive loop for a connected unicast socket.
	void thread_recv_loop()
	{
		LOG_INFO_GLOG << "start thread_recv_loop of [connected] udp";
		error_code_type err;
		char buf[udp_max_msg_length];
		while (!m_stop)
		{
			boost::this_thread::interruption_point();
			size_t size = m_sock.receive(boost::asio::buffer(buf, udp_max_msg_length), 0, err);
			if (err)
			{
				if (m_stop)
				{
					return; // socket closed by stop()
				}
				LOG_WARN_GLOG << err.message();
				continue;
			}
			// Construct from (data, length) so datagrams containing NUL
			// bytes are not truncated; this also makes the memset of the
			// buffer unnecessary.
			string msg(buf, size);
			{
				std::unique_lock<std::mutex> lk(mut_recv_buf);
				if (m_recv_from_buf.size() >= udp_max_recvbuf_size)
				{
					LOG_WARN_GLOG << "recv buf overflow";
					m_recv_from_buf.pop(); // drop oldest
				}
				m_recv_from_buf.emplace(msg, m_remote_ep);
				LOG_INFO_GLOG << "[recv from] " << m_remote_ep.address().to_string() << ":" << m_remote_ep.port() << " >>" << msg;
			}
			cv_recv_buf.notify_one();
		}
	}
	// Send loop for unconnected unicast and multicast sockets.
	void thread_send_to_loop()
	{
		LOG_INFO_GLOG << "start thread_send_to_loop of udp";
		string msg;
		error_code_type err;
		udp_endpoint_type the_remote_ep;
		while (!m_stop)
		{
			boost::this_thread::interruption_point();
			{
				std::unique_lock<std::mutex> lk(mut_send_buf);
				cv_send_buf.wait(lk, [this](){ return (!m_send_to_buf.empty() || m_stop); });
				if (m_stop)
				{
					return;
				}
				msg = m_send_to_buf.front().first;
				if (m_work_mode == WORK_MODE_SINGLECAST_CLIENT
					|| m_work_mode == WORK_MODE_SINGLECAST_SERVER)
				{
					// Unicast: destination travels with the message.
					the_remote_ep = m_send_to_buf.front().second;
				}
				else
				{
					// Multicast: always send to the group endpoint.
					the_remote_ep = m_remote_ep;
				}
				m_send_to_buf.pop();
			}
			size_t size = m_sock.send_to(boost::asio::buffer(msg), the_remote_ep, 0, err);
			(void)size;
			if (err)
			{
				// NOTE(review): any send error terminates the thread;
				// transient errors are not retried.
				LOG_WARN_GLOG << err.message();
				return;
			}
			LOG_INFO_GLOG << "[send to] " << the_remote_ep.address().to_string()
				<< ":" << the_remote_ep.port() << " >>" << msg;
		}
	}
	// Receive loop for unconnected unicast and multicast sockets.
	void thread_recv_from_loop()
	{
		LOG_INFO_GLOG << "start thread_recv_from_loop of udp";
		error_code_type err;
		udp_endpoint_type the_remote_ep;
		char buf[udp_max_msg_length];
		while (!m_stop)
		{
			boost::this_thread::interruption_point();
			size_t size = m_sock.receive_from(boost::asio::buffer(buf, udp_max_msg_length), the_remote_ep, 0, err);
			if (err)
			{
				if (m_stop)
				{
					return; // socket closed by stop()
				}
				LOG_WARN_GLOG << err.message();
				continue;
			}
			// (data, length) construction: no NUL truncation, no memset.
			string msg(buf, size);
			{
				std::unique_lock<std::mutex> lk(mut_recv_buf);
				if (m_recv_from_buf.size() >= udp_max_recvbuf_size)
				{
					LOG_WARN_GLOG << "recv buf overflow";
					m_recv_from_buf.pop(); // drop oldest
				}
				m_recv_from_buf.emplace(msg, the_remote_ep);
				LOG_INFO_GLOG << "[recv from] " << the_remote_ep.address().to_string()
					<< ":" << the_remote_ep.port() << " >>" << msg;
			}
			cv_recv_buf.notify_one();
		}
	}
};
// Shared-ownership handle for a UdpPartner (modern alias syntax).
using pUdpPartnerType = boost::shared_ptr<UdpPartner>;
//demo.cpp
#include "asio_udp_partner.hpp"
using namespace std;
int main()
{
char c;
while (c = getchar())
{
if (c == '1')
{
cout << "start UdpPartner of client" << endl;
UdpPartner c("127.0.0.1", 10087, 0, true);
while (1)
{
string tmp = "client msg:" + to_string(time(nullptr));
udp_endpoint_type ep(address_v4_type::from_string("10.33.65.23"), 10087);
BOOST_SLEEP_S(6);
c.push_send_buf(tmp, ep);
}
}
else if (c == '2')
{
cout << "start UdpPartner of server" << endl;
UdpPartner s(10087);
while (1)
{
string tmp;
udp_endpoint_type ep;
if (s.pop_recv_buf(tmp, ep))
{
string tmp1 = "server msg----------:" + to_string(time(nullptr));
s.push_send_buf(tmp1, ep);
}
}
}
else if (c == '3')
{
cout << "start UdpPartner of multicast sender" << endl;
UdpPartner pub("239.0.0.1", 10086, true);
while (1)
{
string tmp = "multicast send msg:" + to_string(time(nullptr));
udp_endpoint_type ep;
BOOST_SLEEP_S(3);
pub.push_send_buf(tmp, ep);
}
}
else if (c == '4')
{
cout << "start UdpPartner of multicast receiver" << endl;
UdpPartner sub("239.0.0.1", 10086, false);
while (1)
{
string tmp;
udp_endpoint_type ep;
sub.pop_recv_buf(tmp, ep);
BOOST_SLEEP_S(1);
}
}
else
{
continue;
}
}
return 0;
}
使用说明
-
1.本文基于BOOST ASIO实现了UDP的单播,组播(广播与组播仅对端地址不同);
-
2.UDP单播实现了客户端和服务端,可通过不同的构造函数构造不同的UDP参与者。
UdpPartner(const string & multicast_ip, int multicast_port,bool multicast_sender=true);
构造UDP组播对象,入参依次为:组播IP、组播port、本次构造的是发送端还是接收端。
并启动发送或者接收线程。
UdpPartner(const string & remote_ip, int remote_port, int local_port=0, bool connect_udp=true);
构造UDP单播客户端,入参依次为:远程服务端IP、远程服务端port、客户端绑定的本地端口、是否连接。
并启动发送和接收线程。
注意:UDP是可以调用
connect
函数的,但这里的UDP连接与TCP三次握手的连接截然不同。UDP连接操作会使内核:1.检查是否存在马上可知的错误。2.记录对端的IP地址和port号,然后马上返回调用进程。一个UDP连接可以多次调用
connect
函数,以最后一次调用传入的信息为准。
UdpPartner(int local_port);
构造UDP单播服务端,入参为:服务端绑定的本地端口。
并启动发送和接收线程。
-
- 本文实现中使用生产者-消费者模型。
对发送端而言,向m_send_to_buf
中加入待发送的数据,视为生产。发送线程将从中取出数据进行发送,视为消费。
对接收线程而言,向m_recv_from_buf
中加入已接收的数据,视为生产。接收端从中取出数据进行handle处理,视为消费。
- 上述两个缓冲队列均由互斥锁(std::mutex)与条件变量(std::condition_variable)保护,以保证多线程访问的安全性。