boost库的IO服务线程池封装笔记

BoostAsioSocket.h

#pragma once
#include <string>
#include <boost/asio.hpp>
#include <boost/enable_shared_from_this.hpp>
#include <boost/bind.hpp>
#include <boost/smart_ptr.hpp>
#include <boost/make_shared.hpp>
#include <boost/ref.hpp>
#include <boost/thread.hpp>
#include "WorkEvent.h"

#if (defined _WIN32 || defined _WIN64)
#define WINAPI __stdcall
#define CALLBACK __stdcall
#else
#define WINAPI //__attribute__((__stdcall))
#define CALLBACK //__attribute__((__stdcall))
#endif


#ifdef USE_LOWER_GCC
#include "boost/atomic.hpp"
#else
#include <atomic>
#endif

using namespace std;
using namespace ASYN_WORK;

// Status strings passed to lpSocketCallBack as the Error argument.
const string boost_str_e_ok = "ok";
const string boost_str_e_error = "error";
const string boost_str_e_refused = "refused";
const string boost_str_e_timeout = "timeout";

// Event names passed to lpSocketCallBack as the CmdType argument.
const string boost_str_accept_new_socket = "AcceptNewSocket";
const string boost_str_receive_complete = "RecvComplete";
const string boost_str_send_complete = "SendComplete";
const string boost_str_connect_complete = "ConnectComplete";
const string boost_str_pull_send = "PullSend";

// Payload delivered with the "AcceptNewSocket" event: the newly created
// BoostAsioSocket wrapper plus the remote peer's address.
typedef struct tagACCEPT_SOCKET
{
	void* pSocket;   // newly accepted BoostAsioSocket* (opaque to the callback)
	string IP;       // remote peer IPv4 address, dotted-decimal
	int nPort;       // remote peer port
	// Members initialized in the init-list instead of assignment in the
	// body (idiomatic, and keeps the POD members from ever being read
	// uninitialized).
	tagACCEPT_SOCKET()
		: pSocket(nullptr)
		, nPort(0)
	{
	}
}ACCEPT_SOCKET, *LPACCEPT_SOCKET;


// Bookkeeping for one in-flight read or write: the caller-owned buffer
// plus requested and completed byte counts.
typedef struct tagIO_BUF
{
	unsigned char* pBuf;   // caller-owned; must outlive the async operation
	int nToTransfer;       // bytes requested
	int nTransferred;      // bytes actually transferred so far
	// Init-list initialization instead of assignment in the body.
	tagIO_BUF()
		: pBuf(nullptr)
		, nToTransfer(0)
		, nTransferred(0)
	{
	}
}IO_BUF, *LPIO_BUF;

// Lifecycle states of a BoostAsioSocket.
enum enSocketStatus
{
	INF_SOCKET_CONNECTING,
	INF_SOCKET_CONNECTED,
	INF_SOCKET_CLOSING,
	INF_SOCKET_LISTENING,
	INF_SOCKET_CLOSED
};

// User callback invoked for every socket event.  CmdType is one of the
// boost_str_* event names and Error one of the boost_str_e_* status
// strings; pBuf/nSize carry the payload (an ACCEPT_SOCKET for accepts,
// the I/O buffer for send/recv completions).  The return value is not
// examined by any caller in this file.
typedef int(CALLBACK* lpSocketCallBack)(void* pUser, void* pSocket, const char* CmdType, const char* Error, unsigned char* pBuf, int nSize);


// Asynchronous TCP socket built on boost::asio and driven by the shared
// BoostIoServicePool.  One instance is either a listener
// (StartListen/StopListen) or a peer connection (AsynConnect*/Attach).
// All completion events are reported through lpSocketCallBack callbacks.
// NOTE: instances must be owned by a boost::shared_ptr -- the async
// handlers bind shared_from_this() to keep the object alive while I/O is
// pending.
class BoostAsioSocket
	: public boost::enable_shared_from_this<BoostAsioSocket>
{
public:
	BoostAsioSocket();
	// Wraps an already-connected socket (server side, produced by accept).
	BoostAsioSocket(boost::shared_ptr<boost::asio::ip::tcp::socket>& pSocket);
	~BoostAsioSocket();
	// Bind + listen on Server:nPort; pCallback receives "AcceptNewSocket"
	// events whose payload is an ACCEPT_SOCKET describing the new peer.
	int StartListen(string Server, int nPort, lpSocketCallBack pCallback, void* pUser);
	int StopListen();
private:
	boost::asio::io_service& m_ios;   // io_service (from the pool) running this socket
	boost::asio::ip::tcp::acceptor m_acceptor;   // used only in listening mode
	boost::shared_ptr<boost::asio::ip::tcp::socket> m_spSocket;
	boost::asio::strand m_strand;     // serializes posted handlers (pull-send, timeout)
	boost::recursive_mutex m_mutex;   // guards flags/state across io threads and callers
	// One callback + user-cookie pair per event type.
	lpSocketCallBack m_pListenCallback;
	lpSocketCallBack m_pRecvCallback;
	lpSocketCallBack m_pSendCallback;
	lpSocketCallBack m_pConnectCallback;
	lpSocketCallBack m_pNotifySendCallback;
	void* m_pListenUser;
	void* m_pRecvUser;
	void* m_pSendUser;
	void* m_pConnectUser;
	void* m_pNotifySendUser;
	bool m_IsReceiving;               // a read is currently in flight
	bool m_IsSending;                 // a write is currently in flight
	IO_BUF m_BufReceving;             // caller buffer of the pending read
	IO_BUF m_BufSending;              // caller buffer of the pending write
	enSocketStatus m_enStatus;
	string m_RemoteIP;
	int m_nRemotePort;
	CWorkEvent m_evtClose;            // close handshake event (uses are commented out)
	bool m_bClose;                    // set once HandleClose() has run
	
private:
	int AsynAccept();
	void HandleAsynAccept(const boost::system::error_code& error);
	void HandleAsynRecv(const boost::system::error_code& error, int nTransferred);
	void HandleAsynSend(const boost::system::error_code& error, int nTransferred);
	void HandleAsynConnect(const boost::system::error_code& error);
	

	// Adopts an externally accepted socket, records its remote endpoint.
	int Attach(boost::shared_ptr<boost::asio::ip::tcp::socket> &pSocket);
	// Maps a platform error code to one of the boost_str_e_* strings.
	const char* GetErrorString(int nErrorCode);
	// Opens m_spSocket and binds it locally before connecting.
	int BindLocal(string Server, int nPort);
	// Best-effort lookup (via lsof) of the process holding nPort.
	std::string WhoOccupyThePort(int nPort);

	uint64_t m_handle;                        // unique id of this instance
	static std::recursive_mutex m_mtHandle;   // guards m_globalHandle
	static uint64_t m_globalHandle;           // monotonically increasing id source

public:
	// Post one read of exactly nSize bytes into caller-owned pBuf; fires
	// a "RecvComplete" event when the buffer is full or on error.
	int AsynRecv(unsigned char* pBuf, int nSize, lpSocketCallBack pCallback, void* pUser);
	// Post one write of nSize bytes from caller-owned pBuf; fires a
	// "SendComplete" event when everything was written or on error.
	int AsynSend(unsigned char* pBuf, int nSize, lpSocketCallBack pCallback, void* pUser);
	// Blocking write; returns the boost error code value (0 on success).
	int Send(unsigned char* pBuf, int nSize);
	// Begin an async connect; "ConnectComplete" reports ok/refused/timeout.
	int AsynConnect(string Server, int nPort, lpSocketCallBack pCallback, void* pUser);
	int AsynConnectWithLocalPort(string Server, int nPort, string LocalIp, int LocalPort, lpSocketCallBack pCallback, void* pUser);
	// Request a "PullSend" event (posted through the strand when bAsyn).
	int NotifyPullSend(unsigned char* pBuf, int nSize, lpSocketCallBack pCallback, void* pUser, bool bAsyn = true);
	int NotifyClose();
	int HandlePullSend();
	int HandleClose();
	void NotifyTimeout();
	int HandleTimeout();
	int GetLocalEndpoint(std::string& Ip, int& nPort);
	// Returns the OS descriptor, or -1 when unavailable.
	int GetNativeSocket();

	// Shared-ownership accessor; only valid while a shared_ptr owns *this.
	boost::shared_ptr<BoostAsioSocket> GetPtr()
	{
		return shared_from_this();
	}

	// Process-unique id assigned at construction.
	uint64_t GetHandle()
	{
		return m_handle;
	}
};

BoostAsioSocket.cpp文件

#include "BoostAsioSocket.h"
#include "BoostIoServicePool.h"
#include "Log.h"

// Kernel send/receive buffer size (500 KB) applied to every socket.
#define SOCKET_BUF_SIZE	500*1024

// Process-wide diagnostic counters: live BoostAsioSocket instances and
// connect operations currently in flight (logged, never branched on).
#ifdef USE_LOWER_GCC
    static boost::atomic_int _nInstanceAsiSocket(0);
    static boost::atomic_int _nConnectCountAsiSocket(0);
#else
    static atomic<int> _nInstanceAsiSocket(0);
    static atomic<int> _nConnectCountAsiSocket(0);
#endif

// Monotonic id generator shared by all sockets (see GetHandle()).
std::recursive_mutex BoostAsioSocket::m_mtHandle;
uint64_t BoostAsioSocket::m_globalHandle = 0;

// Default constructor (client / listener side): draws an io_service from
// the global pool, zeroes every callback and flag, and assigns a
// process-unique handle under m_mtHandle.
BoostAsioSocket::BoostAsioSocket()
	: m_ios(BoostIoServicePool::Instance()->GetIoService())
	, m_acceptor(m_ios)
	, m_strand(m_ios)
	, m_pListenCallback(nullptr)
    , m_pRecvCallback(nullptr)
    , m_pSendCallback(nullptr)
    , m_pConnectCallback(nullptr)
    , m_pNotifySendCallback(nullptr)
    , m_pListenUser(nullptr)
    , m_pRecvUser(nullptr)
    , m_pSendUser(nullptr)
    , m_pConnectUser(nullptr)
    , m_pNotifySendUser(nullptr)
	, m_IsReceiving(false)
	, m_IsSending(false)
	, m_enStatus(INF_SOCKET_CLOSED)
	, m_nRemotePort(0)
	, m_bClose(false)
{
	_nInstanceAsiSocket++;
	{
		// Serialize handle allocation across all instances.
		std::unique_lock<std::recursive_mutex> autolock(m_mtHandle);
		++m_globalHandle;
		m_handle = m_globalHandle;
	}
	LOG_INFO("add new boost socket %p  with ios %p -- socket num is %d == handle %lld", this, &m_ios, _nInstanceAsiSocket.load(), m_handle);
}

// Server-side constructor: wraps an already-connected tcp::socket produced
// by an accept.  Reuses the socket's own io_service and attaches
// immediately (recording the remote endpoint, state -> CONNECTED).
BoostAsioSocket::BoostAsioSocket(boost::shared_ptr<boost::asio::ip::tcp::socket> &pSocket)
	: m_ios(pSocket->get_io_service())
	, m_acceptor(m_ios)
	, m_strand(m_ios)
    , m_pListenCallback(nullptr)
    , m_pRecvCallback(nullptr)
    , m_pSendCallback(nullptr)
    , m_pConnectCallback(nullptr)
    , m_pNotifySendCallback(nullptr)
    , m_pListenUser(nullptr)
    , m_pRecvUser(nullptr)
    , m_pSendUser(nullptr)
    , m_pConnectUser(nullptr)
    , m_pNotifySendUser(nullptr)
	, m_IsReceiving(false)
	, m_IsSending(false)
	, m_enStatus(INF_SOCKET_CLOSED)
	, m_nRemotePort(0)
	, m_bClose(false)   // bug fix: was left uninitialized in this constructor
{
    _nInstanceAsiSocket++;
	Attach(pSocket);
	{
		// Serialize handle allocation across all instances.
		std::unique_lock<std::recursive_mutex> autolock(m_mtHandle);
		++m_globalHandle;
		m_handle = m_globalHandle;
	}
	LOG_INFO("add new boost socket %p with tcp socket %p ios %p -- socket num is %d  === handle %lld", this, pSocket.get(), &m_ios, _nInstanceAsiSocket.load(), m_handle);
}

// Destructor: best-effort close of any still-attached socket.  Errors are
// logged, never propagated (a throwing destructor would terminate).
BoostAsioSocket::~BoostAsioSocket()
{
	try
	{
		if (m_spSocket)
		{
			// error_code overload: close() itself cannot throw here.
			boost::system::error_code e;
			m_spSocket->close(e);
			if (e)
			{
				LOG_ERROR("close error : %s", e.message().c_str());
			}
		}
	}
	catch (boost::system::system_error &e)
	{
		LOG_ERROR("HandleClose error: %d reason:%s", e.code().value(), e.what());
	}
	_nInstanceAsiSocket--;
	LOG_INFO("delete boost socket %p -- socket num is %d", this, _nInstanceAsiSocket.load());
}


// Opens the acceptor on Server:nPort (IPv4 dotted-decimal only), enables
// SO_REUSEADDR and arms the first async accept.  Returns 0 on success, 1
// when open/bind/listen fails (the process occupying the port is looked
// up for the error log).
int BoostAsioSocket::StartListen(string Server, int nPort, lpSocketCallBack pCallback, void* pUser)
{
	m_pListenCallback = pCallback;
	m_pListenUser = pUser;
	m_enStatus = INF_SOCKET_LISTENING;
	try
	{
		boost::asio::ip::address_v4 address;
		address = boost::asio::ip::address_v4::from_string(Server);
		boost::asio::ip::tcp::endpoint localEndpoint(address, (unsigned short)nPort);
		m_acceptor.open(boost::asio::ip::tcp::v4());
		m_acceptor.set_option(boost::asio::ip::tcp::acceptor::reuse_address(true));
		m_acceptor.bind(localEndpoint);
		m_acceptor.listen();
	}
	catch (boost::system::system_error& e)
	{
		string msg = e.what();
		LOG_ERROR("listen %s:%d failed, %s, port occupied by: %s", Server.c_str(), nPort, msg.c_str(), WhoOccupyThePort(nPort).c_str());
		return 1;
	}
	AsynAccept();
	return 0;
}

// Arms one asynchronous accept.  The socket that will receive the peer is
// created on an io_service drawn from the pool (round-robin), so accepted
// connections are spread over the pool's worker threads.
int BoostAsioSocket::AsynAccept()
{
 	//m_spSocket = boost::make_shared<boost::asio::ip::tcp::socket>(boost::ref(m_ios));	
    m_spSocket = boost::make_shared<boost::asio::ip::tcp::socket>(boost::ref(BoostIoServicePool::Instance()->GetIoService()));  // boost::ref is required here, otherwise this fails to compile on Linux
    try
    {
        m_acceptor.async_accept(*m_spSocket,
            boost::bind(&BoostAsioSocket::HandleAsynAccept, shared_from_this(), boost::asio::placeholders::error));
    }
    catch (boost::system::system_error &e)
    {
		LOG_ERROR("Failed to post accept message: %d, %s", e.code().value(), e.what());
        return 1;
    }
	return 0;
}

// Accept completion handler.  On success wraps the new tcp::socket in a
// BoostAsioSocket, reports it to the listen callback as an
// "AcceptNewSocket" event, then re-arms the acceptor.
// Ownership: the wrapper is owned by a local shared_ptr for the duration
// of the callback; a callee that wants to keep the connection must take
// shared ownership (e.g. via GetPtr()) before returning.
void BoostAsioSocket::HandleAsynAccept(const boost::system::error_code& error)
{
	boost::unique_lock<boost::recursive_mutex> lock(m_mutex);

	if ((m_enStatus == INF_SOCKET_CLOSING) || (m_enStatus == INF_SOCKET_CLOSED))
	{
		LOG_ERROR("m_enStatus is closing or closed: %d", m_enStatus);
		return;
	}

	string err = !error ? boost_str_e_ok : boost_str_e_error;
	if (error)
	{
		// Transient accept failure: just re-arm and wait for the next peer.
		AsynAccept();
		return;
	}
	try
	{
		if (m_pListenCallback != nullptr)
		{
			m_spSocket->set_option(boost::asio::ip::tcp::socket::send_buffer_size(SOCKET_BUF_SIZE));
			m_spSocket->set_option(boost::asio::ip::tcp::socket::receive_buffer_size(SOCKET_BUF_SIZE));
			boost::asio::socket_base::linger option(true, 0);
			m_spSocket->set_option(option);
			// Bug fix: the wrapper is shared_ptr-managed (make_shared), but
			// the old code also `delete`d the raw get() pointer in the catch
			// block, double-freeing the object.  Let the shared_ptr own it.
			auto spNewSocket = boost::make_shared<BoostAsioSocket>(m_spSocket);
			ACCEPT_SOCKET accept;
			accept.pSocket = spNewSocket.get();
			accept.IP = m_spSocket->remote_endpoint().address().to_string();
			accept.nPort = m_spSocket->remote_endpoint().port();
			LOG_INFO("accept boost fd %d", m_spSocket->native_handle());
			m_pListenCallback(m_pListenUser, this, boost_str_accept_new_socket.c_str(), err.c_str(), (unsigned char*)&accept, sizeof(ACCEPT_SOCKET));
			m_spSocket.reset();
		}
	}
	catch (...)
	{
		// remote_endpoint() can throw on an already-dead connection.  The
		// scoped shared_ptr (if it was created) cleans the wrapper up
		// automatically -- no manual delete.
		LOG_ERROR("Remote Address is invalid.");
	}
	AsynAccept();
}

// Closes the acceptor and marks the socket CLOSING; a pending
// HandleAsynAccept then bails out on its status check.  Always returns 0.
int BoostAsioSocket::StopListen()
{
	{
		boost::unique_lock<boost::recursive_mutex> lock(m_mutex);
		if (m_acceptor.is_open())
		{
			try
			{
				m_acceptor.close();
			}
			catch (boost::system::system_error &e)
			{
				// best effort: already closed / never opened
			}
		}
		m_enStatus = INF_SOCKET_CLOSING;
		//m_strand.post(boost::bind(&BoostAsioSocket::HandleClose, shared_from_this()));
	}
	//m_evtClose.WaitEvent(1000000);
	return 0;
}

// Adopts pSocket as this object's connection: enables linger(on, 0) (RST
// on close rather than TIME_WAIT), caches the remote endpoint for logging
// and marks the state CONNECTED.  Endpoint lookup errors are logged but
// not fatal.  Always returns 0.
int BoostAsioSocket::Attach(boost::shared_ptr<boost::asio::ip::tcp::socket>& pSocket)
{
	m_spSocket = pSocket;

	try
	{
		boost::asio::socket_base::linger option(true, 0);
		m_spSocket->set_option(option);

		m_RemoteIP = m_spSocket->remote_endpoint().address().to_string();
		m_nRemotePort = m_spSocket->remote_endpoint().port();
	}
	catch (boost::system::system_error &e)
	{
		LOG_ERROR("error: %d reason:%s", e.code().value(), e.what());
	}

	m_enStatus = INF_SOCKET_CONNECTED;
	return 0;
}


// Posts one asynchronous read of exactly nSize bytes into caller-owned
// pBuf (async_read, not read_some: it completes only when the buffer is
// full or an error occurs).  The result is reported to pCallback as a
// "RecvComplete" event.  Returns 1 when a read is already in flight, no
// socket is attached, or the post fails; 0 once the read is pending.
int BoostAsioSocket::AsynRecv(unsigned char* pBuf, int nSize, lpSocketCallBack pCallback, void* pUser)
{
	boost::unique_lock<boost::recursive_mutex> lock(m_mutex);
	if (m_IsReceiving)
	{
		return 1;
	}
	if (!m_spSocket)
	{
		// Bug fix: previously dereferenced m_spSocket unconditionally and
		// crashed when the socket was never connected / already released.
		return 1;
	}
	m_IsReceiving = true;
	m_pRecvCallback = pCallback;
	m_pRecvUser = pUser;
	m_BufReceving.pBuf = pBuf;
	m_BufReceving.nToTransfer = nSize;
	m_BufReceving.nTransferred = 0;
	try
	{
		boost::asio::async_read(*m_spSocket, boost::asio::buffer(pBuf, nSize), boost::bind(&BoostAsioSocket::HandleAsynRecv, shared_from_this(),
			boost::asio::placeholders::error, boost::asio::placeholders::bytes_transferred));
	}
	catch (boost::system::system_error &e)
	{
		LOG_ERROR("error: %d reason:%s", e.code().value(), e.what());
		// Bug fix: the read was never posted, so HandleAsynRecv will not run
		// to clear the flag -- without this reset every later AsynRecv on
		// this socket would be refused forever.
		m_IsReceiving = false;
		return 1;
	}

	return 0;
}


// Posts one asynchronous write of the whole [pBuf, pBuf+nSize) range
// (async_write: completes only when everything was sent or on error).
// Completion is reported to pCallback as a "SendComplete" event.  Returns
// 1 when no open socket is attached, a write is already in flight, or the
// post fails; 0 once the write is pending.
int BoostAsioSocket::AsynSend(unsigned char* pBuf, int nSize, lpSocketCallBack pCallback, void* pUser)
{
	boost::unique_lock<boost::recursive_mutex> lock(m_mutex);
	// Bug fix: also check the pointer itself -- the old code called
	// m_spSocket->is_open() and crashed when no socket was ever attached.
	if (!m_spSocket || !m_spSocket->is_open())
	{
		return 1;
	}
	if (m_IsSending)
	{
		return 1;
	}
	m_IsSending = true;
	m_pSendCallback = pCallback;
	m_pSendUser = pUser;
	m_BufSending.pBuf = pBuf;
	m_BufSending.nToTransfer = nSize;
	m_BufSending.nTransferred = 0;
	try
	{
		boost::asio::async_write(*m_spSocket, boost::asio::buffer(pBuf, nSize), boost::bind(&BoostAsioSocket::HandleAsynSend, shared_from_this(),
			boost::asio::placeholders::error, boost::asio::placeholders::bytes_transferred));
	}
	catch (boost::system::system_error &e)
	{
		LOG_ERROR("error: %d reason:%s", e.code().value(), e.what());
		// Bug fix: the write was never posted, so HandleAsynSend will not
		// run to clear the flag -- reset it or all further sends are refused.
		m_IsSending = false;
		return 1;
	}

	return 0;
}


// Synchronous (blocking) write of the whole buffer.  Returns the boost
// error code value (0 on success).  Note: when no socket is attached the
// function silently returns 0 -- callers that care must check separately.
int BoostAsioSocket::Send(unsigned char* pBuf, int nSize)
{
	//if (m_bClose)
	//{
	//	return 0;
	//}
	boost::system::error_code ec;
	try
	{
		
		if (m_spSocket)
		{
			// error_code overload: failures land in ec instead of throwing.
			boost::asio::write(*m_spSocket, boost::asio::buffer(pBuf, nSize), ec);
			if (ec.value())
			{
				LOG_ERROR("write to %s error : %s", m_RemoteIP.c_str(), ec.message().c_str());
				//m_spSocket->close();
			}
		}
	}
	catch (boost::system::system_error &e)
	{
		LOG_ERROR("error: %d reason:%s", e.code().value(), e.what());
	}
	return ec.value();
}

// Starts an asynchronous IPv4 connect to Server:nPort.  Returns 1 only
// when the address string cannot be parsed; otherwise 0, and the outcome
// is delivered to pCallback as a "ConnectComplete" event.
// NOTE(review): if async_connect itself throws, the error is logged but 0
// is still returned and no callback ever fires -- callers cannot detect
// this case; confirm whether an error return is expected here.
int BoostAsioSocket::AsynConnect(string Server, int nPort, lpSocketCallBack pCallback, void* pUser)
{
	m_pConnectCallback = pCallback;
	m_pConnectUser = pUser;
	boost::asio::ip::address_v4 address;
	try
	{
		address = boost::asio::ip::address_v4::from_string(Server);
	}
	catch (boost::system::system_error &e)
	{
		return 1;
	}
	m_enStatus = INF_SOCKET_CONNECTING;
	_nConnectCountAsiSocket++;
	m_RemoteIP = Server;
	m_nRemotePort = nPort;
	boost::asio::ip::tcp::endpoint ep(address, (unsigned short)nPort);
	m_spSocket = boost::make_shared<boost::asio::ip::tcp::socket>(boost::ref(m_ios));
	try
	{
		m_spSocket->async_connect(ep, boost::bind(&BoostAsioSocket::HandleAsynConnect, shared_from_this(),
			boost::asio::placeholders::error));
	}
	catch (boost::system::system_error &e)
	{
		LOG_ERROR("error: %d reason:%s", e.code().value(), e.what());
	}
	return 0;
}

// Opens m_spSocket and binds it to the requested local endpoint before a
// connect.  Bug fix: the Server argument was previously ignored and the
// socket was always bound to INADDR_ANY; it is now honored when non-empty
// (empty string keeps the old any-address behavior).  Returns 1 on
// failure (previously always 0, but no caller checks the result).
int BoostAsioSocket::BindLocal(string Server, int nPort)
{
	try
	{
		if (m_spSocket)
		{
			m_spSocket->open(boost::asio::ip::tcp::v4());
			boost::asio::ip::address_v4 addr = boost::asio::ip::address_v4::any();
			if (!Server.empty())
			{
				addr = boost::asio::ip::address_v4::from_string(Server);
			}
			m_spSocket->bind(boost::asio::ip::tcp::endpoint(addr, (unsigned short)nPort));
		}
	}
	catch (boost::system::system_error &e)
	{
		LOG_ERROR("error: %d reason:%s", e.code().value(), e.what());
		return 1;
	}
	return 0;
}

// Same as AsynConnect() but binds the socket to LocalIp:LocalPort first
// (via BindLocal) so the outgoing connection uses a fixed source port.
// Returns 1 only when Server cannot be parsed; connect results are
// delivered as a "ConnectComplete" event.
int BoostAsioSocket::AsynConnectWithLocalPort(string Server, int nPort, string LocalIp, int LocalPort, lpSocketCallBack pCallback, void * pUser)
{
	m_pConnectCallback = pCallback;
	m_pConnectUser = pUser;
	boost::asio::ip::address_v4 address;
	try
	{
		address = boost::asio::ip::address_v4::from_string(Server);
	}
	catch (boost::system::system_error &e)
	{
		return 1;
	}
	m_enStatus = INF_SOCKET_CONNECTING;
	_nConnectCountAsiSocket++;
	m_RemoteIP = Server;
	m_nRemotePort = nPort;
	boost::asio::ip::tcp::endpoint ep(address, (unsigned short)nPort);
	try
	{
		m_spSocket = boost::make_shared<boost::asio::ip::tcp::socket>(boost::ref(m_ios));
		LOG_INFO("socket %p with tcp socket %p", this, m_spSocket.get());
		BindLocal(LocalIp, LocalPort);
		m_spSocket->async_connect(ep, boost::bind(&BoostAsioSocket::HandleAsynConnect, shared_from_this(),
			boost::asio::placeholders::error));
	}
	catch (boost::system::system_error &e)
	{
		LOG_ERROR("error: %d reason:%s", e.code().value(), e.what());
	}
	return 0;
}


// Read completion handler: clears the receiving flag, records the byte
// count and forwards the result ("ok"/"error") to the recv callback.  On
// error the state moves to CLOSING; actual teardown is left to the owner.
void BoostAsioSocket::HandleAsynRecv(const boost::system::error_code& error, int nTransferred)
{
	boost::unique_lock<boost::recursive_mutex> lock(m_mutex);
	m_IsReceiving = false;
	if ((m_enStatus == INF_SOCKET_CLOSING) || (m_enStatus == INF_SOCKET_CLOSED))
	{
		// Socket is being torn down; drop the result silently.
		return;
	}
	m_BufReceving.nTransferred = nTransferred;
	int code = error.value();
	std::string errString = error.message();   // kept for debugger inspection
	if ( code == 2 )
	{
		// NOTE(review): value 2 was special-cased at some point (presumably
		// end-of-file -- confirm against the asio error category); the early
		// return is disabled, so it currently reaches the callback as "error".
		//return;
	}
	if (m_pRecvCallback != nullptr)
	{
		m_pRecvCallback(m_pRecvUser, this,
			boost_str_receive_complete.c_str(), !error ? boost_str_e_ok.c_str() : boost_str_e_error.c_str(),
			(unsigned char*)m_BufReceving.pBuf, nTransferred);
	}
	if (!error)
	{

	}
	else
	{
		m_enStatus = INF_SOCKET_CLOSING;
	}
	return;
}


// Write completion handler: clears the sending flag, records the byte
// count and forwards the result ("ok"/"error") to the send callback.  On
// error the state moves to CLOSING; actual teardown is left to the owner.
// (Cleanup: the empty debugger-breakpoint branches and the unused
// code/errString locals of the original were removed -- no behavior change.)
void BoostAsioSocket::HandleAsynSend(const boost::system::error_code& error, int nTransferred)
{
	boost::unique_lock<boost::recursive_mutex> lock(m_mutex);
	m_IsSending = false;
	if ((m_enStatus == INF_SOCKET_CLOSING) || (m_enStatus == INF_SOCKET_CLOSED))
	{
		// Socket is being torn down; drop the result silently.
		return;
	}
	m_BufSending.nTransferred = nTransferred;

	if (m_pSendCallback != nullptr)
	{
		m_pSendCallback(m_pSendUser, this,
			boost_str_send_complete.c_str(), !error ? boost_str_e_ok.c_str() : boost_str_e_error.c_str(),
			(unsigned char*)m_BufSending.pBuf, nTransferred);
	}
	if (error)
	{
		m_enStatus = INF_SOCKET_CLOSING;
	}
	return;
}


// Connect completion handler: records the final state and reports
// ok/refused/timeout/error to the connect callback.
void BoostAsioSocket::HandleAsynConnect(const boost::system::error_code& error)
{
    _nConnectCountAsiSocket--;
	boost::unique_lock<boost::recursive_mutex> lock(m_mutex);
	if ((m_enStatus == INF_SOCKET_CLOSING) || (m_enStatus == INF_SOCKET_CLOSED))
	{
		return;
	}
	try
	{
		m_enStatus = !error ? INF_SOCKET_CONNECTED : INF_SOCKET_CLOSED;
		if (!error)
		{
			// Bug fix: tune the buffers only on an established connection.
			// Previously set_option ran unconditionally; on a failed connect
			// it could throw, jump to the catch block and silently skip the
			// failure callback, so the owner never learned the connect died.
			m_spSocket->set_option(boost::asio::ip::tcp::socket::send_buffer_size(SOCKET_BUF_SIZE));
			m_spSocket->set_option(boost::asio::ip::tcp::socket::receive_buffer_size(SOCKET_BUF_SIZE));
		}
		if (m_pConnectCallback != nullptr)
		{
			string strRet = boost_str_e_ok;
			if (error.value())
			{
				strRet = GetErrorString(error.value());
				LOG_ERROR("connect failed : error %s", strRet.c_str());
			}
			m_pConnectCallback(m_pConnectUser, this, boost_str_connect_complete.c_str(), strRet.c_str(), nullptr, 0);
		}
	}
	catch (boost::system::system_error &e)
	{
		LOG_ERROR("error: %d reason:%s", e.code().value(), e.what());
	}
	return;
}


// Registers a "PullSend" callback and schedules HandlePullSend -- through
// the strand when bAsyn, otherwise synchronously on the caller's thread.
// Returns 1 (registering nothing) while a send is in flight.  pBuf/nSize
// are accepted for signature symmetry but are not used here.
int BoostAsioSocket::NotifyPullSend(unsigned char* pBuf, int nSize, lpSocketCallBack pCallback, void* pUser, bool bAsyn)
{
	boost::unique_lock<boost::recursive_mutex> lock(m_mutex);
	if (m_IsSending)
	{
		return 1;
	}
	m_pNotifySendCallback = pCallback;
	m_pNotifySendUser = pUser;
	if (bAsyn)
	{
		m_strand.post(boost::bind(&BoostAsioSocket::HandlePullSend, shared_from_this()));
	}
	else
	{
		HandlePullSend();
	}
	return 0;
}


// Public close request: flips the state to CLOSING and tears the socket
// down synchronously via HandleClose().  No-op once already CLOSED.
// NOTE(review): runs without taking m_mutex (both the lock and the
// strand-post variant are commented out) -- confirm callers serialize
// this against the I/O completion handlers.
int BoostAsioSocket::NotifyClose()
{
	LOG_INFO("socket %p closed for %s : %d", this, m_RemoteIP.c_str(), m_nRemotePort);
	if (m_enStatus == INF_SOCKET_CLOSED)
	{
		return 0;
	}
	
	{
		//boost::unique_lock<boost::recursive_mutex> lock(m_mutex);
		m_enStatus = INF_SOCKET_CLOSING;
		//m_strand.post(boost::bind(&BoostAsioSocket::HandleClose, shared_from_this()));
		HandleClose();
	}
	
	//if (!m_bClose)
	//	m_evtClose.WaitEvent(100);

	
	return 0;
}

int BoostAsioSocket::HandlePullSend()
{
	boost::unique_lock<boost::recursive_mutex> lock(m_mutex);
	if ((m_enStatus == INF_SOCKET_CLOSING) || (m_enStatus == INF_SOCKET_CLOSED))
	{
		return 1;
	}
	if (m_IsSending)
	{
		return 1;
	}
    if (m_pNotifySendCallback != nullptr)
	{
        m_pNotifySendCallback(m_pNotifySendUser, this, boost_str_pull_send.c_str(), boost_str_e_ok.c_str(), nullptr, 0);
	}
	return 0;
}

int BoostAsioSocket::HandleClose()
{
	//boost::unique_lock<boost::recursive_mutex> lock(m_mutex);
	m_enStatus = INF_SOCKET_CLOSED;
	boost::system::error_code e;
	try
	{
		if (m_spSocket.get() != nullptr)
		{
			int fd = m_spSocket->native_handle();
			LOG_INFO("close socket %p boost fd %d", this, fd);
			if (m_spSocket->is_open())
			{				
				m_spSocket->shutdown(boost::asio::socket_base::shutdown_both, e);
				if (e)
				{
					LOG_ERROR("shutdown error : %s", e.message().c_str());
				}
			}
			else
			{
				LOG_ERROR("close error : socket is not open");
			}

			m_spSocket->close(e);
			if (e)
			{
				LOG_ERROR("close error : %s", e.message().c_str());
			}
#ifdef __linux__
			int pid = getpid();
			std::ostringstream os;
			os << "/proc/" << pid << "/fd" + std::to_string(fd);
			if (!access(os.str().c_str(), F_OK))
			{
				LOG_WARN("boost fd %d still open , force to close", fd);
				//close(fd);
			}
#endif
			m_spSocket.reset();		
		}
	}
	catch (boost::system::system_error &e)
	{
		LOG_ERROR("HandleClose error: %d reason:%s", e.code().value(), e.what());
	}
	
	m_bClose = true;
	//m_evtClose.SetEvent();
	return 0;
}


// Maps a platform error code to one of the shared status strings:
// "refused" / "timeout" for the two recognized codes, "error" otherwise.
const char* BoostAsioSocket::GetErrorString(int nErrorCode)
{
#if (defined _WIN32 || defined _WIN64)
	switch (nErrorCode)
	{
	case WSAECONNREFUSED:
		return boost_str_e_refused.c_str();
	case WSAETIMEDOUT:
		return boost_str_e_timeout.c_str();
	default:
		break;
	}
#else
	switch (nErrorCode)
	{
	case ETIMEDOUT:
		return boost_str_e_timeout.c_str();
	case ECONNREFUSED:
		return boost_str_e_refused.c_str();
	default:
		break;
	}
#endif

	return boost_str_e_error.c_str();
}


// Posts HandleTimeout() through the strand so the timeout is delivered on
// an io_service thread, serialized with the other strand handlers.
void BoostAsioSocket::NotifyTimeout()
{
	boost::unique_lock<boost::recursive_mutex> lock(m_mutex);
	m_strand.post(boost::bind(&BoostAsioSocket::HandleTimeout, shared_from_this()));
	return;
}

// Timeout delivery: marks the socket CLOSING and, if a read was pending,
// completes it once with a "timeout" error and zero bytes.
int BoostAsioSocket::HandleTimeout()
{
	boost::unique_lock<boost::recursive_mutex> lock(m_mutex);
	m_enStatus = INF_SOCKET_CLOSING;
	if (!m_IsReceiving)
	{
		// Nothing pending -- nobody to notify.
		//int i = 0;
		return 0;
	}
	m_IsReceiving = false;
    if (m_pRecvCallback != nullptr)
	{
		m_pRecvCallback(m_pRecvUser, this,
			boost_str_receive_complete.c_str(), boost_str_e_timeout.c_str(), 
			(unsigned char*)m_BufReceving.pBuf, 0);
	}
	return 0;
}

// Reports the local IP/port of the attached socket.  Returns 0 on
// success; 1 when the endpoint cannot be obtained (Ip/nPort untouched).
// Fixes: the error_code was previously never checked (garbage endpoint
// data on failure), the endpoint was queried twice, and the catch
// variable shadowed the error_code `e`.
int BoostAsioSocket::GetLocalEndpoint(std::string& Ip, int& nPort)
{
	try
	{
		if (m_spSocket.get() != nullptr)
		{
			boost::system::error_code ec;
			boost::asio::ip::tcp::endpoint ep = m_spSocket->local_endpoint(ec);
			if (ec)
			{
				LOG_ERROR("GetLocalEndpoint error: %d reason:%s", ec.value(), ec.message().c_str());
				return 1;
			}
			Ip = ep.address().to_string();
			nPort = ep.port();
		}
	}
	catch (boost::system::system_error &ex)
	{
		LOG_ERROR("GetLocalEndpoint error: %d reason:%s", ex.code().value(), ex.what());
		return 1;
	}
	return 0;
}

// Best-effort lookup of the process name currently holding nPort, by
// parsing `lsof -i:<port>` output (Linux only; empty string on Windows or
// on any failure).  Fixes: `#ifdef WIN32 | WIN64` is ill-formed (extra
// tokens after #ifdef) and never selected the Windows branch correctly;
// the popen'd FILE* leaked on both early-return paths; fgets results were
// not checked.
std::string BoostAsioSocket::WhoOccupyThePort(int nPort)
{
	std::string processName;

#if (defined _WIN32 || defined _WIN64)
	return processName;
#else

	std::stringstream ios;
	ios << "lsof -i:" << nPort;

	FILE *fp = popen(ios.str().c_str(), "r");
	if (fp == nullptr)
		return processName;

	char line[1024] = { 0 };
	// First line is the column header; bail out when output is empty or
	// not in the expected lsof format.
	if (fgets(line, sizeof(line), fp) == nullptr || strncmp(line, "COMMAND", 7) != 0)
	{
		pclose(fp);   // was leaked here before
		return processName;
	}

	memset(line, 0, sizeof(line));
	if (fgets(line, sizeof(line), fp) != nullptr)
	{
		// First token of the data row is the process name.
		ios.str("");
		ios.clear();
		ios << line;
		ios >> processName;
	}

	pclose(fp);
	return processName;
#endif
}

int BoostAsioSocket::GetNativeSocket()
{
	try
	{
		if (m_spSocket)
			return m_spSocket->native_handle();			
	}
	catch (boost::system::system_error &e)
	{
		LOG_ERROR("GetNativeSocket error: %d reason:%s", e.code().value(), e.what());
	}
	return -1;
}

BoostIoServicePool.h文件

#pragma once


#include "configDef.h"


#include "boost/asio.hpp"
#include <boost/noncopyable.hpp> 
#include "boost/shared_ptr.hpp"
#include "boost/thread/thread.hpp"


// Round-robin pool of io_services, one worker thread each.  Sockets call
// Instance()->GetIoService() to be assigned a service; the work guards
// keep each run() from returning until Stop().
class BoostIoServicePool
	: private boost::noncopyable
{
public:
	~BoostIoServicePool();
	// Process-wide accessor; creates the pool (and its threads) on first use.
	static  BoostIoServicePool* Instance();
private:
	BoostIoServicePool();
	// Creates nPoolSize io_services with their work guards (no threads yet).
	int Init(int nPoolSize);
	static BoostIoServicePool* _instance;
	typedef boost::shared_ptr<boost::asio::io_service> io_service_ptr;
	typedef boost::shared_ptr<boost::asio::io_service::work> work_ptr;

	std::vector<boost::shared_ptr<boost::thread> > threads;   // one worker thread per io_service
	/// The pool of io_services. 
	std::vector<io_service_ptr> io_services_;

	/// The work that keeps the io_services running. 
	std::vector<work_ptr> work_;

	/// The next io_service to use for a connection. 
	std::size_t next_io_service_;

	boost::mutex m_mutex;   // guards next_io_service_
public:
	void Run();    // start one worker thread per io_service
	void Stop();   // stop all services and join the workers
	boost::asio::io_service& GetIoService();
};

BoostIoServicePool.cpp 文件

#include "BoostIoServicePool.h"
#include <thread>
#include "Log.h"

// Singleton storage (see Instance()).
BoostIoServicePool* BoostIoServicePool::_instance = nullptr;

// Builds the pool with (hardware threads / 2) + 1 io_services -- at least
// one even when hardware_concurrency() reports 0 -- and starts the worker
// threads immediately.
BoostIoServicePool::BoostIoServicePool()
	: next_io_service_(0)
{
	Init(std::thread::hardware_concurrency()/2 + 1);
	Run();
}


// Stops every io_service and joins the worker threads.
BoostIoServicePool::~BoostIoServicePool()
{
	Stop();
}


// Lazily created process-wide singleton; never destroyed.
// NOTE(review): the check-then-create is not thread-safe -- two threads
// racing on the first call could build two pools.  Confirm the first call
// happens before any worker threads start creating sockets.
BoostIoServicePool* BoostIoServicePool::Instance()
{
	if (_instance == nullptr)
	{
		_instance = new BoostIoServicePool();
	}
	return _instance;
}


int BoostIoServicePool::Init(int nPoolSize)
{
	int pool_size = nPoolSize;
	if (pool_size == 0)
	{
		pool_size = 1;
	}

	// Give all the io_services work to do so that their run() functions will not 
	// exit until they are explicitly stopped. 
	for (std::size_t i = 0; i < pool_size; ++i)
	{
		io_service_ptr io_service(new boost::asio::io_service);
		work_ptr work(new boost::asio::io_service::work(*io_service));
		io_services_.push_back(io_service);
		work_.push_back(work);
	}
	return 0;
}


// Spawns one thread per io_service; each thread blocks in run() until the
// matching work guard is destroyed or the service is stopped.
void BoostIoServicePool::Run()
{
	// Create a pool of threads to run all of the io_services. 
	for (std::size_t i = 0; i < io_services_.size(); ++i)
	{
		// Binding the shared_ptr keeps the io_service alive for the thread.
		boost::shared_ptr<boost::thread> thread(new boost::thread(
			boost::bind(&boost::asio::io_service::run, io_services_[i])));
#ifdef __linux__
		auto handle = thread->native_handle();
		//pthread_setname_np(handle,"VMS-BIOPool");
		std::string tName = MODULE_NAME + std::string("BIO");
		// NOTE(review): pthread_setname_np limits names to 15 chars + NUL --
		// verify MODULE_NAME"BIO" fits, otherwise the call fails with ERANGE.
		// Also, %d with a pthread_t handle is a format mismatch on 64-bit.
		LOG_INFO("thread %d --- with name %s", handle, tName.c_str());
		pthread_setname_np(handle, tName.c_str());
#endif
		threads.push_back(thread);
	}
}


// Stops every io_service -- run() returns even though the work guards
// still exist -- then joins all worker threads.  The pool is not designed
// to be restarted afterwards.
void BoostIoServicePool::Stop()
{
	// Explicitly stop all io_services. 
	for (std::size_t i = 0; i < io_services_.size(); ++i)
	{
		io_services_[i]->stop();
	}

	// Wait for all threads in the pool to exit. 
	for (std::size_t i = 0; i < threads.size(); ++i)
	{
		threads[i]->join();
	}
}


// Hands out io_services round-robin; the lock keeps the cursor consistent
// when sockets are created from multiple threads.
boost::asio::io_service& BoostIoServicePool::GetIoService()
{
	boost::unique_lock<boost::mutex> lock(m_mutex);
	const std::size_t slot = next_io_service_;
	next_io_service_ = (next_io_service_ + 1) % io_services_.size();
	return *io_services_[slot];
}

WorkEvent.h

#pragma once

#include "configDef.h"

#ifdef USE_LOWER_GCC
    #include "boost/asio.hpp"
    #include "boost/shared_ptr.hpp"
    #include "boost/thread/thread.hpp"
    #include "boost/thread/mutex.hpp"
    #include "boost/thread/condition.hpp"
    #include "boost/bind.hpp"
  //  #include "boost/lambda/bind.hpp"
   // #include "boost/lambda/lambda.hpp"
#else
    #include <mutex>
    #include <condition_variable>
#endif

namespace ASYN_WORK
{

    // Cross-thread signalling event backed by a mutex + condition variable
    // (boost primitives on old GCC, std otherwise).  WaitEvent() consumes
    // the signal after a successful wait (auto-reset); SetEvent() wakes a
    // single waiter.
    class CWorkEvent
    {
    public:
	   CWorkEvent();
	   ~CWorkEvent();
    public:
	   void SetEvent();    // signal the event, wake one waiter
	   void WaitEvent();   // block until signalled, then auto-reset
	   void WaitEvent(const unsigned long long llTime);//ms
	   void ResetEvent();  // clear the flag without waking anyone
#ifdef USE_LOWER_GCC
       bool GetWake(){return m_bWake;};
#endif
    private:
#ifdef USE_LOWER_GCC
        boost::condition_variable m_conditionWake;
        boost::mutex m_mutexWake;
#else
        std::condition_variable m_conditionWake;
        std::mutex m_mutexWake;
#endif
	   bool m_bWake;   // the event flag, guarded by m_mutexWake
    };

}

WorkEvent.cpp

#include "WorkEvent.h"

#ifndef USE_LOWER_GCC
#include <chrono>
#endif

namespace ASYN_WORK
{

    // The event starts unsignalled.
    CWorkEvent::CWorkEvent()
	   :m_bWake(false)
    {
    }

    // Nothing to release; the mutex/condition members clean up themselves.
    CWorkEvent::~CWorkEvent()
    {
    }

    // Signals the event and wakes a single waiter.  Signalling an
    // already-set event is a no-op (no redundant notify).
    void CWorkEvent::SetEvent()
    {
#ifdef USE_LOWER_GCC
        boost::unique_lock<boost::mutex> lock(m_mutexWake);
#else
        std::unique_lock<std::mutex> lock(m_mutexWake);
#endif	 
        if (!m_bWake)
        {
            m_bWake = true;
            m_conditionWake.notify_one();
        }   
    }

    // Blocks until the event is signalled (the predicate guards against
    // spurious wakeups), then consumes the signal (auto-reset).
    void CWorkEvent::WaitEvent()
    {
#ifdef USE_LOWER_GCC

        boost::unique_lock<boost::mutex> lock(m_mutexWake);               
        m_conditionWake.wait(lock, boost::bind(&CWorkEvent::GetWake, this));        // TODO: replace the bind with a lambda once old-GCC support is dropped
#else
        std::unique_lock<std::mutex> lock(m_mutexWake);
        m_conditionWake.wait(lock, [&](){ return m_bWake; });
#endif
	   m_bWake = false;
    }

    // Waits up to llTime milliseconds for the event.  The signal is
    // consumed (auto-reset) only when the wait succeeded; on timeout the
    // flag is left untouched.
    void CWorkEvent::WaitEvent(const unsigned long long llTime)
    {
#ifdef USE_LOWER_GCC
        boost::unique_lock<boost::mutex> lock(m_mutexWake);
        if (m_conditionWake.wait_for(lock, boost::chrono::milliseconds(llTime), boost::bind(&CWorkEvent::GetWake, this)))
#else
        std::unique_lock<std::mutex> lock(m_mutexWake);
        if (m_conditionWake.wait_for(lock, std::chrono::milliseconds(llTime), [&](){ return m_bWake; }))
#endif
        {
            m_bWake = false;
        }
    }

    // Clears the flag without waking anyone.
    void CWorkEvent::ResetEvent()
    {
#ifdef USE_LOWER_GCC
        boost::unique_lock<boost::mutex> lock(m_mutexWake);
#else
        std::unique_lock<std::mutex> lock(m_mutexWake);
#endif
	   m_bWake = false;
    }
}


service.cpp

#include "BoostAsioSocket.h"
#include<iostream>
#include<string>

// Server-side event callback registered with StartListen(); receives
// "AcceptNewSocket" (and other) events from the listening socket.
int  Listener_SocketCallBack(void* pUser, void* pSocket, const char* CmdType, const char* Error, unsigned char* pBuf, int nSize)
{
	std::cout << "test\n" << std::flush;
	// Handle the client's messages/events here.
	return 0;
}

// Minimal demo server: listens on 127.0.0.1:5003 with the callback above.
int main(void)
{	
	boost::shared_ptr<BoostAsioSocket> m_pSocket;
	m_pSocket = boost::make_shared<BoostAsioSocket>();
	std::string  Server = "127.0.0.1";
	int nPort = 5003;
	m_pSocket->StartListen(Server, nPort, Listener_SocketCallBack, NULL);

	// NOTE(review): StopListen is called immediately, so main returns
	// before any client can connect; a real server would block here (wait
	// on an event / getchar) while the pool threads service accepts.
	m_pSocket->StopListen();
	m_pSocket.reset();
	return 0;
}

client.cpp


#include "BoostAsioSocket.h"
#include<iostream>
#include<string>

// Connect/send completion callback: Error is "ok" on success, otherwise
// "refused"/"timeout"/"error".
int CALLBACK Connection_SocketCallBack(void* pUser, void* pSocket, const char* CmdType, const char* Error, unsigned char* pBuf, int nSize)
{
	std::cout << "connect service success\n" << std::flush;
	return 0;
}

// Receive completion callback: pBuf/nSize hold the bytes read from the
// server.
int  Connection_SocketRecvCallBack(void* pUser, void* pSocket, const char* CmdType, const char* Error, unsigned char* pBuf, int nSize)
{
	std::cout << "recv success\n" << std::flush;
	// Process the server's reply here.
	return 0;
}

// Minimal demo client: connect to 127.0.0.1:5003, post one send and one
// receive, then close.
int main(void)
{	
	boost::shared_ptr<BoostAsioSocket> m_pSocket;
	m_pSocket = boost::make_shared<BoostAsioSocket>();
	std::string  Server = "127.0.0.1";
	int nPort = 5003;
	m_pSocket->AsynConnect(Server, nPort, Connection_SocketCallBack, NULL);

	// Bug fix: AsynSend/AsynRecv take writable unsigned char* buffers.  The
	// original passed std::string::c_str() (a const char*) and the string
	// object itself, which does not compile, and reused the 5-byte send
	// string as the receive buffer.  Keep distinct buffers alive until the
	// completion callbacks fire.
	std::string sendBuf = "hello";
	std::string recvBuf(1024, '\0');
	//m_pSocket->Send((unsigned char*)&sendBuf[0], (int)sendBuf.size());//syn
	m_pSocket->AsynSend((unsigned char*)&sendBuf[0], (int)sendBuf.size(), Connection_SocketCallBack, NULL);
	
	m_pSocket->AsynRecv((unsigned char*)&recvBuf[0], (int)recvBuf.size(), Connection_SocketRecvCallBack, NULL);
	
	// NOTE(review): main exits immediately, so the pool threads may never
	// complete the operations above; a real client should wait for the
	// callbacks before closing.
	//m_pSocket->NotifyTimeout();
	m_pSocket->NotifyClose();
	m_pSocket.reset();
	return 0;
}
  • 0
    点赞
  • 1
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
好的,以下是一个使用boost实现线程池的示例代码: ```c++ #include <iostream> #include <boost/thread.hpp> #include <boost/bind.hpp> #include <boost/asio.hpp> class ThreadPool { public: ThreadPool(size_t num_threads) : work_(io_service_) { for (size_t i = 0; i < num_threads; ++i) { workers_.create_thread(boost::bind(&boost::asio::io_service::run, &io_service_)); } } ~ThreadPool() { io_service_.stop(); workers_.join_all(); } template <typename Task> void execute(Task task) { io_service_.post(task); } private: boost::asio::io_service io_service_; boost::asio::io_service::work work_; boost::thread_group workers_; }; void task1() { std::cout << "Task 1 executed in thread " << boost::this_thread::get_id() << std::endl; } void task2() { std::cout << "Task 2 executed in thread " << boost::this_thread::get_id() << std::endl; } int main() { ThreadPool pool(2); pool.execute(task1); pool.execute(task2); return 0; } ``` 在上面的示例中,我们首先定义了一个ThreadPool类,它包含了一个boost::asio::io_service对象和一个boost::thread_group对象。在ThreadPool的构造函数中,我们创建了多个工作线程,并将它们绑定到io_service对象上。execute()函数接受一个任务,并将其提交到io_service队列中。在ThreadPool的析构函数中,我们停止io_service并等待所有工作线程完成。 在main函数中,我们创建了一个ThreadPool对象,并使用execute()函数提交了两个任务任务。每个任务将打印它是在哪个线程中执行的。 请注意,这只是一个简单的示例,实际的线程池可能需要更多的功能。

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值