公司有个项目,需要做一个服务器,我这边准备使用Boost作为网络通信的框架,然后再使用thrift作为进程间通信的接口,使用到的Boost版本为:boost_1_67_0,thrift版本为:thrift-0.11.0,thrift使用的是以前项目编译成的静态库。
在写服务器之前,需要对Boost与thrift进行调研,因此就写了一个小demo进行测试。
对于Boost来说,使用到了Boost的asio异步通信机制,而thrift就用其进行进程间通信。
Boost端的代码如下:
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <string>
#include <thrift/transport/TSocket.h>
#include <thrift/transport/TBufferTransports.h>
#include <thrift/protocol/TBinaryProtocol.h>
#include <thrift/transport/TServerSocket.h>
#include <thrift/server/TSimpleServer.h>
#include "DevMgrServ.h"
#include "DataGateway.h"
using boost::shared_ptr;
using boost::asio::ip::tcp;
using namespace apache::thrift;
using namespace apache::thrift::protocol;
using namespace apache::thrift::transport;
using namespace ::apache::thrift::server;
using namespace std;
using namespace rapidjson;
// Global thrift client stack, built once by InitThrift(): raw socket,
// framed transport and binary protocol, shared by every forwarded request.
::apache::thrift::stdcxx::shared_ptr<TTransport> g_DevMgrSocket;
::apache::thrift::stdcxx::shared_ptr<TTransport> g_DevMgrTransport;
::apache::thrift::stdcxx::shared_ptr<TProtocol> g_DevMgrProtocol;
// Client stub for the DevMgr thrift service (created in InitThrift, never freed).
DevMgrServClient* g_DevMgrClient;
// One accepted TCP connection.  Reads a message, forwards it to the DevMgr
// process over thrift (see handle_read), then echoes the bytes back.
// Lifetime is managed by shared_from_this(): each pending async operation
// holds a shared_ptr, so the session lives until all handlers complete.
class TcpSession : public std::enable_shared_from_this<TcpSession>
{
public:
TcpSession(boost::asio::io_service &io_service)
: socket_(io_service),
strand_(io_service)
{
}
// Exposes the raw socket so Server::doAccept can accept into it.
tcp::socket &socket()
{
return socket_;
}
// Kicks off the first asynchronous read; call once after accept succeeds.
void start()
{
doRead();
}
private:
void doRead()
{
// strand_.wrap serializes handlers so buffer_/socket_ are never touched
// from two threads at once when io_service runs on multiple threads.
socket_.async_read_some(boost::asio::buffer(buffer_, buffer_.size()),
strand_.wrap(boost::bind(&TcpSession::handle_read,
shared_from_this(),
boost::asio::placeholders::error,
boost::asio::placeholders::bytes_transferred)));
}
void handle_read(boost::system::error_code ec, std::size_t bytes_transferred);
// Echo `length` bytes of buffer_ back to the peer, then read again.
void doWrite(std::size_t length)
{
boost::asio::async_write(socket_, boost::asio::buffer(buffer_, length),
strand_.wrap( boost::bind(&TcpSession::handle_write,
shared_from_this(),
boost::asio::placeholders::error,
boost::asio::placeholders::bytes_transferred)));
}
void handle_write(boost::system::error_code ec, std::size_t bytes_transferred)
{
if (!ec)
{
doRead();
}
}
private:
// BUGFIX: socket_ was used throughout the class but never declared (the
// original only declared an unused `socket_ptr devsock_ptr`, whose type is
// not defined anywhere).  Declare socket_ before strand_ so the member
// initialization order matches the constructor's initializer list.
tcp::socket socket_;
boost::asio::io_service::strand strand_;
std::array<char, 8192> buffer_;
};
// Completion handler for async_read_some.  On success: parse the protocol
// header, forward ONLINE requests to the DevMgr process via thrift
// (open -> PutOnlineData -> close), then echo the received bytes back.
// On error: log and tear the socket down.
void TcpSession::handle_read(boost::system::error_code ec, std::size_t bytes_transferred)
{
if (!ec)//the error code must always be checked; otherwise doWrite would run even on error/EOF
{
// BUGFIX: only the first bytes_transferred bytes of buffer_ are valid.
// The original copied the whole 8192-byte array into the string, so the
// payload was followed by stale garbage from earlier reads.
std::string RecvStr(buffer_.data(), bytes_transferred);
// A complete protocol header must be present before it can be parsed.
if (bytes_transferred >= sizeof(EC2_PROTOCOL_HEADER))
{
EC2_PROTOCOL_HEADER head;
memcpy(&head,RecvStr.c_str(),sizeof(EC2_PROTOCOL_HEADER));
cout<<head.u32MsgType<<endl;
cout<<head.u32MsgLen<<endl;
if( head.u32MsgType == EC2_MSG_ONLINE_REQ )
{
try
{
g_DevMgrTransport->open();
DevMgrInfo DevMgrData;
char TmpRecvBuf[2048] = {0};
// BUGFIX: head.u32MsgLen comes straight off the wire and must not be
// trusted — clamp it to the bytes actually received and to the space
// left in TmpRecvBuf (the original memcpy could overflow the stack buffer).
std::size_t PayloadLen = head.u32MsgLen;
const std::size_t Avail = bytes_transferred - sizeof(EC2_PROTOCOL_HEADER);
const std::size_t Room  = sizeof(TmpRecvBuf) - sizeof(HEARTBEAT_STATE_S);
if (PayloadLen > Avail) PayloadLen = Avail;
if (PayloadLen > Room)  PayloadLen = Room;
// Copy the heartbeat state header first, then the message payload.
memcpy((void *)TmpRecvBuf,(const void *)&g_HeartBeatInfo,sizeof(HEARTBEAT_STATE_S));
memcpy((void *)(TmpRecvBuf + sizeof(HEARTBEAT_STATE_S)),(const void *)(RecvStr.c_str()+ sizeof(EC2_PROTOCOL_HEADER)),PayloadLen);
string TmpStr(TmpRecvBuf,sizeof(HEARTBEAT_STATE_S)+PayloadLen );
DevMgrData.DevData = TmpStr;
// Forward the assembled message to the DevMgr process over thrift.
g_DevMgrClient->PutOnlineData(DevMgrData);
g_DevMgrTransport->close();
}
catch(TException& tx)
{
printf("Thrift ERROR: %s", tx.what());
g_DevMgrTransport->close();
}
}
}
// Echo back asynchronously exactly what was received.
doWrite(bytes_transferred);
}
else//the peer closed the connection or a read error occurred
{
cout << "handle_read" << boost::system::system_error(ec).what() << endl;
// Use a scratch error_code so shutdown/close cannot throw here; the
// original const_cast on the (non-const, by-value) parameter was pointless.
boost::system::error_code ignored;
socket_.shutdown(boost::asio::ip::tcp::socket::shutdown_both, ignored);
socket_.close(ignored);
}
}
class Server
{
public:
Server(boost::asio::io_service &io_service, unsigned short port)
: io_service_(io_service),
acceptor_(io_service, tcp::endpoint(tcp::v4(), port))
{
doAccept();
}
void doAccept()
{
auto conn = std::make_shared<TcpSession>(io_service_);
acceptor_.async_accept(conn->socket(),
[this, conn](boost::system::error_code ec)
{
try
{
if (!ec)
{
conn->start();
}
this->doAccept();
}
catch (...)
{
std::cout<<" accept abnormal "<<std::endl;
return;
}
});
}
private:
boost::asio::io_service &io_service_;
tcp::acceptor acceptor_;
};
// Builds the global thrift client stack used to talk to the DevMgr process:
// a TSocket on the UNIX-domain path, wrapped in a framed transport and a
// binary protocol, plus the generated client stub.  Call once at startup,
// before any TcpSession can invoke g_DevMgrClient.
void InitThrift(void)
{
TSocket *pSocket = new TSocket(DEVMGR_THRIFT_SOCKET_PATH);
// 10-second send/receive timeouts so a stuck DevMgr cannot hang us forever.
pSocket->setRecvTimeout(10 * 1000);
pSocket->setSendTimeout(10 * 1000);
pSocket->setNoDelay(true);
// Assign the globals directly instead of going through local temporaries;
// each layer wraps the one below it.
g_DevMgrSocket.reset(pSocket);
g_DevMgrTransport.reset(new TFramedTransport(g_DevMgrSocket));
g_DevMgrProtocol.reset(new TBinaryProtocol(g_DevMgrTransport));
g_DevMgrClient = new DevMgrServClient(g_DevMgrProtocol);
}
// Entry point of the gateway process: initialises the thrift client, starts
// the asio server on SERVER_PORT and runs the event loop until it exits.
// argc/argv come straight from main(); argv[1] is only logged.
// Returns 0 (setup exceptions are caught and logged to stderr).
int DataGatewayInit(int argc, char** argv)
{
// BUGFIX: the original dereferenced argv[1] unconditionally, which is
// undefined behaviour when the program is started without arguments.
printf("Entry %s\n", (argc > 1) ? argv[1] : "(no arg)");
try
{
boost::asio::io_service io_service;
InitThrift();
unsigned short port = SERVER_PORT;
Server server(io_service, port);
io_service.run();  // blocks; dispatches all async accept/read/write handlers
}
catch (std::exception& e)
{
std::cerr << "Exception: " << e.what() << "\n";
}
return 0;
}
Boost作为服务器接收其他客户端的连接,当数据接收完毕,将会在回调函数里面,作为thrift客户端调用DevMgr进程的thrift接口,按 open -> send -> recv -> close 的顺序,将从客户端接收到的数据通过thrift转发给DevMgr进程。
DevMgr进程thrift服务端代码如下:
#include <thrift/protocol/TBinaryProtocol.h>
#include <thrift/server/TSimpleServer.h>
#include <thrift/transport/TServerSocket.h>
#include <thrift/transport/TBufferTransports.h>
#include <event2/event.h>
#include <thrift/server/TNonblockingServer.h>
#include <thrift/transport/TNonblockingServerSocket.h>
#include <thrift/transport/TNonblockingServerTransport.h>
#include <thrift/concurrency/ThreadManager.h>
#include <thrift/concurrency/PlatformThreadFactory.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <iostream>
using namespace std;
using namespace ::apache::thrift;
using namespace ::apache::thrift::protocol;
using namespace ::apache::thrift::transport;
using namespace ::apache::thrift::server;
// Filesystem path of the UNIX domain socket the thrift server listens on.
#define DEVMGR_THRIFT_SOCKET_PATH ("./devmgr.socket")
// Demo implementation of the generated DevMgrServIf service interface.
// The gateway forwards client data here via the PutOnlineData RPC; this
// handler just prints the payload to stdout.
class DevMgrServHandler : virtual public DevMgrServIf
{
public:
DevMgrServHandler()
{
// No state to initialise for this demo handler.
}
// Invoked by the thrift server for every PutOnlineData call.
void PutOnlineData(const DevMgrInfo& MgrData)
{
printf("putOnlineData\n");
cout << MgrData.DevData << endl;
}
};
int DevMgrInit(int argc, char **argv)
{
using namespace ::apache::thrift;
using namespace ::apache::thrift::protocol;
using namespace ::apache::thrift::transport;
using namespace ::apache::thrift::server;
using namespace ::apache::thrift::concurrency;
using namespace ::apache::thrift::stdcxx;
shared_ptr<DevMgrServHandler> handler(new DevMgrServHandler());
shared_ptr<TProcessor> processor(new DevMgrServProcessor(handler));
shared_ptr<TProtocolFactory> protocolFactory(new TBinaryProtocolFactory());
shared_ptr<TNonblockingServerSocket> serverTransport(new TNonblockingServerSocket(DEVMGR_THRIFT_SOCKET_PATH));
shared_ptr<ThreadManager> threadManager = ThreadManager::newSimpleThreadManager(1);
shared_ptr<PosixThreadFactory> threadFactory = shared_ptr<PosixThreadFactory> (new PosixThreadFactory()); //PosixThreadFactory可以自定义(继承于ThreadFactory)
threadManager->threadFactory(threadFactory);
threadManager->start();
TNonblockingServer server(processor, protocolFactory, serverTransport, threadManager);
try
{
server.serve();
}
catch(TException e)
{
cout<<"Server.serve() failed "<<e.what()<<endl;
return -1;
}
return 0;
}
thrift服务端,采用的是TNonblockingServerSocket,非阻塞机制,并且采用UNIX domain socket(以文件路径标识)的方式进行数据通信,这是因为UNIX domain socket 用于 IPC 更有效率:不需要经过网络协议栈,不需要打包拆包、计算校验和、维护序号和应答等,只是将应用层数据从一个进程拷贝到另一个进程。在收到客户端的数据后,由于只是demo,因此只是将其打印出来即可。
编写好代码之后,就需要进行编译了,交叉编译工具为:arm-hisiv400-linux-g++,在编译的时候就发现第一个问题,是编译boost相关文件的错误:
error: ‘current_exception’ is not a member of ‘std’
p.set_exception(std::current_exception());
在编译选项已经加上了std=c++11,还是编译不过
解决方法:
查找海思编译器的资料,发现需要加上-march=armv7-a编译选项,才能编译通过。
问题2:
在编译通过之后,运行代码,发现在open的时候就会产生段错误,经过多天的排查,确认不是代码的问题,而是编译选项-march=armv7-a的问题。由于使用的是以前项目编译过的Thrift库,因此去问当初编译该库的同事,得知以前使用时都没有问题,而以前编译时并没有加上该参数,因此认为可能是该参数与旧库不兼容导致的问题。
解决方法:
使用-march=armv7-a编译选项重新编译thrift库,问题得以解决。