本篇博客针对Acceptor类和TcpServer类做下小结。
博客代码来自于陈硕的 muduo 网络库,GitHub 地址:https://github.com/chenshuo/muduo
学习笔记:
Acceptor类专注于做一件事情,就是在服务端接收所有的客户端请求,将连接成功的客户端交给TcpServer进一步处理。当服务端处于“1+N”多线程模式时,Acceptor的所有IO事件在“1”代表的线程中执行,连接进来的多个客户端在“N”代表的线程池中执行IO事件。
TcpServer类作为用户可见类,用户可以设置线程池中子线程数量,当有新连接进来时,TcpServer轮询抽取一个子线程处理该连接的IO事件,从而保证各子线程的负载均衡。
Acceptor.h
#ifndef MUDUO_NET_ACCEPTOR_H
#define MUDUO_NET_ACCEPTOR_H
#include <functional>
#include "muduo/net/Channel.h"
#include "muduo/net/Socket.h"
namespace muduo
{
namespace net
{
class EventLoop;
class InetAddress;
///
/// Acceptor of incoming TCP connections.
///
class Acceptor : noncopyable
{
public:
typedef std::function<void (int sockfd, const InetAddress&)> NewConnectionCallback;
Acceptor(EventLoop* loop, const InetAddress& listenAddr, bool reuseport);
~Acceptor();
// Set the per-connection callback: Acceptor hands every accepted
// connection off to the upper layer (TcpServer) through it.
void setNewConnectionCallback(const NewConnectionCallback& cb)
{ newConnectionCallback_ = cb; }
bool listenning() const { return listenning_; }
void listen();// start listening on the server socket (runs in loop thread)
private:
void handleRead();// invoked when acceptSocket_ has an incoming connection
EventLoop* loop_; // loop that registers acceptSocket_'s read events with the Poller
Socket acceptSocket_; // listening socket awaiting client connections
Channel acceptChannel_;// watches readable events on acceptSocket_'s fd
NewConnectionCallback newConnectionCallback_;// TcpServer's new-connection callback
bool listenning_;
int idleFd_;// reserved spare fd, used to gracefully reject clients when the process runs out of fds (EMFILE)
};
} // namespace net
} // namespace muduo
#endif // MUDUO_NET_ACCEPTOR_H
Acceptor.cpp
#include "muduo/net/Acceptor.h"
#include "muduo/base/Logging.h"
#include "muduo/net/EventLoop.h"
#include "muduo/net/InetAddress.h"
#include "muduo/net/SocketsOps.h"
#include <errno.h>
#include <fcntl.h>
//#include <sys/types.h>
//#include <sys/stat.h>
#include <unistd.h>
using namespace muduo;
using namespace muduo::net;
// Creates the non-blocking listening socket, binds it to listenAddr, and wires
// the read callback; listening itself starts later via listen().
Acceptor::Acceptor(EventLoop* loop, const InetAddress& listenAddr, bool reuseport)
: loop_(loop),
acceptSocket_(sockets::createNonblockingOrDie(listenAddr.family())),// create a non-blocking socket (or abort)
acceptChannel_(loop, acceptSocket_.fd()),
listenning_(false),
idleFd_(::open("/dev/null", O_RDONLY | O_CLOEXEC))// reserve a spare fd for the EMFILE recovery trick
{
assert(idleFd_ >= 0);
acceptSocket_.setReuseAddr(true);
acceptSocket_.setReusePort(reuseport);
acceptSocket_.bindAddress(listenAddr);
acceptChannel_.setReadCallback(
std::bind(&Acceptor::handleRead, this));// fires when a new connection arrives on acceptSocket_
}
Acceptor::~Acceptor()
{
// Unregister acceptChannel_ from the Poller before tearing down.
acceptChannel_.disableAll();
acceptChannel_.remove();
// Release the reserved spare fd.
::close(idleFd_);
}
// Start accepting connections; must be called in the loop thread.
void Acceptor::listen()
{
loop_->assertInLoopThread();
listenning_ = true;
// Begin listening for incoming connections on the socket.
acceptSocket_.listen();
// Enable read events so a pending connection wakes up handleRead().
acceptChannel_.enableReading();
}
// Read-event handler for the listening socket: drains the kernel accept queue
// and forwards each new connection to the TcpServer via newConnectionCallback_.
// Resolves the old "FIXME loop until no more": a burst of simultaneous
// connections is now fully accepted in one wakeup instead of one per poll round.
void Acceptor::handleRead()
{
  loop_->assertInLoopThread();
  InetAddress peerAddr;
  while (true)
  {
    int connfd = acceptSocket_.accept(&peerAddr);
    if (connfd >= 0)
    {
      if (newConnectionCallback_)
      {
        // Hand the connection to the upper layer; keep accepting.
        newConnectionCallback_(connfd, peerAddr);
      }
      else
      {
        // No consumer installed: close immediately so the fd is not leaked.
        sockets::close(connfd);
      }
      continue;
    }
    // accept() failed: an empty queue is the normal exit from the loop.
    if (errno == EAGAIN || errno == EWOULDBLOCK)
    {
      break;
    }
    LOG_SYSERR << "in Acceptor::handleRead";
    // Read the section named "The special problem of
    // accept()ing when you can't" in libev's doc.
    // By Marc Lehmann, author of libev.
    if (errno == EMFILE)
    {
      // Out of file descriptors: free the reserved one, accept the pending
      // connection with it, close it right away (so the peer sees an orderly
      // shutdown instead of hanging), then re-reserve /dev/null for next time.
      ::close(idleFd_);
      idleFd_ = ::accept(acceptSocket_.fd(), NULL, NULL);
      ::close(idleFd_);
      idleFd_ = ::open("/dev/null", O_RDONLY | O_CLOEXEC);
    }
    break;
  }
}
TcpServer.h
#ifndef MUDUO_NET_TCPSERVER_H
#define MUDUO_NET_TCPSERVER_H
#include "muduo/base/Atomic.h"
#include "muduo/base/Types.h"
#include "muduo/net/TcpConnection.h"
#include <map>
namespace muduo
{
namespace net
{
class Acceptor;
class EventLoop;
class EventLoopThreadPool;
///
/// TCP server, supports single-threaded and thread-pool models.
///
/// This is an interface class, so don't expose too much details.
class TcpServer : noncopyable
{
public:
typedef std::function<void(EventLoop*)> ThreadInitCallback;
enum Option
{
kNoReusePort,
kReusePort,
};
//TcpServer(EventLoop* loop, const InetAddress& listenAddr);
TcpServer(EventLoop* loop,
const InetAddress& listenAddr,
const string& nameArg,
Option option = kNoReusePort);
~TcpServer(); // force out-line dtor, for std::unique_ptr members.
const string& ipPort() const { return ipPort_; }
const string& name() const { return name_; }
EventLoop* getLoop() const { return loop_; }
/// Set the number of threads for handling input.
///
/// Always accepts new connection in loop's thread.
/// Must be called before @c start
/// @param numThreads
/// - 0 means all I/O in loop's thread, no thread will be created.
/// this is the default value.
/// - 1 means all I/O in another thread.
/// - N means a thread pool with N threads, new connections
/// are assigned on a round-robin basis.
void setThreadNum(int numThreads); // set the I/O thread pool size
void setThreadInitCallback(const ThreadInitCallback& cb)// callback run in each pool thread at startup
{ threadInitCallback_ = cb; }
/// valid after calling start()
std::shared_ptr<EventLoopThreadPool> threadPool()
{ return threadPool_; }
/// Starts the server if it's not listening.
///
/// It's harmless to call it multiple times.
/// Thread safe.
void start(); // start the thread pool and begin accepting connections
/// Set connection callback.
/// Not thread safe.
void setConnectionCallback(const ConnectionCallback& cb)
{ connectionCallback_ = cb; }
/// Set message callback.
/// Not thread safe.
void setMessageCallback(const MessageCallback& cb)
{ messageCallback_ = cb; }
/// Set write complete callback.
/// Not thread safe.
void setWriteCompleteCallback(const WriteCompleteCallback& cb)
{ writeCompleteCallback_ = cb; }
private:
/// Not thread safe, but in loop
void newConnection(int sockfd, const InetAddress& peerAddr);
/// Thread safe.
void removeConnection(const TcpConnectionPtr& conn);
/// Not thread safe, but in loop
void removeConnectionInLoop(const TcpConnectionPtr& conn);
typedef std::map<string, TcpConnectionPtr> ConnectionMap;
EventLoop* loop_; // the acceptor loop; handles acceptor_'s I/O events
const string ipPort_;
const string name_;
std::unique_ptr<Acceptor> acceptor_; // accepts every incoming client connection
std::shared_ptr<EventLoopThreadPool> threadPool_;// thread pool handling I/O of established connections
ConnectionCallback connectionCallback_; // connection up/down callback
MessageCallback messageCallback_; // incoming-message callback
WriteCompleteCallback writeCompleteCallback_; // output-buffer-drained callback
ThreadInitCallback threadInitCallback_; // per-thread init callback
AtomicInt32 started_;// start flag, set once by start()
// always in loop thread
int nextConnId_;// monotonically increasing id used to name connections
ConnectionMap connections_;// name -> connection map of live connections
};
} // namespace net
} // namespace muduo
#endif // MUDUO_NET_TCPSERVER_H
TcpServer.cpp
#include "muduo/net/TcpServer.h"
#include "muduo/base/Logging.h"
#include "muduo/net/Acceptor.h"
#include "muduo/net/EventLoop.h"
#include "muduo/net/EventLoopThreadPool.h"
#include "muduo/net/SocketsOps.h"
#include <stdio.h> // snprintf
using namespace muduo;
using namespace muduo::net;
// Builds the acceptor and the (not yet started) I/O thread pool; the server
// does nothing until start() is called.
TcpServer::TcpServer(EventLoop* loop,
const InetAddress& listenAddr,
const string& nameArg,
Option option)
: loop_(CHECK_NOTNULL(loop)),
ipPort_(listenAddr.toIpPort()),
name_(nameArg),
acceptor_(new Acceptor(loop, listenAddr, option == kReusePort)),
threadPool_(new EventLoopThreadPool(loop, name_)),
connectionCallback_(defaultConnectionCallback),
messageCallback_(defaultMessageCallback),
nextConnId_(1)
{
// Every connection accepted by acceptor_ is forwarded to
// TcpServer::newConnection() for further setup.
acceptor_->setNewConnectionCallback(
std::bind(&TcpServer::newConnection, this, _1, _2));
}
TcpServer::~TcpServer()
{
loop_->assertInLoopThread();
LOG_TRACE << "TcpServer::~TcpServer [" << name_ << "] destructing";
// Tear down every connection still in the map, one by one.
for (auto& item : connections_)
{
TcpConnectionPtr conn(item.second);
/* reset() drops the map's reference, leaving conn as the owning copy on
this stack frame; std::bind below copies conn into the queued functor,
which keeps the TcpConnection alive until connectDestroyed has run in
its own loop. After that the last reference goes away and the
connection is finally destroyed. */
item.second.reset();
conn->getLoop()->runInLoop(
std::bind(&TcpConnection::connectDestroyed, conn));
}
}
// Configure how many sub-threads the I/O thread pool spawns;
// must be called before start().
void TcpServer::setThreadNum(int numThreads)
{
  assert(numThreads >= 0);
  threadPool_->setThreadNum(numThreads);
}
// Starts the server: spins up the thread pool, then asks the acceptor to
// listen. getAndSet(1) makes repeated calls harmless and thread safe.
void TcpServer::start()
{
if (started_.getAndSet(1) == 0)
{
// Start the I/O thread pool before any connection can arrive.
threadPool_->start(threadInitCallback_);
// Begin listening, in the acceptor loop's own thread.
assert(!acceptor_->listenning());
loop_->runInLoop(
std::bind(&Acceptor::listen, get_pointer(acceptor_)));
}
}
// Called (in the acceptor loop) for each accepted socket: names the
// connection, assigns it an I/O loop, installs the user callbacks and
// registers it in connections_.
void TcpServer::newConnection(int sockfd, const InetAddress& peerAddr)
{
loop_->assertInLoopThread();
// Pick a loop for the new socket; all of its subsequent I/O runs there.
EventLoop* ioLoop = threadPool_->getNextLoop();
char buf[64];
snprintf(buf, sizeof buf, "-%s#%d", ipPort_.c_str(), nextConnId_);
++nextConnId_;
string connName = name_ + buf;
LOG_INFO << "TcpServer::newConnection [" << name_
<< "] - new connection [" << connName
<< "] from " << peerAddr.toIpPort();
InetAddress localAddr(sockets::getLocalAddr(sockfd));
// FIXME poll with zero timeout to double confirm the new connection
// FIXME use make_shared if necessary
// From here on conn is bound to ioLoop: its I/O events execute in ioLoop.
TcpConnectionPtr conn(new TcpConnection(ioLoop,
connName,
sockfd,
localAddr,
peerAddr));
connections_[connName] = conn;
// Install the user-supplied callbacks on the new connection.
conn->setConnectionCallback(connectionCallback_);
conn->setMessageCallback(messageCallback_);
conn->setWriteCompleteCallback(writeCompleteCallback_);
conn->setCloseCallback(
std::bind(&TcpServer::removeConnection, this, _1)); // FIXME: unsafe
ioLoop->runInLoop(std::bind(&TcpConnection::connectEstablished, conn));
}
// Thread safe: hops to the acceptor loop before mutating server state.
void TcpServer::removeConnection(const TcpConnectionPtr& conn)
{
  // FIXME: unsafe
  auto eraseTask = std::bind(&TcpServer::removeConnectionInLoop, this, conn);
  loop_->runInLoop(eraseTask);
}
// Runs in the acceptor loop: erase the connection from the map, then let its
// own I/O loop finish the teardown.
void TcpServer::removeConnectionInLoop(const TcpConnectionPtr& conn)
{
loop_->assertInLoopThread();
LOG_INFO << "TcpServer::removeConnectionInLoop [" << name_
<< "] - connection " << conn->name();
// Drop the connection from the map.
size_t n = connections_.erase(conn->name());
(void)n;
assert(n == 1);
EventLoop* ioLoop = conn->getLoop();
// Unregister the socket from its Poller; queueInLoop (not runInLoop) defers
// the destruction until the current event handling is done.
ioLoop->queueInLoop(
std::bind(&TcpConnection::connectDestroyed, conn));
}