背景
zlm(ZLMediaKit)对epoll的运用炉火纯青,封装epoll的EventPoller也是整个项目的引擎,在此一探究竟。
创建
EventPollerPool使用单例模式
// Defines `class_name &class_name::Instance()` as a lazily-created singleton.
// Function-local statics are initialized thread-safely (C++11 "magic statics");
// holding the object in a static shared_ptr means it is destroyed at program
// exit, and the cached reference avoids dereferencing the shared_ptr on each call.
#define INSTANCE_IMP(class_name, ...) \
class_name &class_name::Instance() { \
static std::shared_ptr<class_name> s_instance(new class_name(__VA_ARGS__)); \
static class_name &s_instance_ref = *s_instance; \
return s_instance_ref; \
}
// Instantiate the singleton accessor for EventPollerPool.
INSTANCE_IMP(EventPollerPool)
在EventPollerPool构造函数中初始化EventPoller
// The poller pool size is set from the main initialization routine.
EventPollerPool::setPoolSize(threads);
// Whether each poller thread gets pinned to a CPU core (round-robin, see addPoller).
static bool s_enable_cpu_affinity = true;
// EventPollerPool is constructed on the first call to EventPollerPool::Instance().
EventPollerPool::EventPollerPool() {
// Create s_pool_size pollers named "event poller" at highest thread priority.
// The 4th argument presumably enables per-thread registration (register_thread)
// so getCurrentPoller() can find the poller — TODO confirm against runLoop.
// NOTE(review): excerpt — the constructor body continues in the addPoller trace below.
auto size = addPoller("event poller", s_pool_size, ThreadPool::PRIORITY_HIGHEST, true, s_enable_cpu_affinity);
// TaskExecutorGetterImp::addPoller — the shared helper that actually spawns pollers.
TaskExecutorGetterImp::addPoller
// A size of 0 falls back to one poller per hardware thread.
auto cpus = thread::hardware_concurrency();
size = size > 0 ? size : cpus;
for (size_t i = 0; i < size; ++i)
// Each poller gets a distinct name, e.g. "event poller 3".
auto full_name = name + " " + to_string(i);
EventPoller::Ptr poller(new EventPoller(full_name, (ThreadPool::Priority)priority));
// First argument false: start the loop in its own thread and return immediately
// (see the runLoop excerpt below).
poller->runLoop(false, register_thread);
// The first queued task executes inside the new loop thread: it names the
// thread and optionally pins it to core (i % cpus) so pollers spread over CPUs.
poller->async([i, cpus, full_name, enable_cpu_affinity]() {
setThreadName(full_name.data());
if (enable_cpu_affinity) {
setThreadAffinity(i % cpus);
}
});
// The pool keeps ownership of every poller it created.
_threads.emplace_back(std::move(poller));
创建EventPoller并初始化
// EventPoller: one epoll instance plus a wakeup pipe per event-loop thread.
//
// @param name     thread name for the loop thread (moved into _name).
// @param priority scheduling priority applied when the loop starts running.
// @throws std::runtime_error if the epoll instance cannot be created.
EventPoller::EventPoller(std::string name, ThreadPool::Priority priority)
    : _name(std::move(name)), _priority(priority) {
    // Both pipe ends must be non-blocking: the read end is drained inside the
    // loop thread and the write end is poked from arbitrary threads (async()).
    SockUtil::setNoBlocked(_pipe.readFD());
    SockUtil::setNoBlocked(_pipe.writeFD());
    // The size hint is ignored by modern kernels but must be greater than 0.
    _epoll_fd = epoll_create(EPOLL_SIZE);
    if (_epoll_fd == -1) {
        // Fail fast: without an epoll fd this poller is unusable, and the
        // original code would go on to call setCloExec/addEvent on fd -1.
        throw std::runtime_error(std::string("epoll_create failed: ") + strerror(errno));
    }
    SockUtil::setCloExec(_epoll_fd);
    // Watch the pipe's read end so async() can wake up a blocked epoll_wait.
    addEvent(_pipe.readFD(), Event_Read, [this](int event) { onPipeEvent(); });
}
运行
创建thread并运行,EventPoller持有该线程,并在该线程运行epoll_wait。
- 当前线程调用EventPoller::runLoop,创建thread线程。
// Call site: the pool asks the poller to start its loop without blocking.
TaskExecutorGetterImp::addPoller
poller->runLoop(false, register_thread);
// With blocked == false, runLoop spawns a dedicated thread that re-enters
// runLoop with blocked == true and stays there for the poller's lifetime.
EventPoller::runLoop
// NOTE(review): raw `new thread` — presumably joined and deleted in the
// poller's shutdown path, which is outside this excerpt; verify upstream.
_loop_thread = new thread(&EventPoller::runLoop, this, true, ref_self);
- 运行thread,在新创建的线程中再次运行EventPoller::runLoop,后面该线程将会阻塞运行。
// Now running inside the dedicated loop thread (blocked == true path).
EventPoller::runLoop
ThreadPool::setPriority(_priority);
_loop_thread_id = this_thread::get_id();
// Publish this poller (presumably in a thread-local) so that
// EventPoller::getCurrentPoller() can find it — see getPoller below.
s_current_poller = shared_from_this();
_exit_flag = false;
struct epoll_event events[EPOLL_SIZE];
while (!_exit_flag)
// minDelay is presumably the delay until the nearest pending timer; 0 means
// "no timers", so epoll_wait blocks indefinitely (-1) until an fd event or
// a wakeup byte on the pipe.
uint64_t minDelay = getMinDelay();
int ret = epoll_wait(_epoll_fd, events, EPOLL_SIZE, minDelay ? minDelay : -1);
if (ret <= 0)
continue; // timed out or interrupted by a signal
for (int i = 0; i < ret; ++i)
struct epoll_event &ev = events[i];
int fd = ev.data.fd;
auto it = _event_map.find(fd);
if (it == _event_map.end())
// Stale fd with no registered callback: stop watching it.
epoll_ctl(_epoll_fd, EPOLL_CTL_DEL, fd, nullptr);
continue;
// Copy the shared_ptr so the callback object stays alive even if it is
// removed from _event_map while executing.
auto cb = it->second;
(*cb)(toPoller(ev.events));
使用
添加事件
主要添加两类事件,一个是监听套接字的监听事件,一个是客户端套接字的读写事件。添加时指定该套接字对应的回调函数,epoll事件到来时,根据套接字搜索得到对应的回调进行执行。
// Register fd with the epoll instance and remember its callback.
// Returns epoll_ctl's result: 0 on success, -1 on failure.
int EventPoller::addEvent(int fd, int event, PollEventCB cb)
struct epoll_event ev = {0};
// EPOLLEXCLUSIVE (Linux 4.5+) suppresses thundering-herd wakeups when the same
// fd is watched by multiple epoll instances; it is only valid with EPOLL_CTL_ADD.
ev.events = (toEpoll(event)) | EPOLLEXCLUSIVE;
ev.data.fd = fd;
int ret = epoll_ctl(_epoll_fd, EPOLL_CTL_ADD, fd, &ev);
if (ret == 0)
// The callback is stored behind a shared_ptr so the dispatch loop can copy
// it cheaply before invoking (see runLoop).
_event_map.emplace(fd, std::make_shared<PollEventCB>(std::move(cb)));
return ret;
// Register the accept (read) event for a listening socket.
Socket::listen
// Capture weak references only: the epoll callback must not extend the
// lifetime of the Socket or of its fd wrapper.
weak_ptr<SockFD> weak_sock = sock;
weak_ptr<Socket> weak_self = shared_from_this();
_poller->addEvent(sock->rawFd(), EventPoller::Event_Read | EventPoller::Event_Error, [weak_self, weak_sock](int event) {
// NOTE(review): excerpt omits the weak_self.lock()/weak_sock.lock() guards
// that produce strong_self/strong_sock (and bail out when either is gone).
strong_self->onAccept(strong_sock, event);
}
异步执行
原理:把待执行的task放入到该poller的执行队列中,然后唤醒该poller所在线程去一次性执行队列中的所有累积任务。
EventPoller::async
EventPoller::async_l
// Queue a task for the loop thread and wake it up through the pipe.
// Returns the shared Task handle to the caller.
EventPoller::async_l(TaskIn task_in)
auto task = std::make_shared<Task>(std::move(task_in));
{
// Keep the critical section minimal: only the queue push is under the lock.
lock_guard<mutex> lck(_mtx_task);
_list_task.emplace_back(task);
}
// Write a single byte (the NUL of "") so epoll_wait on the pipe's read end
// returns and onPipeEvent drains the task queue.
_pipe.write("", 1);
return task;
// Runs in the loop thread whenever the wakeup pipe becomes readable.
EventPoller::onPipeEvent
// Drain the pipe: multiple async() calls may have written before we woke up.
_pipe.read(buf, sizeof(buf));
decltype(_list_task) _list_swap;
{
// Swap the whole queue out under the lock, then execute tasks unlocked, so a
// task body can call async() again without deadlocking on _mtx_task.
lock_guard<mutex> lck(_mtx_task);
_list_swap.swap(_list_task);
}
_list_swap.for_each([&](const Task::Ptr &task) {
(*task)();
}
getPoller
根据输入参数,决定返回当前线程对应的poller,还是负载最少的poller。
// Pick a poller: the calling thread's own poller when preference is enabled,
// otherwise fall back to the pool's executor-selection policy.
EventPollerPool::getPoller(bool prefer_current_thread)
auto poller = EventPoller::getCurrentPoller();
// Both the per-call flag and the pool-wide _prefer_current_thread switch must
// be on, and the calling thread must itself be a poller thread (poller != null).
if (prefer_current_thread && _prefer_current_thread && poller)
return poller;
// getExecutor() presumably returns the least-loaded executor (per the prose
// above); downcast it back to EventPoller.
return dynamic_pointer_cast<EventPoller>(getExecutor());