封装了一个使用timerfd和epoll实现的定时器
我的思路是
- 定时器类初始化的时候直接启动一个线程,线程只负责在定时事件触发后派发任务
- epoll定时器事件触发的时候,将注册的定时事件投递给任务线程
- 例子中任务线程使用了一个线程池
头文件
#ifndef CCTimer_H_
#define CCTimer_H_
#include "ThreadPool.h"
#include <sys/epoll.h>
#include <sys/timerfd.h>
#include <cstring>
#include <stdint.h>
#include "SafeMap.h"
typedef std::function<void()> TimerCallback;
/// Fills `count` bytes starting at `dest` with zero (std::memset convenience wrapper).
inline void memZero(void* dest, size_t count)
{
    std::memset(dest, 0, count);
}
// Per-timer bookkeeping. epoll_event.data.ptr points at the instance stored
// in CCTimer's map, so entries must keep a stable address while registered.
struct TimerInfo{
int timerfd; ///< fd from timerfd_create() driving this timer
TimerCallback timerFunc; ///< user callback dispatched on expiry
uint32_t timerId; ///< user-chosen key into the timer map
bool isPeriodic; ///< true: re-fires every interval; false: one-shot
};
// timerfd + epoll based timer.
// A detached thread blocks in epoll_wait and dispatches fired timer callbacks
// to a thread pool (or runs them inline when no pool is available).
// NOTE(review): the dispatch thread is detached and keeps using `this` after
// ~CCTimer() — confirm the object lives for the whole process, or add a join.
class CCTimer {
public:
///< Starts the dispatch thread and a pool with `thread_count` workers.
CCTimer(const size_t thread_count = 1);
///< Closes the epoll fd; does NOT stop the detached dispatch thread.
~CCTimer();
///< Registers timer `timerId` to fire after `ms` milliseconds running `cb`;
///< `isPeriodic` repeats it every `ms`. False if the id exists or setup fails.
bool setTimeEvent(const uint32_t timerId, const uint32_t ms, TimerCallback cb, const bool isPeriodic = false);
///< Disarms and removes timer `timerId`; false if it does not exist.
bool cancelTimeEvent(const uint32_t timerId);
private:
///< Dispatch loop: epoll_wait, drain the timerfd, hand the callback off.
void handleTimerfdInEpoll();
bool timerIdIsExist(const uint32_t timerId);
bool epollAddTimerFd(const uint32_t timerId, const int timerfd);
bool epollDelTimerFd(const int timerfd);
///< Arms `timerfd` to expire in `ms` milliseconds (repeating when `isPeriodic`).
bool timerFdSetTime(const int timerfd, const uint32_t ms, const bool isPeriodic = false);
///< Disarms `timerfd` by writing an all-zero itimerspec.
bool stopTimerfdSetTime(const int timerfd);
///< Drains the expiration counter so the fd stops reporting readable.
void readTimerfd(int timerfd);
private:
static const int initEventListSize_ = 16;
std::shared_ptr<thread_pool> threadPool_; // pluggable — swap in your own pool implementation
const int epollfd_;
typedef std::vector<struct epoll_event> EventList;
///< scratch list that epoll_wait fills with triggered events
EventList events_;
SafeMap<uint32_t, TimerInfo> timerMap_; // thread-safe map; a plain map suffices if only one thread uses this object
};
#endif
源文件
#include "Timer.h"
#include <unistd.h>
#include <iostream>
///< Creates the epoll instance, the worker pool and the detached dispatch thread.
///< NOTE(review): the dispatch thread is detached and keeps dereferencing `this`,
///< so the timer object must outlive the process — confirm, or add a join/stop.
CCTimer::CCTimer(const size_t thread_count):
epollfd_(::epoll_create1(EPOLL_CLOEXEC)),
events_(initEventListSize_)
{
    ///< Fail loudly instead of silently spinning on an invalid epoll fd.
    if (epollfd_ < 0) {
        std::cerr << "CCTimer: epoll_create1 failed" << std::endl;
    }
    ///< Demo convenience: owning the pool here; ideally hold a pointer to a shared/global pool.
    threadPool_ = std::make_shared<thread_pool>(thread_count);
    ///< One thread dedicated to the epoll event loop.
    std::thread t(&CCTimer::handleTimerfdInEpoll, this);
    t.detach();
}
///< Closes the epoll fd. NOTE(review): the detached dispatch thread may still
///< be running and will keep using the closed fd and freed members — confirm
///< the object lives for the whole process, or add a shutdown/join mechanism.
CCTimer::~CCTimer()
{
::close(epollfd_);
}
bool CCTimer::setTimeEvent(const uint32_t timerId, const uint32_t ms, TimerCallback cb, const bool isPeriodic)
{
if (timerIdIsExist(timerId)) {
return false;
}
int timerfd = ::timerfd_create(CLOCK_MONOTONIC,TFD_NONBLOCK | TFD_CLOEXEC);
if (timerfd < 0) {
return false;
}
if (!timerFdSetTime(timerfd, ms, isPeriodic)) {
return false;
}
if (!epollAddTimerFd(timerId, timerfd)) {
return false;
}
timerMap_[timerId].timerFunc = cb;
timerMap_[timerId].isPeriodic = isPeriodic;
return true;
}
///< Disarms timer `timerId`, removes it from epoll and the map, and closes its
///< fd. Returns false when the id is unknown or disarming fails.
///< Fix vs. original: the timerfd is now closed (it was leaked on every cancel
///< and on every one-shot expiry).
bool CCTimer::cancelTimeEvent(const uint32_t timerId)
{
    if (!timerIdIsExist(timerId)) {
        return false;
    }
    const int fd = timerMap_[timerId].timerfd; ///< single lookup, reused below
    if (!stopTimerfdSetTime(fd)) {
        return false;
    }
    ///< Stop epoll from watching the fd, then drop the bookkeeping entry.
    epollDelTimerFd(fd);
    timerMap_.erase(timerId);
    ///< Release the descriptor itself — the original leaked it.
    ::close(fd);
    return true;
}
///< True when a timer with key `timerId` is currently registered.
bool CCTimer::timerIdIsExist(const uint32_t timerId)
{
    const auto pos = timerMap_.find(timerId);
    return pos != timerMap_.end();
}
///< Arms `timerfd` to expire in `ms` milliseconds; when `isPeriodic`, it also
///< re-fires every `ms` thereafter. Returns false if timerfd_settime fails.
///< BUG FIX: ms -> ns needs *1'000'000 — the old `* 1000` made the sub-second
///< part fire 1000x too early (e.g. 500 ms became 0.5 ms).
bool CCTimer::timerFdSetTime(const int timerfd, const uint32_t ms, const bool isPeriodic)
{
    struct itimerspec newValue;
    memZero(&newValue, sizeof newValue);
    if (ms == 0) {
        ///< An all-zero it_value would DISARM the timer; fire "immediately" instead.
        newValue.it_value.tv_nsec = 1;
    } else {
        newValue.it_value.tv_sec = ms / 1000;
        newValue.it_value.tv_nsec = (ms % 1000) * 1000000;
    }
    if (isPeriodic) {
        newValue.it_interval = newValue.it_value;
    }
    ///< timerfd_settime returns 0 on success.
    if (::timerfd_settime(timerfd, 0, &newValue, NULL)) {
        return false;
    }
    return true;
}
///< Disarm `timerfd` by writing an all-zero itimerspec; false if the call fails.
bool CCTimer::stopTimerfdSetTime(const int timerfd)
{
    struct itimerspec zeroed;
    memZero(&zeroed, sizeof zeroed);
    return ::timerfd_settime(timerfd, 0, &zeroed, NULL) == 0;
}
///< Inserts a TimerInfo entry for `timerId` into the map and registers
///< `timerfd` with epoll, storing a pointer to the map-owned entry in
///< event.data.ptr. On epoll failure the map entry is rolled back.
///< NOTE(review): this assumes SafeMap elements are address-stable (true for
///< node-based std::map/unordered_map backends) — confirm, otherwise the
///< stored pointer dangles after later insertions.
bool CCTimer::epollAddTimerFd(const uint32_t timerId, const int timerfd)
{
    ///< BUG FIX: value-initialize so timerFunc/isPeriodic are never read as
    ///< indeterminate values before the caller fills them in.
    TimerInfo info{};
    info.timerfd = timerfd;
    info.timerId = timerId;
    timerMap_[timerId] = info;
    struct epoll_event event;
    memZero(&event, sizeof event);
    event.events = EPOLLIN;
    event.data.ptr = &timerMap_[timerId];
    if (::epoll_ctl(epollfd_, EPOLL_CTL_ADD, timerfd, &event) < 0) {
        timerMap_.erase(timerId);
        return false;
    }
    return true;
}
///< Removes `timerfd` from the epoll interest list.
///< The event argument is ignored by EPOLL_CTL_DEL (a non-NULL pointer is only
///< required on kernels < 2.6.9), so it is passed zeroed here instead of the
///< old misleading EPOLLOUT/data.fd setup.
bool CCTimer::epollDelTimerFd(const int timerfd)
{
    struct epoll_event event;
    memZero(&event, sizeof event);
    if (::epoll_ctl(epollfd_, EPOLL_CTL_DEL, timerfd, &event) < 0) {
        return false;
    }
    return true;
}
///< Drain the 8-byte expiration counter so epoll stops reporting the fd readable.
void CCTimer::readTimerfd(int timerfd)
{
    uint64_t expirations = 0;
    const ssize_t got = ::read(timerfd, &expirations, sizeof(expirations));
    if (got != static_cast<ssize_t>(sizeof(expirations))) {
        ///< short or failed read — nothing sensible to do besides logging
        return;
    }
}
void CCTimer::handleTimerfdInEpoll()
{
while (true) {
int numEvents = ::epoll_wait(epollfd_,
&*events_.begin(),
static_cast<int>(events_.size()),
0);
///< 事件触发之后就将函数提交给线程池去执行
for (int i = 0; i < numEvents; i++) {
TimerInfo* infoPtr = static_cast<TimerInfo*>(events_[i].data.ptr);
readTimerfd(infoPtr->timerfd);
if (threadPool_ == nullptr) {
infoPtr->timerFunc(); //你要是么有线程池,就直接执行回调
} else {
threadPool_->execute(infoPtr->timerFunc);//推荐其他线程执行,保证定时准确
}
if (!infoPtr->isPeriodic) {
cancelTimeEvent(infoPtr->timerId);
}
}
///< 说明一次触发的事件太多,扩大容量
if (static_cast<size_t>(numEvents) == events_.size()) {
events_.resize(events_.size()*2);
}
}
}
使用例子
#include "Timer.h"
#include <iostream>
#include <chrono>
using namespace std;
// Prints a single integer on its own line.
void printANum(int value)
{
    std::cout << value << std::endl;
}
// Prints the given string on its own line.
void printAString(std::string text)
{
    std::cout << text << std::endl;
}
int main (int argc, char** argv)
{
CCTimer cc;
auto f = std::bind(printANum, 100);
auto f2 = std::bind(printAString, "hahahahhaha");
cc.setTimeEvent(1, 1000, f, true);
cc.setTimeEvent(2, 1000, f2, true);
std::chrono::seconds sec(5);
std::this_thread::sleep_for(sec);
cc.cancelTimeEvent(1);
std::chrono::seconds sec2(5);
std::this_thread::sleep_for(sec2);
}
这个定时器实现用到了线程池。代码中为了演示方便,直接在定时器类中创建线程池;实际项目中线程池类应该是个全局对象,定时器只持有指针即可。评论中有人说需要线程池的代码,现在放在下边。
头文件
#pragma once
#include <mutex>
#include <condition_variable>
#include <functional>
#include <queue>
#include <thread>
// Minimal fixed-size thread pool: detached worker threads pop std::function
// tasks from a mutex/condvar-protected queue.
// NOTE(review): the default constructor leaves data_ null, so execute() on a
// default-constructed pool dereferences nullptr — confirm callers always use
// thread_pool(thread_count).
class thread_pool {
public:
// Spawns `thread_count` detached worker threads.
explicit thread_pool(const size_t thread_count);
thread_pool() = default;
thread_pool(thread_pool &&) = default;
// Signals shutdown; workers drain the remaining queue and then exit.
~thread_pool();
public:
// Enqueues any callable compatible with void() and wakes one worker.
template <class F>
void execute(F &&task) {
{
std::lock_guard<std::mutex> lk(data_->mtx_);
data_->tasks_.emplace(std::forward<F>(task));
}
data_->cond_.notify_one();
}
private:
// Shared queue state. Held via shared_ptr so it can outlive the pool object
// while detached workers are still running.
struct data {
std::mutex mtx_;
std::condition_variable cond_;
bool is_shutdown_ = false;
std::queue<std::function<void()>> tasks_;
};
std::shared_ptr<data> data_;
private:
// Worker loop: run tasks until the queue is empty and shutdown is set.
void run_in_thread();
};
源文件
#include "ThreadPool.h"
// Spawns `thread_count` detached workers running run_in_thread().
// NOTE(review): each worker captures `this`; if the pool is destroyed before a
// worker's first access to data_, that access is a use-after-free — confirm
// the pool outlives worker startup (a global/long-lived pool avoids this).
thread_pool::thread_pool(const size_t thread_count)
: data_(std::make_shared<data>()) {
for (size_t i = 0; i < thread_count; ++i) {
std::thread(&thread_pool::run_in_thread, this).detach();
}
}
// Flag shutdown under the lock, then wake every worker so they can drain the
// remaining tasks and exit. A moved-from pool (null data_) is a no-op.
thread_pool::~thread_pool() {
    if (!data_) {
        return;
    }
    {
        std::lock_guard<std::mutex> lk(data_->mtx_);
        data_->is_shutdown_ = true;
    }
    data_->cond_.notify_all();
}
void thread_pool::run_in_thread() {
std::unique_lock<std::mutex> lk(data_->mtx_);
for (;;) {
if (!data_->tasks_.empty()) {
auto current_func = std::move(data_->tasks_.front());
data_->tasks_.pop();
lk.unlock();
current_func();
lk.lock();
} else if (data_->is_shutdown_) {
break;
} else {
data_->cond_.wait(lk);
}
}
}
线程池使用例子
// Logs the executing worker's thread id together with the task's integer payload.
void intHandler(int value)
{
    std::cout << "[" << std::this_thread::get_id() << "]" << value << std::endl;
}
int main(int argc, char **argv) {
thread_pool tp(3);
for (int i = 0; i < 1000; i++) {
std::this_thread::sleep_for(std::chrono::milliseconds(1000));
tp.execute(std::bind(intHandler, i));
}
return 0;
}