// Adapted from: https://github.com/chenshuo/muduo/blob/master/examples/sudoku/stat.h
// This is not a standalone header
// Per-second sliding-window statistics for the sudoku server.
// Keeps the most recent kSeconds (60) seconds of request counts and latency
// sums in two parallel circular buffers, plus lifetime totals.
// Thread safe: every member function takes mutex_.
class SudokuStat : muduo::noncopyable
{
 public:
  explicit SudokuStat(const ThreadPool& pool)  // explicit: implicit ThreadPool->SudokuStat conversion makes no sense
    : pool_(pool),
      lastSecond_(0),
      requests_(kSeconds),
      latencies_(kSeconds),
      totalRequests_(0),
      totalResponses_(0),
      totalSolved_(0),
      badRequests_(0),
      droppedRequests_(0),
      totalLatency_(0),
      badLatency_(0)
  {
  }

  // Formats all counters as "key value\n" lines for the monitoring client.
  string report() const
  {
    LogStream result;
    // queueSize() does its own locking; read it before taking our mutex.
    size_t queueSize = pool_.queueSize();
    result << "task_queue_size " << queueSize << '\n';
    {
      MutexLockGuard lock(mutex_);
      result << "total_requests " << totalRequests_ << '\n';
      result << "total_responses " << totalResponses_ << '\n';
      result << "total_solved " << totalSolved_ << '\n';
      result << "bad_requests " << badRequests_ << '\n';
      result << "dropped_requests " << droppedRequests_ << '\n';
      result << "latency_sum_us " << totalLatency_ << '\n';
      if (badLatency_ > 0)
      {
        // FIXED: key and value were fused ("bad_latencyN") — the missing
        // space broke the "key value" format every other line follows.
        result << "bad_latency " << badLatency_ << '\n';
      }
      result << "last_second " << lastSecond_ << '\n';

      // Per-second request counts across the window, and their sum.
      int64_t requests = 0;
      result << "requests_per_second";
      for (size_t i = 0; i < requests_.size(); ++i)
      {
        requests += requests_[i];
        result << ' ' << requests_[i];
      }
      result << '\n';
      result << "requests_60s " << requests << '\n';

      // Per-second latency sums across the window, and their sum.
      int64_t latency = 0;
      result << "latency_sum_us_per_second";
      for (size_t i = 0; i < latencies_.size(); ++i)
      {
        latency += latencies_[i];
        result << ' ' << latencies_[i];
      }
      result << '\n';
      result << "latency_sum_us_60s " << latency << '\n';

      // Average latency over the window and over the process lifetime;
      // guard against division by zero when there was no traffic.
      int64_t latencyAvg60s = requests == 0 ? 0 : latency / requests;
      result << "latency_us_60s " << latencyAvg60s << '\n';
      int64_t latencyAvg = totalResponses_ == 0 ? 0 : totalLatency_ / totalResponses_;
      result << "latency_us_avg " << latencyAvg << '\n';
    }
    return result.buffer().toString();
  }

  // Zeroes every counter and empties the sliding window.
  string reset()
  {
    {
      MutexLockGuard lock(mutex_);
      lastSecond_ = 0;
      requests_.clear();
      latencies_.clear();
      totalRequests_ = 0;
      totalResponses_ = 0;
      totalSolved_ = 0;
      badRequests_ = 0;
      droppedRequests_ = 0;  // FIXED: was omitted, so it survived reset()
      totalLatency_ = 0;
      badLatency_ = 0;
    }
    return "reset done.";
  }

  // Records one finished response: 'receive' is when the request arrived,
  // 'now' is when the answer was produced, 'solved' whether the puzzle was
  // actually solved. Window-maintenance logic below is unchanged.
  void recordResponse(Timestamp now, Timestamp receive, bool solved)
  {
    // The wall-clock second this response belongs to, and its latency in us.
    const time_t second = now.secondsSinceEpoch();
    const int64_t elapsed_us = now.microSecondsSinceEpoch() - receive.microSecondsSinceEpoch();
    MutexLockGuard lock(mutex_);
    assert(requests_.size() == latencies_.size());

    ++totalResponses_;
    if (solved)
      ++totalSolved_;
    if (elapsed_us < 0)
    {
      // Clock stepped backwards between receive and now; count it separately
      // rather than poisoning the latency sums.
      ++badLatency_;
      return;
    }
    totalLatency_ += elapsed_us;

    // Second covered by the front slot of the circular buffers.
    const time_t firstSecond = lastSecond_ - static_cast<ssize_t>(requests_.size()) + 1;
    if (lastSecond_ == second)
    {
      // The most common case: same second as the previous response.
      ++requests_.back();
      latencies_.back() += elapsed_us;
    }
    else if (lastSecond_ + 1 == second || lastSecond_ == 0)
    {
      // Next second (or the very first sample): open a fresh slot.
      lastSecond_ = second;
      requests_.push_back(0);
      latencies_.push_back(0);
      ++requests_.back();
      latencies_.back() += elapsed_us;
    }
    else if (second > lastSecond_)
    {
      // Jumped ahead by more than one second.
      if (second < lastSecond_ + kSeconds)
      {
        // Gap smaller than the window: pad skipped seconds with zero slots.
        // e.g. lastSecond_ == 100, second < 160
        while (lastSecond_ < second)
        {
          requests_.push_back(0);
          latencies_.push_back(0);
          ++lastSecond_;
        }
      }
      else
      {
        // Gap of a full window or more: everything buffered is stale.
        // e.g. lastSecond_ == 100, second >= 160
        requests_.clear();
        latencies_.clear();
        lastSecond_ = second;
        requests_.push_back(0);
        latencies_.push_back(0);
      }
      ++requests_.back();
      latencies_.back() += elapsed_us;
    }
    else if (second >= firstSecond)
    {
      // Response for an earlier second still inside the window (worker
      // threads can finish out of order).
      // e.g. lastSecond_ == 150, size == 10, second > 140
      // FIXME: if second > lastSecond_ - kSeconds, push_front()
      size_t idx = second - firstSecond;
      assert(idx < requests_.size());
      ++requests_[idx];
      latencies_[idx] += elapsed_us;
      // lastSecond_ is deliberately left unchanged.
    }
    else
    {
      // Older than the whole window: discard.
      // e.g. lastSecond_ == 150, size == 10, second <= 140
      assert(second < firstSecond);
    }
    assert(requests_.size() == latencies_.size());
  }

  // Counts an accepted request.
  void recordRequest()
  {
    MutexLockGuard lock(mutex_);
    ++totalRequests_;
  }

  // Counts a malformed request.
  void recordBadRequest()
  {
    MutexLockGuard lock(mutex_);
    ++badRequests_;
  }

  // Counts a request rejected before processing (e.g. queue overflow —
  // presumably; confirm against the caller).
  void recordDroppedRequest()
  {
    MutexLockGuard lock(mutex_);
    ++droppedRequests_;
  }

 private:
  const ThreadPool& pool_;  // only for ThreadPool::queueSize()
  mutable MutexLock mutex_;

  // Invariants (all guarded by mutex_):
  // 0. requests_.size() == latencies_.size()
  // 1. if lastSecond_ > 0, requests_.back() is for that second
  // 2. requests_.front() is for second (lastSecond_ - size() + 1)
  time_t lastSecond_;  // second of the newest slot; 0 before the first sample
  boost::circular_buffer<int64_t> requests_;   // requests handled, per second
  boost::circular_buffer<int64_t> latencies_;  // latency sum in us, per second
  int64_t totalRequests_, totalResponses_, totalSolved_, badRequests_, droppedRequests_, totalLatency_, badLatency_;
  // FIXME: int128_t for totalLatency_
  static const int kSeconds = 60;  // window width in seconds
};