Data Structures
typedef struct redisObject {
unsigned type:4;
unsigned encoding:4;
unsigned lru:LRU_BITS; /* LRU time (relative to global lru_clock) or
* LFU data (least significant 8 bits frequency
* and most significant 16 bits access time). */
int refcount;
void *ptr;
} robj;
struct redisServer {
...
    unsigned int lruclock;      /* Clock for LRU eviction */
...
}
Both lru in redisObject and lruclock in redisServer are timestamps with a one-second resolution (LRU_CLOCK_RESOLUTION is 1000 ms), truncated to LRU_BITS (24) bits: lruclock holds the current time, while lru records when the object was last accessed. Redis uses the difference between the two as the basis for deciding which keys to evict.
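For reference, the related constants and clock helpers (quoted from memory from Redis 4.0's server.h and evict.c, so treat the exact wording as approximate):

#define LRU_BITS 24
#define LRU_CLOCK_MAX ((1<<LRU_BITS)-1) /* Max value of obj->lru */
#define LRU_CLOCK_RESOLUTION 1000       /* LRU clock resolution in ms */

/* Use the cached server.lruclock when serverCron runs at least once per
 * resolution period; otherwise compute the clock on the fly. */
#define LRU_CLOCK() ((1000/server.hz <= LRU_CLOCK_RESOLUTION) ? server.lruclock : getLRUClock())

unsigned int getLRUClock(void) {
    /* Unix time in ms, scaled to the 1-second resolution and truncated to 24 bits. */
    return (mstime()/LRU_CLOCK_RESOLUTION) & LRU_CLOCK_MAX;
}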
int serverCron(struct aeEventLoop *eventLoop, long long id, void *clientData) {
...
unsigned long lruclock = getLRUClock();
atomicSet(server.lruclock,lruclock);
...
}
serverCron periodically refreshes server.lruclock; the lru field of a robj is set to the current LRU clock when the object is created and again on every access or modification.
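The access-time update lives in lookupKey in db.c; a simplified version (reproduced from memory, Redis 4.0) looks roughly like this:

robj *lookupKey(redisDb *db, robj *key, int flags) {
    dictEntry *de = dictFind(db->dict,key->ptr);
    if (de) {
        robj *val = dictGetVal(de);
        /* Refresh the access time for the ageing algorithm, unless an
         * RDB/AOF child is active (to avoid copy-on-write) or the caller
         * asked not to touch the key. */
        if (server.rdb_child_pid == -1 &&
            server.aof_child_pid == -1 &&
            !(flags & LOOKUP_NOTOUCH))
        {
            if (server.maxmemory_policy & MAXMEMORY_FLAG_LFU) {
                updateLFU(val);
            } else {
                val->lru = LRU_CLOCK();
            }
        }
        return val;
    } else {
        return NULL;
    }
}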
Eviction Mechanism
Eviction Policies
Redis provides the following eviction policies (a redis.conf example follows the list):
- volatile-lru: evict the least recently used keys among those with an expire set;
- volatile-ttl: evict the keys closest to their expire time among those with an expire set;
- volatile-random: evict random keys among those with an expire set;
- allkeys-lru: evict the least recently used keys from the whole keyspace;
- allkeys-random: evict random keys from the whole keyspace;
- noeviction: do not evict anything; write commands that would need more memory fail with an error.
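Redis 4.0 additionally offers volatile-lfu and allkeys-lfu, which evict the least frequently used keys; that is why the code below also checks MAXMEMORY_FLAG_LFU. The policy, the memory limit and the sample size are all set in redis.conf, for example:

maxmemory 100mb                 # start evicting once about 100 MB are in use
maxmemory-policy allkeys-lru    # approximate LRU over the whole keyspace
maxmemory-samples 5             # keys sampled per eviction attempt (default 5)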
Eviction Flow
Every time Redis executes a command it checks whether memory needs to be freed, provided that maxmemory has been set in the configuration file.
int processCommand(client *c) {
...
/* Handle the maxmemory directive.
*
* First we try to free some memory if possible (if there are volatile
* keys in the dataset). If there are not the only thing we can do
* is returning an error. */
if (server.maxmemory) {
int retval = freeMemoryIfNeeded();
/* freeMemoryIfNeeded may flush slave output buffers. This may result
* into a slave, that may be the active client, to be freed. */
if (server.current_client == NULL) return C_ERR;
/* It was impossible to free enough memory, and the command the client
* is trying to execute is denied during OOM conditions? Error. */
if ((c->cmd->flags & CMD_DENYOOM) && retval == C_ERR) {
flagTransaction(c);
addReply(c, shared.oomerr);
return C_OK;
}
}
...
}
processCommand calls freeMemoryIfNeeded to free memory. The function computes Redis's current total memory usage, excluding two parts:
- memory used by the replicas' output buffers
- memory used by the AOF buffer
It then evicts keys in a loop, according to the configured policy, until usage falls below the limit. Only volatile-lru, volatile-ttl and allkeys-lru are analysed here; the random policies are straightforward. These three policies select their eviction candidates by calling evictionPoolPopulate.
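A heavily simplified sketch of the freeMemoryIfNeeded loop (from memory, Redis 4.0; error handling, latency tracking and the random/LFU branches are omitted, so treat the details as approximate):

int freeMemoryIfNeeded(void) {
    size_t mem_used = zmalloc_used_memory();
    /* The memory of slave output buffers and the AOF buffer is
     * subtracted from mem_used here (omitted). */
    if (mem_used <= server.maxmemory) return C_OK;

    size_t mem_tofree = mem_used - server.maxmemory;
    size_t mem_freed = 0;
    while (mem_freed < mem_tofree) {
        struct evictionPoolEntry *pool = EvictionPoolLRU; /* global pool */
        /* Sample every database and refill the candidate pool. Note how
         * sampledict is db->expires for the volatile-* policies and
         * db->dict otherwise. */
        for (int i = 0; i < server.dbnum; i++) {
            redisDb *db = server.db+i;
            dict *dict = (server.maxmemory_policy & MAXMEMORY_FLAG_ALLKEYS) ?
                          db->dict : db->expires;
            if (dictSize(dict)) evictionPoolPopulate(i, dict, db->dict, pool);
        }
        /* Scan the pool from the tail (largest idle/score), delete the
         * first key that still exists, and add the released bytes to
         * mem_freed. */
        ...
    }
    return C_OK;
}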
void evictionPoolPopulate(int dbid, dict *sampledict, dict *keydict, struct evictionPoolEntry *pool) {
    // If the policy is a volatile-* one, sampledict is db->expires; otherwise it is db->dict.
int j, k, count;
dictEntry *samples[server.maxmemory_samples];
    // Randomly pick up to maxmemory_samples keys (default 5) from sampledict into samples.
count = dictGetSomeKeys(sampledict,samples,server.maxmemory_samples);
for (j = 0; j < count; j++) {
unsigned long long idle;
sds key;
robj *o;
dictEntry *de;
de = samples[j];
key = dictGetKey(de);
/* If the dictionary we are sampling from is not the main
* dictionary (but the expires one) we need to lookup the key
* again in the key dictionary to obtain the value object. */
if (server.maxmemory_policy != MAXMEMORY_VOLATILE_TTL) {
if (sampledict != keydict) de = dictFind(keydict, key);
o = dictGetVal(de);
}
/* Calculate the idle time according to the policy. This is called
* idle just because the code initially handled LRU, but is in fact
* just a score where an higher score means better candidate. */
if (server.maxmemory_policy & MAXMEMORY_FLAG_LRU) {
idle = estimateObjectIdleTime(o);
} else if (server.maxmemory_policy & MAXMEMORY_FLAG_LFU) {
/* When we use an LRU policy, we sort the keys by idle time
* so that we expire keys starting from greater idle time.
* However when the policy is an LFU one, we have a frequency
* estimation, and we want to evict keys with lower frequency
* first. So inside the pool we put objects using the inverted
* frequency subtracting the actual frequency to the maximum
* frequency of 255. */
idle = 255-LFUDecrAndReturn(o);
} else if (server.maxmemory_policy == MAXMEMORY_VOLATILE_TTL) {
/* In this case the sooner the expire the better. */
idle = ULLONG_MAX - (long)dictGetVal(de);
} else {
serverPanic("Unknown eviction policy in evictionPoolPopulate()");
}
/* Insert the element inside the pool.
* First, find the first empty bucket or the first populated
* bucket that has an idle time smaller than our idle time. */
k = 0;
while (k < EVPOOL_SIZE &&
pool[k].key &&
pool[k].idle < idle) k++;
if (k == 0 && pool[EVPOOL_SIZE-1].key != NULL) {
/* Can't insert if the element is < the worst element we have
* and there are no empty buckets. */
continue;
} else if (k < EVPOOL_SIZE && pool[k].key == NULL) {
/* Inserting into empty position. No setup needed before insert. */
} else {
/* Inserting in the middle. Now k points to the first element
* greater than the element to insert. */
if (pool[EVPOOL_SIZE-1].key == NULL) {
/* Free space on the right? Insert at k shifting
* all the elements from k to end to the right. */
/* Save SDS before overwriting. */
sds cached = pool[EVPOOL_SIZE-1].cached;
memmove(pool+k+1,pool+k,
sizeof(pool[0])*(EVPOOL_SIZE-k-1));
pool[k].cached = cached;
} else {
/* No free space on right? Insert at k-1 */
k--;
/* Shift all elements on the left of k (included) to the
* left, so we discard the element with smaller idle time. */
sds cached = pool[0].cached; /* Save SDS before overwriting. */
if (pool[0].key != pool[0].cached) sdsfree(pool[0].key);
memmove(pool,pool+1,sizeof(pool[0])*k);
pool[k].cached = cached;
}
}
/* Try to reuse the cached SDS string allocated in the pool entry,
* because allocating and deallocating this object is costly
* (according to the profiler, not my fantasy. Remember:
* premature optimizbla bla bla bla. */
int klen = sdslen(key);
if (klen > EVPOOL_CACHED_SDS_SIZE) {
pool[k].key = sdsdup(key);
} else {
memcpy(pool[k].cached,key,klen+1);
sdssetlen(pool[k].cached,klen);
pool[k].key = pool[k].cached;
}
pool[k].idle = idle;
pool[k].dbid = dbid;
}
}
/* Given an object returns the min number of milliseconds the object was never
* requested, using an approximated LRU algorithm. */
unsigned long long estimateObjectIdleTime(robj *o) {
unsigned long long lruclock = LRU_CLOCK();
if (lruclock >= o->lru) {
return (lruclock - o->lru) * LRU_CLOCK_RESOLUTION;
} else {
return (lruclock + (LRU_CLOCK_MAX - o->lru)) *
LRU_CLOCK_RESOLUTION;
}
}
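Because lru has only LRU_BITS (24) bits at one-second resolution, the clock wraps roughly every 194 days, which is what the else branch above handles. A small standalone demo with made-up values (not Redis code):

#include <stdio.h>

#define LRU_BITS 24
#define LRU_CLOCK_MAX ((1<<LRU_BITS)-1)
#define LRU_CLOCK_RESOLUTION 1000

int main(void) {
    unsigned long long obj_lru = LRU_CLOCK_MAX - 10; /* accessed just before the clock wrapped */
    unsigned long long lruclock = 5;                 /* current clock, already wrapped around */
    unsigned long long idle_ms;

    if (lruclock >= obj_lru)
        idle_ms = (lruclock - obj_lru) * LRU_CLOCK_RESOLUTION;
    else
        idle_ms = (lruclock + (LRU_CLOCK_MAX - obj_lru)) * LRU_CLOCK_RESOLUTION;

    printf("idle = %llu ms\n", idle_ms); /* prints 15000: 15 one-second ticks */
    return 0;
}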
struct evictionPoolEntry {
unsigned long long idle; /* Object idle time (inverse frequency for LFU) */
sds key; /* Key name. */
sds cached; /* Cached SDS object for key name. */
int dbid; /* Key DB number. */
};
An eviction pool of length 16 (EVPOOL_SIZE) is used here; its entries are kept sorted by ascending idle time, computed from lru. Each randomly sampled key is inserted into the pool if it satisfies either of the following conditions:
- the pool is not full yet;
- its idle time is larger than that of at least one key already in the pool.
freeMemoryIfNeeded then walks the pool from the tail and evicts the entry with the largest idle time whose key still exists, as the excerpt below shows.
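The corresponding fragment of freeMemoryIfNeeded (Redis 4.0, lightly trimmed and quoted from memory):

        /* Go backward from the best to the worst element to evict. */
        for (k = EVPOOL_SIZE-1; k >= 0; k--) {
            if (pool[k].key == NULL) continue;
            bestdbid = pool[k].dbid;
            if (server.maxmemory_policy & MAXMEMORY_FLAG_ALLKEYS) {
                de = dictFind(server.db[pool[k].dbid].dict, pool[k].key);
            } else {
                de = dictFind(server.db[pool[k].dbid].expires, pool[k].key);
            }
            /* Remove the entry from the pool. */
            if (pool[k].key != pool[k].cached) sdsfree(pool[k].key);
            pool[k].key = NULL;
            pool[k].idle = 0;
            /* If the key still exists it is the victim; otherwise it was
             * deleted in the meantime, so try the next entry. */
            if (de) {
                bestkey = dictGetKey(de);
                break;
            }
        }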
Redis's LRU eviction is therefore not a strict eviction by idle time but an approximation: it samples a few keys at random and evicts the one with the largest idle time among them. The eviction pool keeps the best candidates from previous rounds of sampling, which brings the result closer to a true global LRU while keeping the CPU cost low.
Open Questions
- The purpose of the cached field in evictionPoolEntry is not entirely clear to me.
- evictionPoolEntry stores the idle time, and once written the field is never updated: even if the key is accessed again afterwards, so that its real idle time changes, the idle value recorded in the pool keeps its original value.