一、慢查询
慢查询是指Redis在实际应用中对执行时间超过阈值(默认10ms)的命令进行日志保存。之所以对这类命令进行单独的分析,主要是因为它对Redis的效率分析有重要的作用。在Redis中,一条命令从发送到执行,直到返回结果,各个环节都要耗费时间,但是在Redis的统计过程中,只统计命令执行的时间。也就是说,慢查询一定是执行的过程太慢。
影响慢查询设置的有两个参数:
slowlog-log-slower-than 慢查询阈值(微秒)
slowlog-max-len 慢查询日志数量
这两个参数既可以通过命令config set来设置也可以直接修改配置文件。这两个参数需要根据实际的应用场景和经验不断的进行修改完善,最终有一个经验值范围。
二、源码分析
源码主要在slowlog.h和slowlog.c中:
//slowlog.h
#define SLOWLOG_ENTRY_MAX_ARGC 32
#define SLOWLOG_ENTRY_MAX_STRING 128
//Data structure for a single slow log entry.
/* This structure defines an entry inside the slow log list */
typedef struct slowlogEntry {
robj **argv; //argument vector of the logged command; argc below is its length
int argc;
long long id; /* Unique entry identifier. */
long long duration; /* Time spent by the query, in microseconds. */
time_t time; /* Unix time at which the query was executed. */
sds cname; /* Client name. */
sds peerid; /* Client network address. */
} slowlogEntry;
/* Exported API */
void slowlogInit(void);
void slowlogPushEntryIfNeeded(client *c, robj **argv, int argc, long long duration);//decides whether a command qualifies as slow and, if so, logs it
/* Exported commands */
void slowlogCommand(client *c);//implements the SLOWLOG command and all its subcommands
有没有发现这个头文件有点简单的不得了的感觉,再看看c文件是不是:
/* Create a new slowlog entry.
 * Incrementing the ref count of all the objects retained is up to
 * this function. */
slowlogEntry *slowlogCreateEntry(client *c, robj **argv, int argc, long long duration) {
//allocate the entry itself
slowlogEntry *se = zmalloc(sizeof(*se));
int j, slargc = argc;
//cap the number of logged arguments at SLOWLOG_ENTRY_MAX_ARGC
if (slargc > SLOWLOG_ENTRY_MAX_ARGC) slargc = SLOWLOG_ENTRY_MAX_ARGC;
se->argc = slargc;
se->argv = zmalloc(sizeof(robj*)*slargc);
for (j = 0; j < slargc; j++) {
/* Logging too many arguments is a useless memory waste, so we stop
 * at SLOWLOG_ENTRY_MAX_ARGC, but use the last argument to specify
 * how many remaining arguments there were in the original command. */
//when arguments were dropped, the last stored slot records how many were omitted
if (slargc != argc && j == slargc-1) {
se->argv[j] = createObject(OBJ_STRING,
sdscatprintf(sdsempty(),"... (%d more arguments)",
argc-slargc+1));
} else {
/* Trim too long strings as well... */
//truncate over-long string arguments to SLOWLOG_ENTRY_MAX_STRING bytes
if (argv[j]->type == OBJ_STRING &&
sdsEncodedObject(argv[j]) &&
sdslen(argv[j]->ptr) > SLOWLOG_ENTRY_MAX_STRING)
{
sds s = sdsnewlen(argv[j]->ptr, SLOWLOG_ENTRY_MAX_STRING);
s = sdscatprintf(s,"... (%lu more bytes)",
(unsigned long)
sdslen(argv[j]->ptr) - SLOWLOG_ENTRY_MAX_STRING);
se->argv[j] = createObject(OBJ_STRING,s);
} else if (argv[j]->refcount == OBJ_SHARED_REFCOUNT) {
//shared objects are never released, so the pointer can be stored as-is
se->argv[j] = argv[j];
} else {
/* Here we need to duplicate the string objects composing the
 * argument vector of the command, because those may otherwise
 * end shared with string objects stored into keys. Having
 * shared objects between any part of Redis, and the data
 * structure holding the data, is a problem: FLUSHALL ASYNC
 * may release the shared string object and create a race. */
se->argv[j] = dupStringObject(argv[j]);
}
}
}
//fill in timestamp, duration, unique id and client identity
se->time = time(NULL);
se->duration = duration;
se->id = server.slowlog_entry_id++;
se->peerid = sdsnew(getClientPeerId(c));
se->cname = c->name ? sdsnew(c->name->ptr) : sdsempty();
return se;
}
/* Release a slow log entry. The parameter is typed void* so that this
 * function can be installed directly as the 'free' method of an adlist
 * (see adlist.c).
 *
 * Drops the reference held on every retained argument object and then
 * frees all memory owned by the entry. */
void slowlogFreeEntry(void *septr) {
slowlogEntry *entry = septr;
int i = entry->argc;
/* Release the retained argument objects (order is irrelevant). */
while (i--) decrRefCount(entry->argv[i]);
zfree(entry->argv);
sdsfree(entry->cname);
sdsfree(entry->peerid);
zfree(entry);
}
/* Set up the slow log. Must be invoked exactly once, at server startup,
 * before any entry can be pushed. */
void slowlogInit(void) {
/* The slow log is simply a linked list of slowlogEntry structures,
 * newest entries kept at the head. */
server.slowlog = listCreate();
/* Removed nodes release their payload through slowlogFreeEntry. */
listSetFreeMethod(server.slowlog,slowlogFreeEntry);
/* Monotonically increasing identifier handed to each new entry. */
server.slowlog_entry_id = 0;
}
/* Record the given command in the slow log when it qualifies, then trim
 * the log down to the configured maximum length. */
void slowlogPushEntryIfNeeded(client *c, robj **argv, int argc, long long duration) {
/* A negative threshold means the slow log is disabled. */
if (server.slowlog_log_slower_than < 0) return;
/* Commands at or above the threshold are prepended, so the list is
 * ordered newest-first. */
if (duration >= server.slowlog_log_slower_than) {
slowlogEntry *entry = slowlogCreateEntry(c,argv,argc,duration);
listAddNodeHead(server.slowlog,entry);
}
/* Enforce the configured maximum length by evicting the oldest
 * entries from the tail (runs even when nothing was added, since
 * slowlog-max-len may have been lowered at runtime). */
while (listLength(server.slowlog) > server.slowlog_max_len)
listDelNode(server.slowlog,listLast(server.slowlog));
}
/* Remove all the entries from the current slow log. */
void slowlogReset(void) {
while (listLength(server.slowlog) > 0)
listDelNode(server.slowlog,listLast(server.slowlog));
}
/* The SLOWLOG command. Implements all the subcommands needed to handle the
 * Redis slow log. */
void slowlogCommand(client *c) {
//SLOWLOG HELP: reply with the usage text describing every subcommand
if (c->argc == 2 && !strcasecmp(c->argv[1]->ptr,"help")) {
const char *help[] = {
"GET [count] -- Return top entries from the slowlog (default: 10)."
" Entries are made of:",
" id, timestamp, time in microseconds, arguments array, client IP and port, client name",
"LEN -- Return the length of the slowlog.",
"RESET -- Reset the slowlog.",
NULL
};
addReplyHelp(c, help);
//SLOWLOG RESET: drop every stored entry and reply +OK
} else if (c->argc == 2 && !strcasecmp(c->argv[1]->ptr,"reset")) {
slowlogReset();
addReply(c,shared.ok);
//SLOWLOG LEN: reply with the number of entries currently stored (the list length)
} else if (c->argc == 2 && !strcasecmp(c->argv[1]->ptr,"len")) {
addReplyLongLong(c,listLength(server.slowlog));
//SLOWLOG GET [count]: return up to count entries, newest first (default 10)
} else if ((c->argc == 2 || c->argc == 3) &&
!strcasecmp(c->argv[1]->ptr,"get"))
{
long count = 10, sent = 0;
listIter li;
void *totentries;
listNode *ln;
slowlogEntry *se;
//if the optional count argument is present but is not a valid long,
//getLongFromObjectOrReply has already sent the error reply; just return
if (c->argc == 3 &&
getLongFromObjectOrReply(c,c->argv[2],&count,NULL) != C_OK)
return;
//walk the list from the head (newest entry) and emit each one;
//the total reply length is deferred because it is only known at the end
listRewind(server.slowlog,&li);
totentries = addReplyDeferredLen(c);
while(count-- && (ln = listNext(&li))) {
int j;
se = ln->value;
//each entry is a 6-element array: id, timestamp, duration,
//argument array, peer address, client name
addReplyArrayLen(c,6);
addReplyLongLong(c,se->id);
addReplyLongLong(c,se->time);
addReplyLongLong(c,se->duration);
addReplyArrayLen(c,se->argc);
for (j = 0; j < se->argc; j++)
addReplyBulk(c,se->argv[j]);
addReplyBulkCBuffer(c,se->peerid,sdslen(se->peerid));
addReplyBulkCBuffer(c,se->cname,sdslen(se->cname));
sent++;
}
setDeferredArrayLen(c,totentries,sent);
} else {
//unknown subcommand or wrong arity
addReplySubcommandSyntaxError(c);
}
}
真心的不多,一百来行代码,分析一下,只要是对基础的数据结构和常见的命令处理有过经验,这绝对是不麻烦的一件事儿,有问题看上面的注释即可。
三、基本使用
1、慢查询命令
可以使用下面的命令处理当前的日志:
slowlog get [n] //慢查询日志获取
slowlog len //获取慢查询日志长度
slowlog reset //删除慢查询日志
应用这几条命令可以对日志进行分析处理。
2、简单分析
产生慢查询的原因:
有一些命令本身容易产生慢查询:save;keys *(类似数据库的全表查询);大集合操作,如hgetall、smembers等,其中DEL一个大集合也可能产生慢查询;
持久化也可能产生慢查询:bgsave,aof;
反复不断的处理过期键;
最后一种就是一些命令被其它命令阻塞,导致的慢查询;
解决的方法就是根据实际情况来处理,比如扩大redis的连接池,修改周期处理命令,对持久化进行优化等等。这个就需要随机应变,不能僵化的照搬应用了。
四、总结
redis的慢查询其实是提供了一个对其自身管理和优化以及风险控制的接口,通过这个接口,对可能造成效率和安全影响的情况进行反馈,通过这些反馈的日志,可以帮助数据库管理人员尽快找到问题所在并进行优化。可见redis能够迅速占领内存NOSQL市场并有着不错的业绩,确实是有独到之处。
学习源码,学习人家的设计思想和架构方法,这才是硬道理。