建议使用sourceinsight查看Memcached源码,本人所用源码是Memcached-1.4.23
我喜欢从main函数逐步跟踪查看源码,进行逐步分析。这也是本人第一次尝试,主要通过这样的方式提高学习。
Memcached.c
main函数源码不再贴出
struct passwd *pw;
linux使用struct passwd管理用户信息。
struct passwd
{
char *pw_name; /* login name */
char *pw_passwd; /* password (encrypted) */
__uid_t pw_uid; /* user ID */
__gid_t pw_gid; /* group ID */
char *pw_gecos; /* real/full user name (comment field) */
char *pw_dir; /* home directory */
char *pw_shell; /* login shell program */
};
struct passwd * getpwuid(uid_t uid);
当您知道使用者的uid(user id)时,可以透过getpwuid来得知所有关於该使用者的相关资讯。
struct passwd * getpwnam(char * name);
当您知道使用者名称时,可以透过getpwnam来得知所有关於该使用者的相关资讯。
int getpw(uid_t uid, char *buf);
当您仅需要取得使用者的密码进行比对时,可以使用getpw。
struct rlimit rlim;
在Linux系统中,Resource limit指在一个进程的执行过程中,它所能得到的资源的限制,比如进程的core file的最大值,虚拟内存的最大值等。
Resource limit的大小可以直接影响进程的执行状况。其有两个最重要的概念:soft limit 和 hard limit。
struct rlimit {
rlim_t rlim_cur; /* soft limit: the value currently enforced by the kernel */
rlim_t rlim_max; /* hard limit: the ceiling that rlim_cur may be raised to */
};
soft limit是指内核所能支持的资源上限。比如对于RLIMIT_NOFILE(一个进程能打开的最大文件 数,内核默认是1024),soft limit最大也只能达到1024。对于RLIMIT_CORE(core文件的大小,内核不做限制),soft limit最大能是unlimited。
hard limit在资源中只是作为soft limit的上限。当你设置hard limit后,你以后设置的soft limit只能小于hard limit。要说明的是,hard limit只针对非特权进程,也就是进程的有效用户ID(effective user ID)不是0的进程。具有特权级别的进程(具有属性CAP_SYS_RESOURCE),soft limit则只有内核上限。
usage:ulimit [-SHacdefilmnpqrstuvx [limit]]
当不指定limit的时候,该命令显示当前值。这里要注意的是,当你要修改limit的时候,如果不指定-S或者-H,默认是同时设置soft limit和hard limit。也就是之后设置时只能减不能增。所以,建议使用ulimit设置limit参数是加上-S。
getrlimit和setrlimit的使用也很简单,manpage里有很清楚的描述。
int getrlimit(int resource, struct rlimit *rlim);
int setrlimit(int resource, const struct rlimit *rlim);
需要注意的是你在setrlimit,需要检查是否成功来判断新值有没有超过hard limit。如下例:
/* Example: raise the core-file size limit as far as possible. */
if (getrlimit(RLIMIT_CORE, &rlim)==0) {
/* First attempt: lift both soft and hard limit to infinity. */
rlim_new.rlim_cur = rlim_new.rlim_max = RLIM_INFINITY;
if (setrlimit(RLIMIT_CORE, &rlim_new)!=0) {
/* Infinity was rejected (insufficient privilege); fall back to
 * raising the soft limit up to the existing hard limit. */
rlim_new.rlim_cur = rlim_new.rlim_max =
rlim.rlim_max;
(void) setrlimit(RLIMIT_CORE, &rlim_new);
}
}
enum hashfunc_type hash_type = JENKINS_HASH; 这里说明使用memcached 采用的hash 函数是Bob Jenkins 先生在1996 创立的一个算法,复杂度为O(6n+35),而且冲突率极低,该算法具体过程可以参阅这里。冲突处理的方法为开链法。详解地址:http://blog.csdn.net/linxuping/article/details/21474995
if (!sanitycheck()) {
return EX_OSERR;
}
/* Verify that the libevent we linked against is recent enough (>= 1.3).
 * Returns true when the version is acceptable or cannot be determined,
 * false (after printing an error) for the known-bad 1.1.x / 1.2.x series. */
static bool sanitycheck(void) {
    /* One of our biggest problems is old and bogus libevents */
    const char *ever = event_get_version();
    if (ever == NULL) {
        return true;
    }
    /* Only the 1.x series needs vetting. */
    if (strncmp(ever, "1.", 2) != 0) {
        return true;
    }
    /* Reject 1.1.x and 1.2.x — but not 1.10+/1.20+, hence the extra
     * digit check on the character after the minor version. */
    if ((ever[2] == '1' || ever[2] == '2') && !isdigit(ever[3])) {
        fprintf(stderr, "You are using libevent %s.\nPlease upgrade to"
                " a more recent version (1.3 or newer)\n",
                event_get_version());
        return false;
    }
    return true;
}
说明借用libevent库进行网络控制,且版本必须1.3及以上,否则程序退出。
/* handle SIGINT and SIGTERM */
signal(SIGINT, sig_handler);
signal(SIGTERM, sig_handler);
/* Handler for SIGINT/SIGTERM: report which signal arrived, then exit cleanly. */
static void sig_handler(const int sig) {
    const char *signame = strsignal(sig);
    printf("Signal handled: %s.\n", signame);
    exit(EXIT_SUCCESS);
}
如果遇到SIGINT、SIGTERM信号程序退出
settings_init();
初始化struct setting结构体
static void settings_init(void) {
// Enable CAS support. When on, each item carries an extra CAS field.
// Can be disabled at startup with the -C option.
settings.use_cas = true;
settings.access = 0700; // permission bits for the unix domain socket
settings.port = 11211;// TCP port memcached listens on
settings.udpport = 11211;// UDP port memcached listens on
// IP address memcached binds to. NULL means INADDR_ANY; otherwise an ip string.
settings.inter = NULL;
settings.maxbytes = 64 * 1024 * 1024; // maximum memory memcached may use
settings.maxconns = 1024; // max simultaneous clients; distinct from settings.backlog
settings.verbose = 0;// verbosity of runtime output; larger values mean more detail
settings.oldest_live = 0; // flush_all cutoff: items inserted before this time are dropped
settings.evict_to_free = 1; // whether LRU eviction is allowed (default yes; disable with -M)
settings.socketpath = NULL;// unix socket path; unix sockets are not used by default
settings.factor = 1.25; // growth factor for item chunk sizes
settings.chunk_size = 48; // smallest data size an item can store (set/add payload)
settings.num_threads = 4; // number of worker threads
// number of worker threads serving each udp socket
settings.num_threads_per_udp = 0;
settings.prefix_delimiter = ':'; // key-prefix delimiter (for stats)
settings.detail_enabled = 0;// whether detailed stats are collected automatically
// Maximum consecutive commands a worker thread executes for one client,
// so that a single client cannot monopolize a worker thread while its
// other clients' commands go unserved.
settings.reqs_per_event = 20;
settings.backlog = 1024;// second argument to listen(); distinct from settings.maxconns
// Client protocol: text or binary. negotiating_prot auto-detects
// the protocol from the command content.
settings.binding_protocol = negotiating_prot;
settings.item_size_max = 1024 * 1024;// slab memory page size, in bytes
settings.maxconns_fast = false;// close new connections immediately once maxconns (-c) is exceeded?
// Whether the LRU crawler thread is started. Default false (not started).
// Start it at launch with -o lru_crawler, which sets this to true.
settings.lru_crawler = false;
settings.lru_crawler_sleep = 100;// LRU crawler sleep interval between items, in microseconds
settings.lru_crawler_tocrawl = 0; // items the crawler checks per LRU queue; must be changed for the crawler to do work
// The hash table length is 2^n; this is the initial n. Set at startup with
// -o hashpower_init, valid range [12, 64]. If unset it stays 0 and the
// hash power takes its default value of 16.
settings.hashpower_init = 0; /* Starting hash power level */
settings.slab_reassign = false;// allow rebalancing memory between slab classes (-o slab_reassign)
settings.slab_automove = 0;// auto-detect when slab memory should be moved; requires slab_reassign
settings.shutdown_command = false;// whether clients may issue the shutdown command (kills the memcached process)
// Refcount-leak repair: if a worker thread dies while still holding a
// reference to an item, that item could never be freed. memcached uses this
// value to detect the situation. Because it is rare, the default 0 disables
// the check. Enable at startup with -o tail_repair_time xxx, where xxx must
// be greater than 10 (seconds).
// TAIL_REPAIR_TIME_DEFAULT equals 0.
settings.tail_repair_time = TAIL_REPAIR_TIME_DEFAULT;
settings.flush_enabled = true;// whether clients are allowed to use the flush_all command
}
/* Initialize the global `settings` struct to its compiled-in defaults.
 * Every field here can subsequently be overridden by command-line options. */
static void settings_init(void) {
settings.use_cas = true; /* CAS protocol enabled by default; -C disables it */
settings.access = 0700;
settings.port = 11211; /* memcached TCP service port */
settings.udpport = 11211;
/* By default this string should be NULL for getaddrinfo() */
settings.inter = NULL;
settings.maxbytes = 64 * 1024 * 1024; /* default is 64MB */
settings.maxconns = 1024; /* to limit connections-related memory to about 5MB */
settings.verbose = 0; /* verbosity level for error/warning output */
settings.oldest_live = 0;
settings.oldest_cas = 0; /* supplements accuracy of oldest_live */
settings.evict_to_free = 1; /* push old items out of cache when memory runs out */
settings.socketpath = NULL; /* by default, not using a unix socket */
settings.factor = 1.25;
settings.chunk_size = 48; /* space for a modest key and value */
settings.num_threads = 4; /* N workers */
settings.num_threads_per_udp = 0;
settings.prefix_delimiter = ':';
settings.detail_enabled = 0;
settings.reqs_per_event = 20;
settings.backlog = 1024;
settings.binding_protocol = negotiating_prot;
settings.item_size_max = 1024 * 1024; /* The famous 1MB upper limit. */
settings.maxconns_fast = false;
settings.lru_crawler = false;
settings.lru_crawler_sleep = 100;
settings.lru_crawler_tocrawl = 0;
settings.lru_maintainer_thread = false;
settings.hot_lru_pct = 32;
settings.warm_lru_pct = 32;
settings.expirezero_does_not_evict = false;
settings.hashpower_init = 0;
settings.slab_reassign = false;
settings.slab_automove = 0;
settings.shutdown_command = false;
settings.tail_repair_time = TAIL_REPAIR_TIME_DEFAULT;
settings.flush_enabled = true;
settings.crawls_persleep = 1000;
}
/* Global runtime configuration; populated by settings_init() and then
 * overridden by command-line options. */
struct settings {
size_t maxbytes; /* maximum memory allowed for items, in bytes */
int maxconns; /* maximum number of simultaneous client connections */
int port; /* TCP listen port */
int udpport; /* UDP listen port */
char *inter; /* ip address(es) to bind; NULL means INADDR_ANY */
int verbose; /* verbosity level of runtime output */
rel_time_t oldest_live; /* ignore existing items older than this */
uint64_t oldest_cas; /* ignore existing items with CAS values lower than this */
int evict_to_free; /* nonzero if LRU eviction may reclaim memory */
char *socketpath; /* path to unix socket if using local socket */
int access; /* access mask (a la chmod) for unix domain socket */
double factor; /* chunk size growth factor */
int chunk_size; /* minimum space allocated for an item's data */
int num_threads; /* number of worker (without dispatcher) libevent threads to run */
int num_threads_per_udp; /* number of worker threads serving each udp socket */
char prefix_delimiter; /* character that marks a key prefix (for stats) */
int detail_enabled; /* nonzero if we're collecting detailed stats */
int reqs_per_event; /* Maximum number of io to process on each
io-event. */
bool use_cas; /* whether CAS support is enabled */
enum protocol binding_protocol; /* text, binary, or auto-negotiated */
int backlog; /* second argument passed to listen() */
int item_size_max; /* Maximum item size, and upper end for slabs */
bool sasl; /* SASL on/off */
bool maxconns_fast; /* Whether or not to early close connections */
bool lru_crawler; /* Whether or not to enable the autocrawler thread */
bool lru_maintainer_thread; /* LRU maintainer background thread */
bool slab_reassign; /* Whether or not slab reassignment is allowed */
int slab_automove; /* Whether or not to automatically move slabs */
int hashpower_init; /* Starting hash power level */
bool shutdown_command; /* allow shutdown command */
int tail_repair_time; /* LRU tail refcount leak repair time */
bool flush_enabled; /* flush_all enabled */
char *hash_algorithm; /* Hash algorithm in use */
int lru_crawler_sleep; /* Microsecond sleep between items */
uint32_t lru_crawler_tocrawl; /* Number of items to crawl per run */
int hot_lru_pct; /* percentage of slab space for HOT_LRU */
int warm_lru_pct; /* percentage of slab space for WARM_LRU */
int crawls_persleep; /* Number of LRU crawls to run before sleeping */
bool expirezero_does_not_evict; /* exptime == 0 goes into NOEXP_LRU */
};
Memcached 参数
memcached是使用getopt和getsubopt解析命令行参数的,下面给出的这些命令行选项与这两个解析函数有关:凡是选项后面有冒号的就表示这个选项必须要有一个参数;没有冒号的就没有参数;memcached没有使用双冒号的选项。关于getopt和getsubopt的具体使用可以参考《getopt和getsubopt命令行解析函数》。
- "A" 是否允许客户端使用shutdown命令。默认是不允许的。该选项将允许。客户端的shutdown命令会将memcached进程杀死。该选项会将settings.shutdown_command赋值为true
- "a:" unix socket的权限位信息(访问掩码)。该选项的参数赋值给settings.access
- "U:" 大写U。memcached监听的UDP端口值,默认端口为11211。该选项的参数赋值给settings.udpport
- "p:" 小写p,memcached监听的tcp端口。默认端口为11211, 该选项的参数赋值给settings.port
- "s:" 小写s。unix socket监听的socket路径。该选项的参数赋值给settings.socketpath
- "m:" 小写m。memcached能够使用的最大内存值,默认是64MB。参数单位为MB。该参数赋值给settings.maxbytes
- "M" 大写M。默认情况下,当memcached的内存使用完后,将进行LRU机制淘汰item以腾出空间。如果使用本选项那么将关闭LRU功能。当然关闭LRU不代表不能存储新数据。如果memcached里面存有过期失效的item,那么就可以存储新数据。否则将无法存储。该选项将settings.evict_to_free赋值为0。
- "c:" 小写c。最多允许多少个客户端同时在线(这个值不等价于listen函数的第二个参数),该选项和后面的b选项有所不同。 默认值为1024个。该选项参数赋值给settings.maxconns。
- "h" 显示帮助信息
- "i" 显示memcached和libevent的版权信息
- "k" 小写k。将memcached使用到的内存锁定在内存中,不准OS把memcached的内存移动到虚拟内存。因为当OS把memcached的内存移动到虚拟内存可能会导致页错误,降低memcached的响应时间
- "v" 小写v。输出memcached运行时的一些信息。-v -vv -vvv输出的信息依次增加。该选项会增加settings.verbose的值
- "l:" 小写L。memcached绑定的ip地址。如果不设置这个选项,那么memcached将使用INADDR_ANY。如果想指定多个IP地址,那么该选项的参数可以由多个ip组成,ip之间用逗号分隔。也可以多次使用这个选项,此时端口应该尾随ip而不是单独用-p选项指定。例如-l 127.0.0.1:8888,192.168.1.112:9999 或者 -l 127.0.0.1:8888 -l 192.168.1.112:9999该选项参数将赋值给settings.inter
- "d" 以守护进程的形式运行memcached
- "r" 将core文件大小设置为不受限制
- "R:" worker线程连续为某个客户端执行命令的最大命令数。该选项的参数赋值给settings.reqs_per_event
- "u:" 小写u。当以root用户启动memcached的时候需要指定memcached的所属用户,其他用户启动memcached不需要此选项
- "P:" 大写P。该选项的参数指明memcached的pid保存文件。要和-d选项配合使用。注意运行的用户是否有权限写对应的文件
- "f:" item的扩容因子。默认值为1.25。该选项的参数值可以是小数但必须大于1.0。该选项参数将赋值给settings.factor
- "n:" 设置最小的item能存储多少字节的数据。该选项参数赋值给settings.chunk_size
- "t:" 该选项的参数用于指定worker线程的个数,不建议超过64个。如果不设置该选项默认有4个线程。该参数会赋值给settings.num_threads
- "D:" 参数字符作为前缀和ID的分隔符。使用了该选项才会自动收集状态信息。也可以在启动memcached后,客户端使用stats detail on命令开启,此时默认的分隔符为冒号":"。该选项参数会赋值为settings.prefix_delimiter,并将settings.detail_enabled赋值为1
- "L" 如果OS允许的话,那么向OS申请更大的内存页。OS的默认内存页为4KB。大的内存页可以有效降低页表的大小,提高效率。此选项会使得memcached预先先OS全部所需的申请内存。当然这些内存尽量是用大内存页分配的
- "C:" 大写C。memcached默认是使用CAS的,本选项是禁用CAS。本选项会将settings.use_cas赋值为false
- "b:" listen函数的第二个参数。该选项的参数赋值给settings.backlog。如果不设置该选项,那么默认为1024。该选项和前面的c选项有所不同
- "B:" memcached支持文本协议和二进制协议。该选项的参数用于指定使用的协议。默认情况下是根据客户端的命令而自动判断(也叫协商),参数只能取auto、binary、ascii这三个字符串值。将参数将赋值给settings.binding_protocol
- "I:" 大写I。slab分配器中,每一个页的大小。这个选项的参数是一个数值表示页的大小。默认单位是B也可以在数值后面带K或者M(大小写都行),表示KB和MB。页的大小小于1KB或者大于128MB都是不允许的。不推荐使用该选项。本选项参数会赋值给settings.item_size_max
- "S" 大写S。打开sasl安全协议。会将settings.sasl赋值为true
- "F" 禁止客户端的flush_all命令。默认是允许客户端的flush_all命令的。该选项将settings.flush_enabled赋值为false
- "o:" 小写o。有下面几个子选项可以设置。这个选项是用来优化的
- maxconns_fast: 如果连接数超过了最大同时在线数(由-c选项指定),立即关闭新连接上的客户端。该选项将settings.maxconns_fast赋值为true
- hashpower: 哈希表的长度是2^n。可以通过选项hashpower设置指数n的初始值。如果不设置将取默认值16。该选项必须有参数,参数取值范围只能为[12, 64]。本选项参数值赋值给settings.hashpower_init
- slab_reassign: 该选项没有参数。用于调节不同类型的item所占的内存。不同类型是指大小不同。某一类item已经很少使用了,但仍占用着内存。可以通过开启slab_reassign调度内存,减少这一类item的内存。如果使用了本选项,settings.slab_reassign赋值为true
- slab_automove: 依赖于slab_reassign。用于主动检测是否需要进行内存调度。该选项的参数是可选的。参数的取值范围只能为0、1、2。参数2是不建议的。本选项参数赋值给settings.slab_automove。如果本选项没有参数,那么settings.slab_automove赋值为1
- hash_algorithm: 用于指定哈希算法。该选项必须带有参数。并且参数只能是字符串jenkins或者murmur3
- tail_repair_time: 用于检测是否有item被已死线程所引用。一般不会出现这种情况,所以默认不开启这种检测。如果需要开启这种检测,那么需要使用本选项。本选项需要一个参数,参数值必须不小于10。该参数赋值给settings.tail_repair_time
- lru_crawler: 本选项用于启动LRU爬虫线程。该选项不需要参数。本选项会导致settings.lru_crawler赋值为true
- lru_crawler_sleep: LRU爬虫线程工作时的休眠间隔。本选项需要一个参数作为休眠时间,单位为微秒,取值范围是[0, 1000000]。该参数赋值给settings.lru_crawler_sleep
- lru_crawler_tocrawl: LRU爬虫检查每条LRU队列中的多少个item。该选项带有一个参数。参数会赋值给settings.lru_crawler_tocrawl
init_lru_crawler();
init_lru_maintainer();
初始化队列信息,用于内存管理详见item.c
/* set stderr non-buffering (for running under, say, daemontools) */
setbuf(stderr, NULL);
setbuf是C标准库函数,主要用于打开和关闭流的缓冲机制。这里传入NULL,把stderr设置为无缓冲,便于在daemontools等工具下及时看到错误输出。
while (-1 != (c = getopt(argc, argv,
"a:" /* access mask for unix socket */
"A" /* enable admin shutdown commannd */
"p:" /* TCP port number to listen on */
"s:" /* unix socket path to listen on */
"U:" /* UDP port number to listen on */
"m:" /* max memory to use for items in megabytes */
"M" /* return error on memory exhausted */
"c:" /* max simultaneous connections */
"k" /* lock down all paged memory */
"hiV" /* help, licence info, version */
"r" /* maximize core file limit */
"v" /* verbose */
"d" /* daemon mode */
"l:" /* interface to listen on */
"u:" /* user identity to run as */
"P:" /* save PID in file */
"f:" /* factor? */
"n:" /* minimum space allocated for key+value+flags */
"t:" /* threads */
"D:" /* prefix delimiter? */
"L" /* Large memory pages */
"R:" /* max requests per event */
"C" /* Disable use of CAS */
"b:" /* backlog queue limit */
"B:" /* Binding protocol */
"I:" /* Max item size */
"S" /* Sasl ON */
"F" /* Disable flush_all */
"o:" /* Extended generic options */
))) {
启动Memcached时输入各项参数,以下是对Memcached各个参数的赋值。
/* Select and initialize the hash function implementation (jenkins by default). */
if (hash_init(hash_type) != 0) {
fprintf(stderr, "Failed to initialize hash_algorithm!\n");
exit(EX_USAGE);
}
hash算法的实现方法选择
/* -r option: raise the core-file size limit so crash dumps can be written. */
if (maxcore != 0) {
struct rlimit rlim_new;
/*
 * First try raising to infinity; if that fails, try bringing
 * the soft limit to the hard.
 */
if (getrlimit(RLIMIT_CORE, &rlim) == 0) {
rlim_new.rlim_cur = rlim_new.rlim_max = RLIM_INFINITY;
if (setrlimit(RLIMIT_CORE, &rlim_new)!= 0) {
/* failed. try raising just to the old max */
rlim_new.rlim_cur = rlim_new.rlim_max = rlim.rlim_max;
(void)setrlimit(RLIMIT_CORE, &rlim_new);
}
}
/*
 * getrlimit again to see what we ended up with. Only fail if
 * the soft limit ends up 0, because then no core files will be
 * created at all.
 */
if ((getrlimit(RLIMIT_CORE, &rlim) != 0) || rlim.rlim_cur == 0) {
fprintf(stderr, "failed to ensure corefile creation\n");
exit(EX_OSERR);
}
}
/*
 * If needed, increase rlimits to allow as many connections
 * as needed.
 */
if (getrlimit(RLIMIT_NOFILE, &rlim) != 0) {
fprintf(stderr, "failed to getrlimit number of files\n");
exit(EX_OSERR);
} else {
/* Each connection needs a file descriptor, so the open-files limit
 * must cover settings.maxconns (-c option). */
rlim.rlim_cur = settings.maxconns;
rlim.rlim_max = settings.maxconns;
if (setrlimit(RLIMIT_NOFILE, &rlim) != 0) {
fprintf(stderr, "failed to set rlimit for open files. Try starting as root or requesting smaller maxconns value.\n");
exit(EX_OSERR);
}
}
设置系统资源
/* lose root privileges if we have them */
if (getuid() == 0 || geteuid() == 0) {
/* Running as root requires an explicit -u target user. */
if (username == 0 || *username == '\0') {
fprintf(stderr, "can't run as root without the -u switch\n");
exit(EX_USAGE);
}
/* Look up the uid/gid of the user named by -u. */
if ((pw = getpwnam(username)) == 0) {
fprintf(stderr, "can't find the user %s to switch to\n", username);
exit(EX_NOUSER);
}
/* setgid() must come first: once setuid() has dropped root,
 * changing the group would no longer be permitted. */
if (setgid(pw->pw_gid) < 0 || setuid(pw->pw_uid) < 0) {
fprintf(stderr, "failed to assume identity of user %s\n", username);
exit(EX_OSERR);
}
}
设置用户及权限
/* Initialize Sasl if -S was specified */
if (settings.sasl) {
/* settings.sasl can only be true when built with ENABLE_SASL;
 * see the 'S' option handler. */
init_sasl();
}
设置是否启用Sasl验证功能,详见:http://lguan.iteye.com/blog/1279537
case 'S': /* set Sasl authentication to true. Default is false */
#ifndef ENABLE_SASL
fprintf(stderr, "This server is not built with SASL support.\n");
exit(EX_USAGE);
#endif
settings.sasl = true;
break;
/* -d option: detach from the terminal and run in the background. */
if (do_daemonize) {
/* A daemon must not die when its controlling terminal hangs up. */
if (sigignore(SIGHUP) == -1) {
perror("Failed to ignore SIGHUP");
}
if (daemonize(maxcore, settings.verbose) == -1) {
fprintf(stderr, "failed to daemon() in order to daemonize\n");
exit(EXIT_FAILURE);
}
}
设置后台运行程序
/* initialize main thread libevent instance */
main_base = event_init();
初始化main_base,利用libevent网络核心库
/* initialize other stuff */
stats_init(); /* global statistics counters */
assoc_init(settings.hashpower_init); /* the hash table, sized 2^hashpower_init */
conn_init(); /* connection bookkeeping structures */
slabs_init(settings.maxbytes, settings.factor, preallocate); /* slab allocator */
继续初始化其他的东西,Memcached的变量很多
/*
 * ignore SIGPIPE signals; we can use errno == EPIPE if we
 * need that information
 */
/* Without this, writing to a connection closed by the peer would
 * deliver SIGPIPE and kill the whole server process. */
if (sigignore(SIGPIPE) == -1) {
perror("failed to ignore SIGPIPE; sigaction");
exit(EX_OSERR);
}
忽略SIGPIPE信号,防止程序服务端无故退出。尤其是在断网的情况下,
/* start up worker threads if MT mode */
memcached_thread_init(settings.num_threads, main_base);
/* Background thread for assoc (hash table) maintenance — presumably
 * handles table expansion; see assoc.c to confirm. */
if (start_assoc_maintenance_thread() == -1) {
exit(EXIT_FAILURE);
}
/* LRU crawler thread; start_lru_crawler is set via -o lru_crawler. */
if (start_lru_crawler && start_item_crawler_thread() != 0) {
fprintf(stderr, "Failed to enable LRU crawler thread\n");
exit(EXIT_FAILURE);
}
if (start_lru_maintainer && start_lru_maintainer_thread() != 0) {
fprintf(stderr, "Failed to enable LRU maintainer thread\n");
/* NOTE(review): this path uses `return 1` while sibling failures call
 * exit(EXIT_FAILURE); in main() the effect is equivalent. */
return 1;
}
/* Slab rebalancer thread, only when -o slab_reassign was given. */
if (settings.slab_reassign &&
start_slab_maintenance_thread() == -1) {
exit(EXIT_FAILURE);
}
启动程序的各个线程。
/* initialise clock event */
clock_handler(0, 0, 0);
/* libevent uses a monotonic clock when available for event scheduling. Aside
* from jitter, simply ticking our internal timer here is accurate enough.
* Note that users who are setting explicit dates for expiration times *must*
* ensure their clocks are correct before starting memcached. */
/* Timer callback, fired roughly once per second. It refreshes the cached
 * current_time so hot paths read a variable instead of making a syscall,
 * and re-arms itself on main_base. The first call (from main) also performs
 * one-time setup guarded by the static `initialized` flag. */
static void clock_handler(const int fd, const short which, void *arg) {
struct timeval t = {.tv_sec = 1, .tv_usec = 0};
static bool initialized = false;
#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
static bool monotonic = false;
static time_t monotonic_start;
#endif
if (initialized) {
/* only delete the event if it's actually there. */
evtimer_del(&clockevent);
} else {
initialized = true;
/* process_started is initialized to time() - 2. We initialize to 1 so
 * flush_all won't underflow during tests. */
#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
struct timespec ts;
if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0) {
/* Record the monotonic-clock origin so later reads can be
 * converted to a small relative time. */
monotonic = true;
monotonic_start = ts.tv_sec - ITEM_UPDATE_INTERVAL - 2;
}
#endif
}
/* Re-arm: schedule this handler to run again one second from now. */
evtimer_set(&clockevent, clock_handler, 0);
event_base_set(main_base, &clockevent);
evtimer_add(&clockevent, &t);
#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
if (monotonic) {
struct timespec ts;
if (clock_gettime(CLOCK_MONOTONIC, &ts) == -1)
return;
current_time = (rel_time_t) (ts.tv_sec - monotonic_start);
return;
}
#endif
/* Fallback when no monotonic clock is available: wall clock time
 * relative to process start. */
{
struct timeval tv;
gettimeofday(&tv, NULL);
current_time = (rel_time_t) (tv.tv_sec - process_started);
}
}
初始化时钟。
/* create unix mode sockets after dropping privileges */
if (settings.socketpath != NULL) {
errno = 0;
if (server_socket_unix(settings.socketpath,settings.access)) {
vperror("failed to listen on UNIX socket: %s", settings.socketpath);
exit(EX_OSERR);
}
}
/* create the listening socket, bind it, and init */
if (settings.socketpath == NULL) {
/* NOTE(review): portnumber_file is handed to server_sockets(),
 * presumably so it can record the actually-bound port numbers —
 * confirm in server_sockets(). */
const char *portnumber_filename = getenv("MEMCACHED_PORT_FILENAME");
char temp_portnumber_filename[PATH_MAX];
FILE *portnumber_file = NULL;
if (portnumber_filename != NULL) {
/* Write into a ".lck" temp file first; it is renamed into place
 * below so readers never see a partially written file. */
snprintf(temp_portnumber_filename,
sizeof(temp_portnumber_filename),
"%s.lck", portnumber_filename);
portnumber_file = fopen(temp_portnumber_filename, "a");
if (portnumber_file == NULL) {
fprintf(stderr, "Failed to open \"%s\": %s\n",
temp_portnumber_filename, strerror(errno));
}
}
errno = 0;
if (settings.port && server_sockets(settings.port, tcp_transport,
portnumber_file)) {
vperror("failed to listen on TCP port %d", settings.port);
exit(EX_OSERR);
}
/*
 * initialization order: first create the listening sockets
 * (may need root on low ports), then drop root if needed,
 * then daemonise if needed, then init libevent (in some cases
 * descriptors created by libevent wouldn't survive forking).
 */
/* create the UDP listening socket and bind it */
errno = 0;
if (settings.udpport && server_sockets(settings.udpport, udp_transport,
portnumber_file)) {
vperror("failed to listen on UDP port %d", settings.udpport);
exit(EX_OSERR);
}
if (portnumber_file) {
fclose(portnumber_file);
/* Publish the completed port file via rename. */
rename(temp_portnumber_filename, portnumber_filename);
}
}
创建服务端socket,也设置了libevent事件处理
/* enter the event loop */
/* The main thread blocks here dispatching libevent events until the
 * loop is stopped; a nonzero return is reported as a failed exit. */
if (event_base_loop(main_base, 0) != 0) {
retval = EXIT_FAILURE;
}
主服务器开始循环监听客户端的消息。