Refer:
《深入剖析Nginx》 Chapter 4.1 内存池
《深入理解Nginx–模块开发与架构解析》 Chapter 8.2.6 内存池设计、8.7 ngx_pool_t 内存池
Nginx源码版本:
nginx-1.11.6
Nginx以高效、低内存消耗等特点著称。这些特点体现在了Nginx实现中的方方面面。如内存池(ngx_pool_t)的设计,通过预分配一大块内存,作为内存池,当需要申请的内存比较小时,即小块内存,则直接从内存池中已经分配好的内存中进行申请;当申请大块内存时,则直接从堆上进行分配,并挂载在内存池的管理结构中。通过这样的设计,减少了向操作系统申请内存的次数,既减少了系统调用的开销,又可以尽量避免出现内存碎片,其内部采取的内存对齐,还能进一步提高IO效率;同时,通过统一管理内存池的分配和释放,降低了模块开发的复杂度。当然,因为需要存储内存池管理结构,这里必然会损耗一些内存用于此。不过,同内存池带来的优点相比,因存储管理结构而带来的内存损耗几乎可以忽略不计。下面我们来详细分析一下,Nginx内存池到底是怎样实现的:
为什么要设计内存池:
1、尽量避免出现内存碎片;
2、减少向操作系统申请内存的次数;
(把需要多次向系统申请内存的操作整合成一次,大大减少了CPU资源的消耗【系统调用是非常耗资源的】)
3、降低各模块开发的复杂度。
(模块开发者只需要关心内存的分配,而释放则交给内存池来负责。
Nginx为每一个TCP连接都分配了一个内存池,
HTTP框架为每一个HTTP请求都分配了一个内存池【在请求结束时销毁整个内存池,把曾经分配的内存一次性归还给操作系统】)
因此Nginx设计了简单的内存池。
分析nginx内存池ngx_pool_t的实现,我们从相应的数据结构和API接口入手:
typedef struct ngx_pool_s ngx_pool_t;
// Pool control structure; the first block of a pool carries a full ngx_pool_t header.
struct ngx_pool_s {
ngx_pool_data_t d; // data-block header; when the preallocated space cannot satisfy a small allocation, another block is allocated and the blocks form a singly linked list through d.next
size_t max; // upper bound for a "small" allocation; requests larger than max are treated as large blocks
ngx_pool_t *current; // when several small blocks form a list, current points to the first block tried during allocation
ngx_chain_t *chain; // NOTE(review): presumably a cache of free ngx_chain_t links for buffer chains — verify against ngx_buf.c
ngx_pool_large_t *large; // singly linked list tracking large allocations (size > max) taken directly from the heap
ngx_pool_cleanup_t *cleanup; // cleanup handlers (note: the pool relieves the programmer not only of freeing memory, but also of releasing resources such as open files)
// all resources pending cleanup (e.g. files to close or delete) are linked as ngx_pool_cleanup_t nodes on the cleanup list
ngx_log_t *log; // log object used when pool operations emit log output
};
typedef struct {
u_char *last; // first address of the still-unallocated (free) region of this block
u_char *end; // end address of this small block
ngx_pool_t *next; // links the small blocks that belong to the same pool
ngx_uint_t failed; // incremented each time this block's remaining space cannot satisfy a small allocation;
// once failed exceeds 4, the pool's current pointer moves to the next block
} ngx_pool_data_t;
关于ngx_pool_t相关的数据结构以及其字段相关意义,见下图:
特别鸣谢:
图片来源:http://www.cnblogs.com/doop-ymc/p/3418514.html
关键函数源码分析:
nginx-1.11.6/src/core/ngx_palloc.c
/*
 * Create a memory pool.
 * Note: the size parameter is not the usable space; the management structure
 * must be subtracted first: size = size - sizeof(ngx_pool_t).
 * This also means size must never be smaller than sizeof(ngx_pool_t),
 * otherwise out-of-bounds memory accesses would occur.
 * #define NGX_DEFAULT_POOL_SIZE (16 * 1024)
 * size may be set to NGX_DEFAULT_POOL_SIZE, i.e. 16KB.
 */
18 ngx_pool_t *
19 ngx_create_pool(size_t size, ngx_log_t *log)
20 {
21 ngx_pool_t *p;
22
// #define NGX_POOL_ALIGNMENT 16
23 p = ngx_memalign(NGX_POOL_ALIGNMENT, size, log);
24 if (p == NULL) {
25 return NULL;
26 }
27
// the beginning of every block in the pool holds its management structure: ngx_pool_t / ngx_pool_data_t
28 p->d.last = (u_char *) p + sizeof(ngx_pool_t);
29 p->d.end = (u_char *) p + size;
30 p->d.next = NULL;
31 p->d.failed = 0;
32
33 size = size - sizeof(ngx_pool_t);
/*
 * #define NGX_MAX_ALLOC_FROM_POOL (ngx_pagesize - 1)
 * NGX_MAX_ALLOC_FROM_POOL should be (ngx_pagesize - 1), i.e. 4095 bytes on x86.
 * An additional cap: even when the pool itself is large, a request of
 * ngx_pagesize bytes or more is still treated as a large allocation — it is
 * taken from the process heap and linked on the large list rather than
 * carved out of this pool.
 * Conversely, when the pool is small, any request larger than
 * size - sizeof(ngx_pool_t) must also go through the large-allocation path.
 */
34 p->max = (size < NGX_MAX_ALLOC_FROM_POOL) ? size : NGX_MAX_ALLOC_FROM_POOL;
35
36 p->current = p;
37 p->chain = NULL;
38 p->large = NULL;
39 p->cleanup = NULL;
40 p->log = log;
41
42 return p;
43 }
/*
 * Destroy a memory pool.
 * Frees all memory handed out by this pool (small blocks and large blocks);
 * it also runs every cleanup handler registered via ngx_pool_cleanup_add,
 * so non-memory resources (files, etc.) are released as well.
 */
46 void
47 ngx_destroy_pool(ngx_pool_t *pool)
48 {
49 ngx_pool_t *p, *n;
50 ngx_pool_large_t *l;
51 ngx_pool_cleanup_t *c;
52
/*
 * struct ngx_pool_cleanup_s {
 * ngx_pool_cleanup_pt handler;
 * void *data;
 * ngx_pool_cleanup_t *next;
 * };
 *
 * First release the pending resources — memory, file handles, and the like.
 */
53 for (c = pool->cleanup; c; c = c->next) {
54 if (c->handler) {
55 ngx_log_debug1(NGX_LOG_DEBUG_ALLOC, pool->log, 0,
56 "run cleanup: %p", c);
57 c->handler(c->data);
58 }
59 }
60
61 #if (NGX_DEBUG)
62
63 /*
64 * we could allocate the pool->log from this pool
65 * so we cannot use this log while free()ing the pool
66 */
67
68 for (l = pool->large; l; l = l->next) {
69 ngx_log_debug1(NGX_LOG_DEBUG_ALLOC, pool->log, 0, "free: %p", l->alloc);
70 }
71
72 for (p = pool, n = pool->d.next; /* void */; p = n, n = n->d.next) {
73 ngx_log_debug2(NGX_LOG_DEBUG_ALLOC, pool->log, 0,
74 "free: %p, unused: %uz", p, p->d.end - p->d.last);
75
76 if (n == NULL) {
77 break;
78 }
79 }
80
81 #endif
82
// free the large blocks first (their ngx_pool_large_t nodes live inside the small blocks)
83 for (l = pool->large; l; l = l->next) {
84 if (l->alloc) {
85 ngx_free(l->alloc);
86 }
87 }
88
// free the small blocks themselves
89 for (p = pool, n = pool->d.next; /* void */; p = n, n = n->d.next) {
90 ngx_free(p);
91
92 if (n == NULL) {
93 break;
94 }
95 }
96 }
/*
 * Reset a memory pool.
 * Large blocks are freed back to the heap;
 * small blocks are not freed — their space is rewound and reused.
 * NOTE(review): every block's d.last is rewound by sizeof(ngx_pool_t), even
 * though blocks after the first only carry a ngx_pool_data_t header — a small
 * space waste present in the original nginx source.
 */
99 void
100 ngx_reset_pool(ngx_pool_t *pool)
101 {
102 ngx_pool_t *p;
103 ngx_pool_large_t *l;
104
// free the large blocks
105 for (l = pool->large; l; l = l->next) {
106 if (l->alloc) {
107 ngx_free(l->alloc);
108 }
109 }
110
// rewind every small block so its space can be reused
111 for (p = pool; p; p = p->d.next) {
112 p->d.last = (u_char *) p + sizeof(ngx_pool_t);
113 p->d.failed = 0;
114 }
115
116 pool->current = pool;
117 pool->chain = NULL;
118 pool->large = NULL;
119 }
/*
 * Allocate size bytes from the pool (aligned).
 * Small requests (size <= pool->max) are served from the preallocated
 * blocks; anything larger is taken from the heap via ngx_palloc_large.
 */
122 void *
123 ngx_palloc(ngx_pool_t *pool, size_t size)
124 {
125 #if !(NGX_DEBUG_PALLOC)
// small allocation
126 if (size <= pool->max) {
127 return ngx_palloc_small(pool, size, 1);
128 }
129 #endif
130
// large allocation
131 return ngx_palloc_large(pool, size);
132 }
/*
 * Serve a small allocation from the pool's existing blocks,
 * optionally aligning the returned address.
 */
148 static ngx_inline void *
149 ngx_palloc_small(ngx_pool_t *pool, size_t size, ngx_uint_t align)
150 {
151 u_char *m;
152 ngx_pool_t *p;
153
154 p = pool->current;
155
156 do {
157 m = p->d.last;
158
159 if (align) {
/*
 * #define ngx_align_ptr(p, a)
 *     (u_char *) (((uintptr_t) (p) + ((uintptr_t) a - 1)) & ~((uintptr_t) a - 1))
 * #define NGX_ALIGNMENT sizeof(unsigned long)   -- platform word size
 *
 * Align the address m up to NGX_ALIGNMENT.
 */
160 m = ngx_align_ptr(m, NGX_ALIGNMENT);
161 }
162
/*
 * If the current block has enough room, allocate here and return m.
 * Otherwise keep walking the next chain [while (p)] looking for a block
 * with enough space, allocate from it and return.
 * If no existing block can satisfy the request, allocate a fresh small
 * block from the heap: return ngx_palloc_block(pool, size);
 */
163 if ((size_t) (p->d.end - m) >= size) {
164 p->d.last = m + size;
165
166 return m;
167 }
168
169 p = p->d.next;
170
171 } while (p);
172
173 return ngx_palloc_block(pool, size);
174 }
/*
 * Allocate a new small block from the heap, append it to the pool's
 * block chain, and serve the pending request from it.
 */
177 static void *
178 ngx_palloc_block(ngx_pool_t *pool, size_t size)
179 {
180 u_char *m;
181 size_t psize;
182 ngx_pool_t *p, *new;
183
184 psize = (size_t) (pool->d.end - (u_char *) pool);
185
// allocate a new block from the heap, with the same size as the first block (see the line above)
186 m = ngx_memalign(NGX_POOL_ALIGNMENT, psize, pool->log);
187 if (m == NULL) {
188 return NULL;
189 }
190
191 new = (ngx_pool_t *) m;
192
193 new->d.end = m + psize;
194 new->d.next = NULL;
195 new->d.failed = 0;
196
// the new block's management header is no longer a full ngx_pool_t; a ngx_pool_data_t is enough
197 m += sizeof(ngx_pool_data_t);
198 m = ngx_align_ptr(m, NGX_ALIGNMENT);
199 new->d.last = m + size;
200
// the failed counters were not bumped in ngx_palloc_small — it happens here:
// every block that was skipped gets failed++, and once a block has failed
// more than 4 times pool->current moves past it so future scans start later
201 for (p = pool->current; p->d.next; p = p->d.next) {
202 if (p->d.failed++ > 4) {
203 pool->current = p->d.next;
204 }
205 }
206
// p is now the last block in the chain; append the new block
207 p->d.next = new;
208
209 return m;
210 }
/*
 * Allocate a large block directly from the heap and track it on pool->large
 * so ngx_destroy_pool / ngx_reset_pool can free it.
 */
213 static void *
214 ngx_palloc_large(ngx_pool_t *pool, size_t size)
215 {
216 void *p;
217 ngx_uint_t n;
218 ngx_pool_large_t *large;
219
// take the requested size straight from the heap
220 p = ngx_alloc(size, pool->log);
221 if (p == NULL) {
222 return NULL;
223 }
224
225 n = 0;
226
// if there is a reusable ngx_pool_large_t node (alloc slot empty), reuse it
227 for (large = pool->large; large; large = large->next) {
228 if (large->alloc == NULL) {
229 large->alloc = p;
230 return p;
231 }
232
// keep the reuse scan cheap: with n++ > 3, at most 5 list nodes are examined
233 if (n++ > 3) {
234 break;
235 }
236 }
237
// no reusable node found: allocate one from the pool itself and link it in at the head
238 large = ngx_palloc_small(pool, sizeof(ngx_pool_large_t), 1);
239 if (large == NULL) {
240 ngx_free(p);
241 return NULL;
242 }
243
244 large->alloc = p;
245 large->next = pool->large;
246 pool->large = large;
247
248 return p;
249 }
Nginx对内存池的典型应用场景:
Nginx中到底分配了哪些内存池?
这些内存池什么时候建立【ngx_create_pool()】的?
又在什么时候进行的销毁【ngx_destroy_pool()】?
事实上,Nginx建立了不止一个内存池,Nginx为每一个层级都会创建一个内存池,进行内存的管理,比如一个模块、TCP连接、HTTP请求等,在对应的生命周期结束的时候会销毁整个内存池,把分配的内存一次性归还给操作系统:
- ngx_conf_t(配置级别)
- 当worker进程创建时,Nginx也为之创建了一个内存池。Nginx核心的框架代码一直围绕着一个结构体展开,它就是ngx_cycle_t。每个进程都毫无例外的拥有唯一一个独立的ngx_cycle_t结构体。(进程级别)
- (模块级别)
- Nginx为每一个TCP连接都分配一个内存池(connection级别)
- Nginx HTTP框架为每一个HTTP请求都分配一个内存池(request级别)
模块开发者需要评估所申请内存的使用周期:
如果隶属于一个HTTP请求,则在请求的内存池(ngx_http_request_t->pool)上分配内存;
如果隶属于一个连接,则在连接的内存池(ngx_connection_t->pool)上分配内存;
如果一直伴随着模块,则可以在ngx_conf_t的内存池上分配内存。
- 当worker进程创建时,Nginx也为之创建了一个内存池
// Core per-cycle structure; every nginx process owns exactly one ngx_cycle_t.
struct ngx_cycle_s {
void ****conf_ctx;
ngx_pool_t *pool; // memory pool created along with the cycle (process-level lifetime)
ngx_log_t *log;
ngx_log_t new_log;
ngx_uint_t log_use_stderr; /* unsigned log_use_stderr:1; */
ngx_connection_t **files;
ngx_connection_t *free_connections;
ngx_uint_t free_connection_n;
ngx_module_t **modules;
ngx_uint_t modules_n;
ngx_uint_t modules_used; /* unsigned modules_used:1; */
ngx_queue_t reusable_connections_queue;
ngx_array_t listening;
ngx_array_t paths;
ngx_array_t config_dump;
ngx_rbtree_t config_dump_rbtree;
ngx_rbtree_node_t config_dump_sentinel;
ngx_list_t open_files;
ngx_list_t shared_memory;
ngx_uint_t connection_n;
ngx_uint_t files_n;
ngx_connection_t *connections;
ngx_event_t *read_events;
ngx_event_t *write_events;
ngx_cycle_t *old_cycle;
ngx_str_t conf_file;
ngx_str_t conf_param;
ngx_str_t conf_prefix;
ngx_str_t prefix;
ngx_str_t lock_file;
ngx_str_t hostname;
};
- Nginx为每一个TCP连接都分配一个内存池
// Connection object; one ngx_connection_t per TCP connection, preallocated in bulk.
struct ngx_connection_s {
void *data;
ngx_event_t *read;
ngx_event_t *write;
ngx_socket_t fd;
ngx_recv_pt recv;
ngx_send_pt send;
ngx_recv_chain_pt recv_chain;
ngx_send_chain_pt send_chain;
ngx_listening_t *listening;
off_t sent;
ngx_log_t *log;
/*
 * Memory pool.
 * Generally created when a new connection is accepted,
 * and destroyed when that connection ends.
 * Note that "connection" here refers to a successfully
 * established TCP connection; all ngx_connection_t
 * structures are preallocated.
 * The size of this pool is determined by the pool_size
 * member of the listening object above.
 */
ngx_pool_t *pool;
int type;
struct sockaddr *sockaddr;
socklen_t socklen;
ngx_str_t addr_text;
ngx_str_t proxy_protocol_addr;
in_port_t proxy_protocol_port;
#if (NGX_SSL || NGX_COMPAT)
ngx_ssl_connection_t *ssl;
#endif
struct sockaddr *local_sockaddr;
socklen_t local_socklen;
ngx_buf_t *buffer;
ngx_queue_t queue;
ngx_atomic_uint_t number;
ngx_uint_t requests;
unsigned buffered:8;
unsigned log_error:3; /* ngx_connection_log_error_e */
unsigned timedout:1;
unsigned error:1;
unsigned destroyed:1;
unsigned idle:1;
unsigned reusable:1;
unsigned close:1;
unsigned shared:1;
unsigned sendfile:1;
unsigned sndlowat:1;
unsigned tcp_nodelay:2; /* ngx_connection_tcp_nodelay_e */
unsigned tcp_nopush:2; /* ngx_connection_tcp_nopush_e */
unsigned need_last_buf:1;
#if (NGX_HAVE_AIO_SENDFILE || NGX_COMPAT)
unsigned busy_count:2;
#endif
#if (NGX_THREADS || NGX_COMPAT)
ngx_thread_task_t *sendfile_task;
#endif
};
因此,
内存池的创建在:
// Connection-level pool creation: when a TCP connection is accepted,
// a pool of ls->pool_size bytes is created for it.
void
ngx_event_accept(ngx_event_t *ev)
{
......
c->pool = ngx_create_pool(ls->pool_size, ev->log);
if (c->pool == NULL) {
ngx_close_accepted_connection(c);
return;
}
......
}
// Same pattern for datagram (recvmsg-based) connections:
// the per-connection pool is created right after the connection object.
void
ngx_event_recvmsg(ngx_event_t *ev)
{
......
c->pool = ngx_create_pool(ls->pool_size, ev->log);
if (c->pool == NULL) {
ngx_close_accepted_connection(c);
return;
}
......
}
内存池的销毁在:
// Connection-level pool destruction: the pool (if any) is destroyed
// when the accepted connection is closed.
static void
ngx_close_accepted_connection(ngx_connection_t *c)
{
......
if (c->pool) {
ngx_destroy_pool(c->pool);
}
......
}
// Closes an HTTP connection and destroys its memory pool.
void
ngx_http_close_connection(ngx_connection_t *c)
{
ngx_pool_t *pool;
ngx_log_debug1(NGX_LOG_DEBUG_HTTP, c->log, 0,
"close http connection: %d", c->fd);
#if (NGX_HTTP_SSL)
if (c->ssl) {
if (ngx_ssl_shutdown(c) == NGX_AGAIN) {
c->ssl->handler = ngx_http_close_connection;
return;
}
}
#endif
#if (NGX_STAT_STUB)
(void) ngx_atomic_fetch_add(ngx_stat_active, -1);
#endif
c->destroyed = 1;
// save the pool pointer first: after ngx_close_connection the connection
// object may be reused, but the pool must still be destroyed
pool = c->pool;
ngx_close_connection(c);
ngx_destroy_pool(pool);
}
- Nginx HTTP框架为每一个HTTP请求都分配一个内存池
// Per-request object; each HTTP request carries its own memory pool (see pool below).
struct ngx_http_request_s {
uint32_t signature; /* "HTTP" */
ngx_connection_t *connection;
void **ctx;
void **main_conf;
void **srv_conf;
void **loc_conf;
ngx_http_event_handler_pt read_event_handler;
ngx_http_event_handler_pt write_event_handler;
#if (NGX_HTTP_CACHE)
ngx_http_cache_t *cache;
#endif
ngx_http_upstream_t *upstream;
ngx_array_t *upstream_states;
/* of ngx_http_upstream_state_t */
/*
 * Memory pool used for this request.
 * Destroyed in the ngx_http_free_request method.
 * Its lifetime differs from the pool in ngx_connection_t:
 * when the request is released, the TCP connection may still be open —
 * the request's pool is destroyed then,
 * but the ngx_connection_t pool is not.
 */
ngx_pool_t *pool;
ngx_buf_t *header_in;
ngx_http_headers_in_t headers_in;
ngx_http_headers_out_t headers_out;
ngx_http_request_body_t *request_body;
time_t lingering_time;
time_t start_sec;
ngx_msec_t start_msec;
ngx_uint_t method;
ngx_uint_t http_version;
ngx_str_t request_line;
ngx_str_t uri;
ngx_str_t args;
ngx_str_t exten;
ngx_str_t unparsed_uri;
ngx_str_t method_name;
ngx_str_t http_protocol;
ngx_chain_t *out;
ngx_http_request_t *main;
ngx_http_request_t *parent;
ngx_http_postponed_request_t *postponed;
ngx_http_post_subrequest_t *post_subrequest;
ngx_http_posted_request_t *posted_requests;
ngx_int_t phase_handler;
ngx_http_handler_pt content_handler;
ngx_uint_t access_code;
ngx_http_variable_value_t *variables;
#if (NGX_PCRE)
ngx_uint_t ncaptures;
int *captures;
u_char *captures_data;
#endif
size_t limit_rate;
size_t limit_rate_after;
/* used to learn the Apache compatible response length without a header */
size_t header_size;
off_t request_length;
ngx_uint_t err_status;
ngx_http_connection_t *http_connection;
ngx_http_v2_stream_t *stream;
ngx_http_log_handler_pt log_handler;
ngx_http_cleanup_t *cleanup;
unsigned count:16;
unsigned subrequests:8;
unsigned blocked:8;
unsigned aio:1;
unsigned http_state:4;
/* URI with "/." and on Win32 with "//" */
unsigned complex_uri:1;
/* URI with "%" */
unsigned quoted_uri:1;
/* URI with "+" */
unsigned plus_in_uri:1;
/* URI with " " */
unsigned space_in_uri:1;
unsigned invalid_header:1;
unsigned add_uri_to_alias:1;
unsigned valid_location:1;
unsigned valid_unparsed_uri:1;
unsigned uri_changed:1;
unsigned uri_changes:4;
unsigned request_body_in_single_buf:1;
unsigned request_body_in_file_only:1;
unsigned request_body_in_persistent_file:1;
unsigned request_body_in_clean_file:1;
unsigned request_body_file_group_access:1;
unsigned request_body_file_log_level:3;
unsigned request_body_no_buffering:1;
unsigned subrequest_in_memory:1;
unsigned waited:1;
#if (NGX_HTTP_CACHE)
unsigned cached:1;
#endif
#if (NGX_HTTP_GZIP)
unsigned gzip_tested:1;
unsigned gzip_ok:1;
unsigned gzip_vary:1;
#endif
unsigned proxy:1;
unsigned bypass_cache:1;
unsigned no_cache:1;
/*
 * instead of using the request context data in
 * ngx_http_limit_conn_module and ngx_http_limit_req_module
 * we use the single bits in the request structure
 */
unsigned limit_conn_set:1;
unsigned limit_req_set:1;
#if 0
unsigned cacheable:1;
#endif
unsigned pipeline:1;
unsigned chunked:1;
unsigned header_only:1;
unsigned keepalive:1;
unsigned lingering_close:1;
unsigned discard_body:1;
unsigned reading_body:1;
unsigned internal:1;
unsigned error_page:1;
unsigned filter_finalize:1;
unsigned post_action:1;
unsigned request_complete:1;
unsigned request_output:1;
unsigned header_sent:1;
unsigned expect_tested:1;
unsigned root_tested:1;
unsigned done:1;
unsigned logged:1;
unsigned buffered:4;
unsigned main_filter_need_in_memory:1;
unsigned filter_need_in_memory:1;
unsigned filter_need_temporary:1;
unsigned allow_ranges:1;
unsigned subrequest_ranges:1;
unsigned single_range:1;
unsigned disable_not_modified:1;
unsigned stat_reading:1;
unsigned stat_writing:1;
unsigned stat_processing:1;
unsigned health_check:1;
/* used to parse HTTP headers */
ngx_uint_t state;
ngx_uint_t header_hash;
ngx_uint_t lowcase_index;
u_char lowcase_header[NGX_HTTP_LC_HEADER_LEN];
u_char *header_name_start;
u_char *header_name_end;
u_char *header_start;
u_char *header_end;
/*
 * a memory that can be reused after parsing a request line
 * via ngx_http_ephemeral_t
 */
u_char *uri_start;
u_char *uri_end;
u_char *uri_ext;
u_char *args_start;
u_char *request_start;
u_char *request_end;
u_char *method_end;
u_char *schema_start;
u_char *schema_end;
u_char *host_start;
u_char *host_end;
u_char *port_start;
u_char *port_end;
unsigned http_minor:16;
unsigned http_major:16;
};
因此,
内存池的创建在:
// Request-level pool creation: a pool of cscf->request_pool_size bytes is
// created first, then the ngx_http_request_t itself is allocated from it.
ngx_http_request_t *
ngx_http_create_request(ngx_connection_t *c)
{
ngx_pool_t *pool;
ngx_time_t *tp;
ngx_http_request_t *r;
ngx_http_log_ctx_t *ctx;
ngx_http_connection_t *hc;
ngx_http_core_srv_conf_t *cscf;
ngx_http_core_loc_conf_t *clcf;
ngx_http_core_main_conf_t *cmcf;
c->requests++;
hc = c->data;
cscf = ngx_http_get_module_srv_conf(hc->conf_ctx, ngx_http_core_module);
pool = ngx_create_pool(cscf->request_pool_size, c->log);
if (pool == NULL) {
return NULL;
}
r = ngx_pcalloc(pool, sizeof(ngx_http_request_t));
if (r == NULL) {
// the request object could not be allocated, so the pool it would have
// owned is destroyed immediately
ngx_destroy_pool(pool);
return NULL;
}
r->pool = pool;
......
}
内存池的销毁在:
// Request-level pool destruction: r itself lives inside r->pool, so the
// pointer is cleared and saved locally before the pool is destroyed.
void
ngx_http_free_request(ngx_http_request_t *r, ngx_int_t rc)
{
......
pool = r->pool;
r->pool = NULL;
ngx_destroy_pool(pool);
}
拓展
另,这里稍微拓展下:
有一个很棒的模块可以实时观察各Nginx进程中内存池的使用情况,对分析模块使用内存池的情况提供了很好的帮助。
ngx_http_debug_pool_module
http://tengine.taobao.org/document_cn/ngx_debug_pool_cn.html
https://github.com/chobits/ngx_debug_pool
实现原理很简单:通过一个ngx_pool_stat_t类型的数组来记录每个内存池的分配、使用、销毁等情况。具体的动作,通过在原有代码ngx_palloc.c | .h 中插入记录功能来实现。
// Per-call-site pool statistics node used by ngx_debug_pool.
struct ngx_pool_stat_s {
u_char *func; // NOTE(review): presumably the name of the function that created the pool(s) — confirm against ngx_debug_pool source
size_t size; // NOTE(review): presumably the total size of pools from this site — confirm against ngx_debug_pool source
size_t num; /* number of total pools */
size_t cnum; /* number of current used pools */
size_t lnum; /* number of calling ngx_palloc_large() */
ngx_pool_stat_t *next;
};
替换原有ngx_palloc.c | .h 代码:
$ cat config
ngx_addon_name=ngx_http_debug_pool_module
HTTP_MODULES="$HTTP_MODULES ngx_http_debug_pool_module"
NGX_ADDON_SRCS="$NGX_ADDON_SRCS $ngx_addon_dir/ngx_http_debug_pool_module.c"
# use ngx_debug_pool/ngx_palloc.* instead of src/core/ngx_palloc.*
# strip the stock ngx_palloc.h/.c from the build lists with sed, then add the
# module's replacement files so the instrumented allocator is compiled instead
CORE_DEPS=`echo "$CORE_DEPS" | sed "s/[^ \n\t]*ngx_palloc.h//g"`
CORE_DEPS="$CORE_DEPS $ngx_addon_dir/ngx_palloc.h"
CORE_INCS="-I $ngx_addon_dir $CORE_INCS"
CORE_SRCS=`echo "$CORE_SRCS" | sed "s/[^ \n\t]*ngx_palloc.c//g"`
NGX_ADDON_SRCS="$NGX_ADDON_SRCS $ngx_addon_dir/ngx_palloc.c"
# define NGX_DEBUG_POOL so the instrumentation is compiled in
have=NGX_DEBUG_POOL . auto/have