Analysis of the slab_alloc function

static __always_inline void *slab_alloc(struct kmem_cache *s,
                gfp_t gfpflags, int node, unsigned long addr)
{
        void **object;
        struct kmem_cache_cpu *c;
        unsigned long flags;
        unsigned int objsize;
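        // Mask off GFP flags that are globally disallowed right now
        // (gfp_allowed_mask is restricted during early boot and around
        // suspend/resume).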
        gfpflags &= gfp_allowed_mask;
        lockdep_trace_alloc(gfpflags);
        might_sleep_if(gfpflags & __GFP_WAIT);
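        // Fault-injection hook (failslab): may deliberately fail this
        // allocation when fault injection is enabled.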
        if (should_failslab(s->objsize, gfpflags))
                return NULL;
        // Save the current interrupt state into flags and disable interrupts
        // on this CPU. When the call chain may already run with interrupts
        // disabled, use local_irq_save()/local_irq_restore(), not
        // local_irq_disable() and local_irq_enable().
        local_irq_save(flags);
        // This CPU's kmem_cache_cpu structure; see
        // http://hi.baidu.com/_kouu/item/7c0cf80d4d29c7e1ff240dd1
        c = get_cpu_slab(s, smp_processor_id());
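        // Snapshot the object size while interrupts are still disabled; it is
        // needed below, after local_irq_restore(), when the task may have been
        // migrated away from this CPU.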
        objsize = c->objsize;
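        // Slow path: the per-CPU freelist is empty, or the cached slab is on
        // the wrong NUMA node; __slab_alloc() refills the freelist from the
        // partial lists or grabs a new slab from the page allocator.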
        if (unlikely(!c->freelist || !node_match(c, node)))
                object = __slab_alloc(s, gfpflags, node, addr, c);
        else {
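                // Fast path: pop the first object off the per-CPU freelist.
                // The pointer to the next free object is stored inside the
                // free object itself, at word offset c->offset.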
                object = c->freelist;
                c->freelist = object[c->offset];
                stat(c, ALLOC_FASTPATH);
        }
        local_irq_restore(flags);       // paired with local_irq_save() above
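        // The __GFP_ZERO memset below runs after local_irq_restore(), so
        // zeroing a large object does not lengthen the interrupts-off window.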
        if (unlikely((gfpflags & __GFP_ZERO) && object))
                memset(object, 0, objsize);
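        // Report the new object to the kmemcheck and kmemleak debugging
        // frameworks.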
        kmemcheck_slab_alloc(s, gfpflags, object, c->objsize);
        kmemleak_alloc_recursive(object, objsize, 1, s->flags, gfpflags);
        return object;
}
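
To make the fast path concrete, here is a minimal user-space sketch of the same freelist scheme. This is not kernel code: toy_cache, toy_init, and toy_alloc are invented names, and the "slab" is just a malloc'd buffer. It only shows how SLUB threads the free list through the free objects themselves, which is exactly what c->freelist = object[c->offset] walks; the offset is counted in pointer-sized words, which is why indexing a void ** works.

#include <stdio.h>
#include <stdlib.h>

struct toy_cache {
        void **freelist;        // first free object, or NULL
        size_t size;            // object size in bytes
        size_t offset;          // free-pointer offset, in void * words
};

// Carve a buffer into nobjs objects and link them into a free list.
static void toy_init(struct toy_cache *c, void *buf, size_t nobjs)
{
        char *p = buf;
        size_t i;

        c->freelist = (void **)p;
        for (i = 0; i + 1 < nobjs; i++, p += c->size)
                *((void **)p + c->offset) = p + c->size;
        *((void **)p + c->offset) = NULL;
}

// The equivalent of the ALLOC_FASTPATH branch in slab_alloc().
static void *toy_alloc(struct toy_cache *c)
{
        void **object = c->freelist;

        if (!object)
                return NULL;    // the kernel would fall into __slab_alloc()
        c->freelist = object[c->offset];
        return object;
}

int main(void)
{
        struct toy_cache c = { .freelist = NULL, .size = 64, .offset = 0 };
        void *buf = malloc(8 * c.size);

        if (!buf)
                return 1;
        toy_init(&c, buf, 8);
        printf("first:  %p\n", toy_alloc(&c));  // buf
        printf("second: %p\n", toy_alloc(&c));  // buf + 64
        free(buf);
        return 0;
}

In the real allocator the free pointer normally sits at offset 0 inside the object; it is moved past the object (a non-zero offset) for caches with a constructor, poisoning, or RCU freeing, where the start of a free object must not be overwritten.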