The slab allocator

http://www.cnblogs.com/ggzwtj/archive/2011/08/11/2135348.html

http://www.cnblogs.com/ggzwtj/archive/2011/08/12/2136027.html

Why use a slab allocator? Much of the time a running program is not allocating big chunks of memory but small structures such as task_struct; managing those allocations directly with the buddy system would be far too slow. Calling into the buddy system also has a noticeable impact on the system's data and instruction caches (the slab layer cuts down how often it is called). And if data is stored directly in pages handed out by the buddy system, object addresses always cluster near integer multiples of powers of two, which is bad for cache utilization: some cache lines get used heavily while others sit almost empty.

Each cache is described by a kmem_cache structure (which is invisible to the rest of the kernel, since it is defined inside a .c file):

struct kmem_cache {
	struct array_cache *array[NR_CPUS];	/* one entry per CPU */

	unsigned int batchcount;	/* number of objects filled in (or removed) per transfer */
	unsigned int limit;		/* maximum number of objects kept per CPU */
	unsigned int shared;

	unsigned int buffer_size;	/* size of the objects managed by this cache */
	u32 reciprocal_buffer_size;

	unsigned int flags;		/* flags (constant, never change) */
	unsigned int num;		/* number of objects per slab */

	unsigned int gfporder;		/* order of the slab (pages per slab = 2^gfporder) */

	gfp_t gfpflags;

	size_t colour;			/* number of cache colours */
	unsigned int colour_off;	/* colour offset; this seems to be the key */
	struct kmem_cache *slabp_cache;
	unsigned int slab_size;
	unsigned int dflags;

	void (*ctor)(struct kmem_cache *, void *);

	const char *name;
	struct list_head next;

#if STATS
	unsigned long num_active;
	unsigned long num_allocations;
	unsigned long high_mark;
	unsigned long grown;
	unsigned long reaped;
	unsigned long errors;
	unsigned long max_freeable;
	unsigned long node_allocs;
	unsigned long node_frees;
	unsigned long node_overflow;
	atomic_t allochit;
	atomic_t allocmiss;
	atomic_t freehit;
	atomic_t freemiss;
#endif
#if DEBUG
	int obj_offset;
	int obj_size;
#endif
	struct kmem_list3 *nodelists[MAX_NUMNODES];	/* per-node slab lists */
};

The kernel provides one array_cache instance per CPU:

struct array_cache {
	unsigned int avail;	/* number of objects currently available */
	unsigned int limit;
	unsigned int batchcount;
	unsigned int touched;	/* set to 1 when an object is taken from the cache,
				 * reset to 0 when the cache shrinks */
	spinlock_t lock;
	void *entry[];		/* flexible array member, for convenient access to the objects */
};
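
The entry[] array behaves as a LIFO stack of object pointers, which is exactly what the hot allocation and free paths shown later rely on. Here is a minimal userspace sketch of that behavior; toy_array_cache, toy_alloc and toy_free are illustrative names, not kernel code:

#include <assert.h>
#include <stdlib.h>

/* illustrative stand-in for the per-CPU array cache */
struct toy_array_cache {
	unsigned int avail;
	unsigned int limit;
	void *entry[8];
};

/* allocation pops the most recently freed object: ac->entry[--ac->avail] */
static void *toy_alloc(struct toy_array_cache *ac)
{
	return ac->avail ? ac->entry[--ac->avail] : NULL;
}

/* free pushes the object back: ac->entry[ac->avail++] = objp */
static void toy_free(struct toy_array_cache *ac, void *objp)
{
	assert(ac->avail < ac->limit);
	ac->entry[ac->avail++] = objp;
}

int main(void)
{
	struct toy_array_cache ac = { .avail = 0, .limit = 8 };
	int obj;

	toy_free(&ac, &obj);		/* push */
	assert(toy_alloc(&ac) == &obj);	/* pop returns the still-cache-warm object */
	assert(toy_alloc(&ac) == NULL);	/* empty: the kernel would refill here */
	return 0;
}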

The heads of the slab lists are kept in a separate data structure:

struct kmem_list3 {
	struct list_head slabs_partial;	/* this ordering supposedly yields better assembly? */
	struct list_head slabs_full;
	struct list_head slabs_free;

	unsigned long free_objects;	/* number of free objects */
	unsigned int free_limit;	/* upper limit on free objects */
	unsigned int colour_next;	/* per-node cache colouring */
	spinlock_t list_lock;
	struct array_cache *shared;	/* shared within the node */
	struct array_cache **alien;	/* shared across nodes */
	unsigned long next_reap;	/* minimum time between two shrink attempts,
					 * so the cache doesn't shrink and grow too often */
	int free_touched;
};
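
A slab lives on exactly one of the three lists depending on how many of its objects are handed out; the allocation and free paths below move slabs between lists as that count changes. A distilled userspace mock of the classification rule (names follow struct kmem_list3; this is a sketch, not kernel code):

/* which list a slab belongs on, per the rules applied by the code below */
enum slab_list { SLABS_FULL, SLABS_PARTIAL, SLABS_FREE };

static enum slab_list classify(unsigned int inuse, unsigned int num)
{
	if (inuse == num)
		return SLABS_FULL;	/* every object handed out */
	if (inuse == 0)
		return SLABS_FREE;	/* completely unused */
	return SLABS_PARTIAL;		/* somewhere in between */
}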

Now let's walk through how the slab allocator starts up. The initialization function is:

void __init kmem_cache_init(void)

It first loops over and initializes NUM_INIT_LISTS kmem_list3 structures:

	for (i = 0; i < NUM_INIT_LISTS; i++) {
		kmem_list3_init(&initkmem_list3[i]);
		if (i < MAX_NUMNODES)
			cache_cache.nodelists[i] = NULL;
	}

kmem_list3 initialization is straightforward:

static void kmem_list3_init(struct kmem_list3 *parent)
{
	INIT_LIST_HEAD(&parent->slabs_full);
	INIT_LIST_HEAD(&parent->slabs_partial);
	INIT_LIST_HEAD(&parent->slabs_free);
	parent->shared = NULL;
	parent->alien = NULL;
	parent->colour_next = 0;
	spin_lock_init(&parent->list_lock);
	parent->free_objects = 0;
	parent->free_touched = 0;
}

cache_cache itself is defined as:

static struct kmem_cache cache_cache = {
	.batchcount = 1,
	.limit = BOOT_CPUCACHE_ENTRIES,
	.shared = 1,
	.buffer_size = sizeof(struct kmem_cache),
	.name = "kmem_cache",
};

At this point the kmem_list3 array initkmem_list3 is initialized (its storage was allocated statically at compile time, so no small-memory allocation is involved yet). Its entries are then hooked into cache_cache:

/* called as set_up_list3s(&cache_cache, CACHE_CACHE), where CACHE_CACHE is 0 */
static void __init set_up_list3s(struct kmem_cache *cachep, int index)
{
	int node;
	for_each_online_node(node) {
		cachep->nodelists[node] = &initkmem_list3[index + node];
		cachep->nodelists[node]->next_reap = jiffies +
			REAPTIMEOUT_LIST3 +
			((unsigned long)cachep) % REAPTIMEOUT_LIST3;
	}
}

Next the code checks how much physical memory the system has. If there is more than 32MB, slab_break_gfp_order is raised to BREAK_GFP_ORDER_HI, allowing slabs to span higher-order page allocations; presumably with that much memory the extra fragmentation risk is acceptable:

	if (num_physpages > (32 << 20) >> PAGE_SHIFT)
		slab_break_gfp_order = BREAK_GFP_ORDER_HI;

Then cache_cache is added to the cache_chain list:

	node = numa_node_id();

	INIT_LIST_HEAD(&cache_chain);
	list_add(&cache_cache.next, &cache_chain);
	cache_cache.colour_off = cache_line_size();
	cache_cache.array[smp_processor_id()] = &initarray_cache.cache;
	cache_cache.nodelists[node] = &initkmem_list3[CACHE_CACHE + node];
	cache_cache.buffer_size = offsetof(struct kmem_cache, nodelists) +
				  nr_node_ids * sizeof(struct kmem_list3 *);
#if DEBUG
	cache_cache.obj_size = cache_cache.buffer_size;
#endif
	cache_cache.buffer_size = ALIGN(cache_cache.buffer_size,
					cache_line_size());
	cache_cache.reciprocal_buffer_size =
		reciprocal_value(cache_cache.buffer_size);

cache_estimate computes the number of objects per slab and distinguishes two cases: the slab management data is stored on the slab itself, or kept off-slab. The loop over order breaks out as soon as some order yields a nonzero object count; at that point the relevant cache_cache fields are also initialized:

	for (order = 0; order < MAX_ORDER; order++) {
		cache_estimate(order, cache_cache.buffer_size, cache_line_size(),
			       0, &left_over, &cache_cache.num);
		if (cache_cache.num)
			break;
	}
	BUG_ON(!cache_cache.num);

cache_estimate itself:

static void cache_estimate(unsigned long gfporder, size_t buffer_size,
			   size_t align, int flags, size_t *left_over,
			   unsigned int *num)
{
	int nr_objs;
	size_t mgmt_size;
	size_t slab_size = PAGE_SIZE << gfporder;

	if (flags & CFLGS_OFF_SLAB) {
		/* management data lives off-slab: the whole slab holds objects */
		mgmt_size = 0;
		nr_objs = slab_size / buffer_size;

		if (nr_objs > SLAB_LIMIT)
			nr_objs = SLAB_LIMIT;
	} else {
		/* on-slab: account for struct slab plus one kmem_bufctl_t per object */
		nr_objs = (slab_size - sizeof(struct slab)) /
			  (buffer_size + sizeof(kmem_bufctl_t));
		if (slab_mgmt_size(nr_objs, align) + nr_objs * buffer_size > slab_size)
			nr_objs--;

		if (nr_objs > SLAB_LIMIT)
			nr_objs = SLAB_LIMIT;

		mgmt_size = slab_mgmt_size(nr_objs, align);
	}
	*num = nr_objs;
	*left_over = slab_size - nr_objs * buffer_size - mgmt_size;
}
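
To make the arithmetic concrete, here is a minimal userspace sketch of the on-slab case with assumed sizes (PAGE_SIZE 4096, a 32-byte struct slab, a 4-byte kmem_bufctl_t, 128-byte objects; alignment is ignored). The real values depend on the architecture and configuration:

#include <stdio.h>

/* assumed stand-ins for the kernel's values */
#define PAGE_SIZE	4096UL
#define SLAB_HDR	32UL	/* sizeof(struct slab), assumed */
#define BUFCTL		4UL	/* sizeof(kmem_bufctl_t), assumed */

int main(void)
{
	unsigned long gfporder = 0;		/* one page per slab */
	unsigned long buffer_size = 128;	/* object size */
	unsigned long slab_size = PAGE_SIZE << gfporder;

	/* on-slab estimate, mirroring cache_estimate() without alignment */
	unsigned long nr_objs = (slab_size - SLAB_HDR) / (buffer_size + BUFCTL);
	unsigned long mgmt_size = SLAB_HDR + nr_objs * BUFCTL;
	unsigned long left_over = slab_size - nr_objs * buffer_size - mgmt_size;

	/* prints: num=30 mgmt=152 left_over=104 */
	printf("num=%lu mgmt=%lu left_over=%lu\n", nr_objs, mgmt_size, left_over);
	return 0;
}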

A few more cache_cache fields are then set:

	cache_cache.gfporder = order;
	cache_cache.colour = left_over / cache_cache.colour_off;
	cache_cache.slab_size = ALIGN(cache_cache.num * sizeof(kmem_bufctl_t) +
				      sizeof(struct slab), cache_line_size());
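
Continuing with the assumed numbers from the sketch above: left_over = 104 and colour_off = cache_line_size() = 64 give colour = 104 / 64 = 1, i.e. only a single colour (offset 0). If left_over were 200 instead, colour would be 200 / 64 = 3, and successive slabs would start at offsets 0, 64 and 128, so the same object index in different slabs lands on different cache lines. cache_grow(), shown later, cycles colour_next through these offsets.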

Next, caches for array_cache and kmem_list3 themselves are created; nothing else can work until they exist:

	sizes = malloc_sizes;
	names = cache_names;

	sizes[INDEX_AC].cs_cachep =
		kmem_cache_create(names[INDEX_AC].name,
				  sizes[INDEX_AC].cs_size,
				  ARCH_KMALLOC_MINALIGN,
				  ARCH_KMALLOC_FLAGS | SLAB_PANIC,
				  NULL);

/*
 * Create a cache.
 * Parameters:
 *   name:  the name shown in /proc/slabinfo;
 *   size:  the size of the objects in this cache;
 *   align: alignment;
 *   flags: flags;
 *   ctor:  constructor;
 * Returns:
 *   the kmem_cache.
 */
struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align,
				     unsigned long flags,
				     void (*ctor)(struct kmem_cache *, void *))
{
	size_t left_over, slab_size, ralign;
	struct kmem_cache *cachep = NULL, *pc;

	/* sanity checks: no name, called from interrupt context,
	 * or size too small or too large */
	if (!name || in_interrupt() || (size < BYTES_PER_WORD) ||
	    size > KMALLOC_MAX_SIZE) {
		printk(KERN_ERR "%s: Early error in slab %s\n", __FUNCTION__, name);
		BUG();
	}

	/* cache_chain_mutex also protects cpu_online_map */
	get_online_cpus();
	mutex_lock(&cache_chain_mutex);

	/* walk the existing caches */
	list_for_each_entry(pc, &cache_chain, next) {
		char tmp;
		int res;

		/*
		 * probe_kernel_address(addr, retval)
		 *   addr:   the address to read, with retval's type
		 *   retval: the variable read into
		 * Guards against modules that were unloaded without destroying
		 * their slab caches (not really related to the topic here).
		 */
		res = probe_kernel_address(pc->name, tmp);
		if (res) {
			printk(KERN_ERR "SLAB: cache with size %d has lost its name\n",
			       pc->buffer_size);
			continue;
		}

		/* an existing cache with the same name is an error */
		if (!strcmp(pc->name, name)) {
			printk(KERN_ERR "kmem_cache_create: duplicate cache %s\n",
			       name);
			dump_stack();
			goto oops;
		}
	}

	BUG_ON(flags & ~CREATE_MASK);

	/* round the size up to a word boundary */
	if (size & (BYTES_PER_WORD - 1)) {
		size += (BYTES_PER_WORD - 1);
		size &= ~(BYTES_PER_WORD - 1);
	}

	/* the recommended way */
	if (flags & SLAB_HWCACHE_ALIGN) {
		/* start from the default cache line size; small objects
		 * get packed several to a cache line */
		ralign = cache_line_size();
		while (size <= ralign / 2)
			ralign /= 2;
	} else {
		ralign = BYTES_PER_WORD;
	}

	if (flags & SLAB_STORE_USER)
		ralign = BYTES_PER_WORD;

	if (flags & SLAB_RED_ZONE) {
		ralign = REDZONE_ALIGN;
		size += REDZONE_ALIGN - 1;
		size &= ~(REDZONE_ALIGN - 1);
	}

	/* minimum required by the architecture */
	if (ralign < ARCH_SLAB_MINALIGN) {
		ralign = ARCH_SLAB_MINALIGN;
	}
	/* minimum forced by the caller */
	if (ralign < align) {
		ralign = align;
	}

	if (ralign > __alignof__(unsigned long long))
		flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);

	align = ralign;

	/* allocate a new kmem_cache instance */
	cachep = kmem_cache_zalloc(&cache_cache, GFP_KERNEL);
	if (!cachep)
		goto oops;

	/* decide whether to store the slab head off-slab */
	if ((size >= (PAGE_SIZE >> 3)) && !slab_early_init)
		flags |= CFLGS_OFF_SLAB;

	/* grow the object size up to the alignment */
	size = ALIGN(size, align);

	/* iterate to find a suitable slab order */
	left_over = calculate_slab_order(cachep, size, align, flags);
	if (!cachep->num) {
		printk(KERN_ERR "kmem_cache_create: couldn't create cache %s.\n",
		       name);
		kmem_cache_free(&cache_cache, cachep);
		cachep = NULL;
		goto oops;
	}

	/* size of the slab management data */
	slab_size = ALIGN(cachep->num * sizeof(kmem_bufctl_t) +
			  sizeof(struct slab), align);

	/* if the leftover space can hold the slab head, move it back on-slab */
	if (flags & CFLGS_OFF_SLAB && left_over >= slab_size) {
		flags &= ~CFLGS_OFF_SLAB;
		left_over -= slab_size;
	}

	if (flags & CFLGS_OFF_SLAB) {
		slab_size = cachep->num * sizeof(kmem_bufctl_t) +
			    sizeof(struct slab);
	}

	/* use the L1 cache line size as the colour offset */
	cachep->colour_off = cache_line_size();
	/* it must be a multiple of align */
	if (cachep->colour_off < align)
		cachep->colour_off = align;
	cachep->colour = left_over / cachep->colour_off;
	cachep->slab_size = slab_size;
	cachep->flags = flags;
	cachep->gfpflags = 0;
	if (CONFIG_ZONE_DMA_FLAG && (flags & SLAB_CACHE_DMA))
		cachep->gfpflags |= GFP_DMA;
	cachep->buffer_size = size;
	cachep->reciprocal_buffer_size = reciprocal_value(size);

	if (flags & CFLGS_OFF_SLAB) {
		cachep->slabp_cache = kmem_find_general_cachep(slab_size, 0u);
		BUG_ON(ZERO_OR_NULL_PTR(cachep->slabp_cache));
	}

	cachep->ctor = ctor;
	cachep->name = name;

	/* set up the per-CPU caches */
	if (setup_cpu_cache(cachep)) {
		__kmem_cache_destroy(cachep);
		cachep = NULL;
		goto oops;
	}

	list_add(&cachep->next, &cache_chain);
oops:
	if (!cachep && (flags & SLAB_PANIC))
		panic("kmem_cache_create(): failed to create slab `%s'\n", name);
	mutex_unlock(&cache_chain_mutex);
	put_online_cpus();
	return cachep;
}
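
As a usage sketch: kmem_cache_create() pairs with kmem_cache_alloc() and kmem_cache_free(). The struct and function names here (my_obj, my_obj_cache_init) are illustrative, not from the original post:

#include <linux/module.h>
#include <linux/slab.h>

/* a hypothetical small object we want a dedicated cache for */
struct my_obj {
	int id;
	char payload[60];
};

static struct kmem_cache *my_cache;

static int __init my_obj_cache_init(void)
{
	struct my_obj *o;

	/* hardware cache-line alignment, the recommended flag above */
	my_cache = kmem_cache_create("my_obj", sizeof(struct my_obj),
				     0, SLAB_HWCACHE_ALIGN, NULL);
	if (!my_cache)
		return -ENOMEM;

	o = kmem_cache_alloc(my_cache, GFP_KERNEL);	/* enters __cache_alloc() */
	if (o)
		kmem_cache_free(my_cache, o);
	return 0;
}
module_init(my_obj_cache_init);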

Back in kmem_cache_init(), if the array_cache and kmem_list3 size classes differ, a cache for kmem_list3 is created the same way:

	if (INDEX_AC != INDEX_L3) {
		sizes[INDEX_L3].cs_cachep =
			kmem_cache_create(names[INDEX_L3].name,
					  sizes[INDEX_L3].cs_size,
					  ARCH_KMALLOC_MINALIGN,
					  ARCH_KMALLOC_FLAGS | SLAB_PANIC,
					  NULL);
	}

Then the remaining general caches described by the names and sizes arrays are created:

	slab_early_init = 0;

	while (sizes->cs_size != ULONG_MAX) {
		/* all the general caches are L1-aligned; this is especially
		 * useful on SMP and packs the objects more tightly */
		if (!sizes->cs_cachep) {
			sizes->cs_cachep =
				kmem_cache_create(names->name,
						  sizes->cs_size,
						  ARCH_KMALLOC_MINALIGN,
						  ARCH_KMALLOC_FLAGS | SLAB_PANIC,
						  NULL);
		}
#ifdef CONFIG_ZONE_DMA
		sizes->cs_dmacachep =
			kmem_cache_create(names->name_dma,
					  sizes->cs_size,
					  ARCH_KMALLOC_MINALIGN,
					  ARCH_KMALLOC_FLAGS | SLAB_CACHE_DMA | SLAB_PANIC,
					  NULL);
#endif
		sizes++;
		names++;
	}
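
These general caches are what kmalloc() draws from: the allocator walks malloc_sizes until it finds the first cache whose cs_size is large enough, so e.g. a 17-byte request lands in the 32-byte class. A minimal sketch of that lookup, mirroring the kernel's __find_general_cachep() with the error handling and CONFIG_ZONE_DMA branch omitted:

/* sketch: pick the smallest general cache that fits the request;
 * malloc_sizes is sorted ascending and terminated by cs_size == ULONG_MAX */
static struct kmem_cache *find_general_cachep(size_t size)
{
	struct cache_sizes *csizep = malloc_sizes;

	while (size > csizep->cs_size)
		csizep++;
	return csizep->cs_cachep;
}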

Next, the bootstrap head arrays are replaced with properly kmalloc'ed ones; essentially this fixes up the per-CPU caches:

	{
		struct array_cache *ptr;
		int this_cpu;

		/* allocate the real array */
		ptr = kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);

		/* keep interrupts off this CPU */
		slab_irq_disable(this_cpu);
		BUG_ON(cpu_cache_get(&cache_cache, this_cpu) !=
		       &initarray_cache.cache);
		/* copy this CPU's bootstrap cache into ptr */
		memcpy(ptr, cpu_cache_get(&cache_cache, this_cpu),
		       sizeof(struct arraycache_init));
		spin_lock_init(&ptr->lock);
		/* install it as cache_cache's per-CPU array */
		cache_cache.array[this_cpu] = ptr;
		slab_irq_enable(this_cpu);

		ptr = kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);

		slab_irq_disable(this_cpu);
		BUG_ON(cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep, this_cpu)
		       != &initarray_generic.cache);
		memcpy(ptr, cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep, this_cpu),
		       sizeof(struct arraycache_init));
		spin_lock_init(&ptr->lock);
		/* same replacement for the array_cache cache itself */
		malloc_sizes[INDEX_AC].cs_cachep->array[this_cpu] = ptr;
		slab_irq_enable(this_cpu);
	}

The same replacement is done for the kmem_list3 structures:

	{
		int nid;

		/* for every online node, replace the bootstrap lists
		 * ("full", "partial" and "free") with properly allocated ones */
		for_each_online_node(nid) {
			init_list(&cache_cache,
				  &initkmem_list3[CACHE_CACHE + nid], nid);
			init_list(malloc_sizes[INDEX_AC].cs_cachep,
				  &initkmem_list3[SIZE_AC + nid], nid);
			if (INDEX_AC != INDEX_L3) {
				init_list(malloc_sizes[INDEX_L3].cs_cachep,
					  &initkmem_list3[SIZE_L3 + nid], nid);
			}
		}
	}

Then the per-CPU arrays are resized to their real sizes:

	{
		struct kmem_cache *cachep;

		mutex_lock(&cache_chain_mutex);
		list_for_each_entry(cachep, &cache_chain, next)
			if (enable_cpucache(cachep))
				BUG();
		mutex_unlock(&cache_chain_mutex);
	}

Finally, mark the slab system as fully up and register a notifier so that CPUs coming online later get initialized too:

	init_lock_keys();
	g_cpucache_up = FULL;
	register_cpu_notifier(&cpucache_notifier);
}	/* end of kmem_cache_init() */

All the ways of allocating memory from a slab share a common entry point, __cache_alloc():

  1. should_failslab() first makes a preliminary check on whether the allocation may proceed (the flags may carry NOFAIL);
  2. __do_cache_alloc(cachep, flags, &this_cpu) performs the actual allocation;
  3. cache_alloc_debugcheck_after(cachep, flags, objp, caller) runs the post-allocation checks;
  4. prefetchw(objp) uses a prefetch to speed up the instructions that will touch the object;
  5. return objp.
The code is as follows:
static __always_inline void *__cache_alloc(struct kmem_cache *cachep,
					   gfp_t flags, void *caller)
{
	unsigned long save_flags;
	int this_cpu;
	void *objp;

	if (should_failslab(cachep, flags))
		return NULL;

	cache_alloc_debugcheck_before(cachep, flags);
	slab_irq_save(save_flags, this_cpu);
	objp = __do_cache_alloc(cachep, flags, &this_cpu);
	slab_irq_restore(save_flags, this_cpu);
	objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
	prefetchw(objp);

	if (unlikely((flags & __GFP_ZERO) && objp))
		memset(objp, 0, obj_size(cachep));

	return objp;
}

Clearly the heart of this path is __do_cache_alloc(cachep, flags, &this_cpu):

  1. If PF_SPREAD_SLAB | PF_MEMPOLICY is set, the allocation should be spread to another NUMA node;
  2. ____cache_alloc(cache, flags, this_cpu) takes an object from the CPU's array cache, refilling the cache if it is empty;
  3. If that yields nothing, fall back to ____cache_alloc_node(cache, flags, cpu_to_node(*this_cpu), this_cpu), the non-cached path that works on the three slab lists;
  4. return objp.
The code follows; the two central functions are examined in detail below:
static __always_inline void *__do_cache_alloc(struct kmem_cache *cache,
					      gfp_t flags, int *this_cpu)
{
	void *objp;

	if (unlikely(current->flags & (PF_SPREAD_SLAB | PF_MEMPOLICY))) {
		objp = alternate_node_alloc(cache, flags, this_cpu);
		if (objp)
			goto out;
	}
	objp = ____cache_alloc(cache, flags, this_cpu);
	if (!objp)
		objp = ____cache_alloc_node(cache, flags,
					    cpu_to_node(*this_cpu), this_cpu);
out:
	return objp;
}

____cache_alloc(cache, flags, this_cpu) takes an object from the CPU's cache. The process is straightforward: pop an object from the matching array_cache:

  1. cpu_cache_get(cachep, *this_cpu) fetches the corresponding array_cache;
  2. If objects remain, objp = ac->entry[--ac->avail];
  3. Otherwise cache_alloc_refill(cachep, flags, this_cpu) refills the array;
  4. return objp.
The code:
static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags,
				    int *this_cpu)
{
	void *objp;
	struct array_cache *ac;

	check_irq_off();
	ac = cpu_cache_get(cachep, *this_cpu);
	if (likely(ac->avail)) {
		STATS_INC_ALLOCHIT(cachep);
		ac->touched = 1;
		objp = ac->entry[--ac->avail];
	} else {
		STATS_INC_ALLOCMISS(cachep);
		objp = cache_alloc_refill(cachep, flags, this_cpu);
	}
	return objp;
}

The key piece above is the refill, cache_alloc_refill(cachep, flags, this_cpu). Its execution goes like this:

  1. The per-CPU array is protected simply by running with interrupts disabled, which check_irq_off() verifies;
  2. cpu_cache_get(cachep, *this_cpu) fetches the CPU's array_cache;
  3. node = numa_node_id() gets the current node;
  4. cachep->nodelists[cpu_to_node(*this_cpu)] gets the kmem_list3;
  5. Try to transfer some objects over from the node's shared array;
  6. Loop to gather batchcount objects:
    1. Look at slabs_partial and then slabs_free in turn for objects; if neither has a slab, cache_grow() has to be called, discussed in detail below;
    2. ac->entry[ac->avail++] = slab_get_obj(cachep, slabp, cpu_to_node(*this_cpu)) pulls objects out of the slab into the per-CPU array;
    3. Check whether the slab should now be moved to a different list.
  7. If the array is still empty, call cache_grow(); if even that fails, return NULL, otherwise retry.
  8. return ac->entry[--ac->avail] to hand one object back.
The flow is fairly simple; here is the code:
static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags,
				int *this_cpu)
{
	int batchcount;
	struct kmem_list3 *l3;
	struct array_cache *ac;
	int node;

retry:
	check_irq_off();
	node = numa_node_id();
	ac = cpu_cache_get(cachep, *this_cpu);
	batchcount = ac->batchcount;
	if (!ac->touched && batchcount > BATCHREFILL_LIMIT) {
		batchcount = BATCHREFILL_LIMIT;
	}
	l3 = cachep->nodelists[cpu_to_node(*this_cpu)];

	BUG_ON(ac->avail > 0 || !l3);
	spin_lock(&l3->list_lock);

	/* first try to refill from the node's shared array */
	if (l3->shared && transfer_objects(ac, l3->shared, batchcount))
		goto alloc_done;

	while (batchcount > 0) {
		struct list_head *entry;
		struct slab *slabp;

		/* prefer a partially used slab, then a free one */
		entry = l3->slabs_partial.next;
		if (entry == &l3->slabs_partial) {
			l3->free_touched = 1;
			entry = l3->slabs_free.next;
			if (entry == &l3->slabs_free)
				goto must_grow;
		}

		slabp = list_entry(entry, struct slab, list);
		check_slabp(cachep, slabp);
		check_spinlock_acquired_node(cachep, cpu_to_node(*this_cpu));
		BUG_ON(slabp->inuse < 0 || slabp->inuse >= cachep->num);

		while (slabp->inuse < cachep->num && batchcount--) {
			STATS_INC_ALLOCED(cachep);
			STATS_INC_ACTIVE(cachep);
			STATS_SET_HIGH(cachep);
			ac->entry[ac->avail++] =
				slab_get_obj(cachep, slabp, cpu_to_node(*this_cpu));
		}
		check_slabp(cachep, slabp);

		/* move the slab to the list matching its new state */
		list_del(&slabp->list);
		if (slabp->free == BUFCTL_END)
			list_add(&slabp->list, &l3->slabs_full);
		else
			list_add(&slabp->list, &l3->slabs_partial);
	}
must_grow:
	l3->free_objects -= ac->avail;
alloc_done:
	spin_unlock(&l3->list_lock);

	if (unlikely(!ac->avail)) {
		int x;

		x = cache_grow(cachep, flags | GFP_THISNODE,
			       cpu_to_node(*this_cpu), NULL, this_cpu);
		ac = cpu_cache_get(cachep, *this_cpu);
		if (!x && ac->avail == 0)	/* no objects in sight? abort */
			return NULL;
		if (!ac->avail)			/* objects refilled by interrupt? */
			goto retry;
	}
	ac->touched = 1;
	return ac->entry[--ac->avail];
}
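
To make the refill concrete with assumed numbers: if batchcount is 16 and the first slab on slabs_partial has 10 unused objects, the inner loop takes all 10 and moves that slab to slabs_full, then continues with the next partial (or free) slab for the remaining 6; finally l3->free_objects drops by the 16 objects now parked in the per-CPU array.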

When slabs_partial and slabs_free have no usable slab, cache_grow() is called to replenish them:

  1. The prologue largely mirrors the function above; then the colour offset is computed;
  2. kmem_flagcheck(cachep, flags) checks the flags;
  3. kmem_getpages(cachep, local_flags, nodeid) calls into the buddy system to allocate the needed pages;
  4. alloc_slabmgmt(cachep, objp, offset, local_flags & ~GFP_CONSTRAINT_MASK, nodeid) allocates and sets up the slab management data;
  5. slab_map_pages(cachep, slabp, objp) maps the pages to the given cache and slab;
  6. cache_init_objs(cachep, slabp) initializes the objects;
  7. list_add_tail(&slabp->list, &(l3->slabs_free)) puts the freshly grown slab on slabs_free;
  8. return 1 on success, 0 otherwise.
The overall flow is clear, though the details reward a closer look. Here is the code:
static int cache_grow(struct kmem_cache *cachep, gfp_t flags, int nodeid,
		      void *objp, int *this_cpu)
{
	struct slab *slabp;
	size_t offset;
	gfp_t local_flags;
	struct kmem_list3 *l3;

	BUG_ON(flags & GFP_SLAB_BUG_MASK);
	local_flags = flags & (GFP_CONSTRAINT_MASK | GFP_RECLAIM_MASK);

	check_irq_off();
	l3 = cachep->nodelists[nodeid];
	spin_lock(&l3->list_lock);

	/* take the next colour and advance it, wrapping at cachep->colour */
	offset = l3->colour_next;
	l3->colour_next++;
	if (l3->colour_next >= cachep->colour)
		l3->colour_next = 0;
	spin_unlock(&l3->list_lock);
	offset *= cachep->colour_off;

	if (local_flags & __GFP_WAIT)
		slab_irq_enable_nort(*this_cpu);
	slab_irq_enable_rt(*this_cpu);

	kmem_flagcheck(cachep, flags);

	/* get the pages from the buddy system */
	if (!objp)
		objp = kmem_getpages(cachep, local_flags, nodeid);
	if (!objp)
		goto failed;

	/* set up the slab management data */
	slabp = alloc_slabmgmt(cachep, objp, offset,
			       local_flags & ~GFP_CONSTRAINT_MASK, nodeid);
	if (!slabp)
		goto opps1;

	slab_map_pages(cachep, slabp, objp);
	cache_init_objs(cachep, slabp);

	slab_irq_disable_rt(*this_cpu);
	if (local_flags & __GFP_WAIT)
		slab_irq_disable_nort(*this_cpu);
	check_irq_off();
	spin_lock(&l3->list_lock);

	/* the new slab is entirely free */
	list_add_tail(&slabp->list, &(l3->slabs_free));
	STATS_INC_GROWN(cachep);
	l3->free_objects += cachep->num;
	spin_unlock(&l3->list_lock);
	return 1;
opps1:
	kmem_freepages(cachep, objp);
failed:
	slab_irq_disable_rt(*this_cpu);
	if (local_flags & __GFP_WAIT)
		slab_irq_disable_nort(*this_cpu);
	return 0;
}

All of this would be much clearer with a diagram.
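
In lieu of that diagram, here is a rough sketch of how an on-slab slab is laid out, as implied by cache_estimate() and the colouring logic above (proportions are illustrative; off-slab caches keep struct slab and the kmem_bufctl_t array in a separate cache instead):

|<---------------------- PAGE_SIZE << gfporder ---------------------->|
+---------------+-------------+--------------------+------+-----+------+----------+
| colour offset | struct slab | kmem_bufctl_t[num] | obj0 | ... | objN | leftover |
+---------------+-------------+--------------------+------+-----+------+----------+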

