for (i = 0; i < NUM_INIT_LISTS; i++) {
	kmem_list3_init(&initkmem_list3[i]);
	if (i < MAX_NUMNODES)
		cache_cache.nodelists[i] = NULL;
}
Initialize the kmem_list3 structures according to the number of memory nodes: each node gets three kmem_list3 lists, one per bootstrap cache. Under UMA there is only one node.
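For reference, the bootstrap lists are laid out in initkmem_list3[] roughly as follows (a sketch of the definitions in mm/slab.c; exact values vary across kernel versions):

/* Bootstrap list layout (sketch; see mm/slab.c): one kmem_list3 per
 * node for each of the three caches set up during bootstrap. */
#define CACHE_CACHE	0			/* lists for cache_cache */
#define SIZE_AC		MAX_NUMNODES		/* lists for the arraycache_init cache */
#define SIZE_L3		(2 * MAX_NUMNODES)	/* lists for the kmem_list3 cache */
#define NUM_INIT_LISTS	(3 * MAX_NUMNODES)

static struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];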
/* 1) create the cache_cache */
INIT_LIST_HEAD(&cache_chain);
list_add(&cache_cache.next, &cache_chain);
During initialization, memory must be allocated with kmalloc, but at this point the SLAB allocator does not exist yet, so the statically defined kmem_cache cache_cache is used as a temporary bootstrap cache; here it is added to the cache_chain list.
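cache_cache itself is defined statically, which is what makes this bootstrap possible (a sketch of its definition in mm/slab.c; initializers vary slightly by version):

/* Internal cache of cache description objects (sketch from mm/slab.c). */
static struct kmem_cache cache_cache = {
	.batchcount	= 1,
	.limit		= BOOT_CPUCACHE_ENTRIES,
	.shared		= 1,
	.buffer_size	= sizeof(struct kmem_cache),
	.name		= "kmem_cache",
};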
cache_cache.colour_off = cache_line_size();
Set the colour offset; currently it is the L1 cache line size. The kernel computes each slab's colouring offset as colour_off * X, with X cycling through the available colours.
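As a sketch of how these offsets are used later (based on kmem_cache_create() and cache_grow() in mm/slab.c; in this era colour_next lives in the per-node kmem_list3):

/* The left-over space at the end of a slab is cut into
 * colour_off-sized steps; successive slabs start their objects at
 * offset colour_next * colour_off, cycling colour_next. */
cachep->colour = left_over / cachep->colour_off;	/* number of colours */

/* in cache_grow(), when a new slab is allocated: */
offset = l3->colour_next;
l3->colour_next++;
if (l3->colour_next >= cachep->colour)
	l3->colour_next = 0;
offset *= cachep->colour_off;		/* byte offset of the first object */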
cache_cache.array[smp_processor_id()] = &initarray_cache.cache;
Set up the per-CPU data: point the boot CPU's array_cache at the statically allocated initarray_cache.
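For reference, the per-CPU front end being installed here is a struct array_cache (as defined in mm/slab.c of this era; fields can differ in other versions):

/* Per-CPU object cache (from mm/slab.c). */
struct array_cache {
	unsigned int avail;		/* objects currently cached */
	unsigned int limit;		/* max objects before flushing back */
	unsigned int batchcount;	/* transfer granularity to/from the lists */
	unsigned int touched;		/* recently-used flag for the reaper */
	spinlock_t lock;
	void *entry[];			/* the cached object pointers */
};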
cache_cache.nodelists[node] = &initkmem_list3[CACHE_CACHE + node];
Install the kmem_list3 into nodelists; under UMA, node is 0.
cache_cache.buffer_size = offsetof(struct kmem_cache, nodelists) +
				 nr_node_ids * sizeof(struct kmem_list3 *);
Compute the size of struct kmem_cache and use it as the object size for this cache. offsetof(struct kmem_cache, nodelists) gives the offset of nodelists within kmem_cache; only nr_node_ids pointer slots (the nodes that actually exist) are counted, rather than the full MAX_NUMNODES array.
#if DEBUG
cache_cache.obj_size = cache_cache.buffer_size;
#endif
cache_cache.buffer_size = ALIGN(cache_cache.buffer_size,
				cache_line_size());
Align buffer_size up to the L1 cache line size.
cache_cache.reciprocal_buffer_size =
	reciprocal_value(cache_cache.buffer_size);
Precompute the multiplicative reciprocal of buffer_size, so that later divisions by the object size (e.g. when converting an object address into its index within a slab) can be done with a multiplication and a shift.
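The helpers behind this look roughly as follows (a sketch based on the era's include/linux/reciprocal_div.h and lib/reciprocal_div.c):

u32 reciprocal_value(u32 k)
{
	u64 val = (1LL << 32) + (k - 1);
	do_div(val, k);			/* val = ceil(2^32 / k) */
	return (u32)val;
}

static inline u32 reciprocal_divide(u32 a, u32 r)
{
	return (u32)(((u64)a * r) >> 32);	/* approximates a / k */
}

/* slab uses it e.g. in obj_to_index():
 * index = reciprocal_divide(obj - slab->s_mem,
 *			     cachep->reciprocal_buffer_size); */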
for (order = 0; order < MAX_ORDER; order++) {
	cache_estimate(order, cache_cache.buffer_size,
		       cache_line_size(), 0, &left_over, &cache_cache.num);
	if (cache_cache.num)
		break;
}
Given the object size, find the smallest page order whose slab can hold at least one object. The space of a slab is split as:
PAGE_SIZE << gfporder = head + num * buffer_size + left_over
If the slab management data is kept off-slab (kmem_cache.flags contains CFLGS_OFF_SLAB), then
head = 0;
otherwise the management data sits at the head of the slab, and
head = sizeof(struct slab) + num * sizeof(kmem_bufctl_t);
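As a cross-check of the formula above, here is a simplified sketch of cache_estimate() (paraphrased from mm/slab.c; the real function also corrects nr_objs for the aligned management area and caps it at SLAB_LIMIT):

static void cache_estimate(unsigned long gfporder, size_t buffer_size,
			   size_t align, int flags, size_t *left_over,
			   unsigned int *num)
{
	size_t slab_size = PAGE_SIZE << gfporder;
	size_t mgmt_size;
	int nr_objs;

	if (flags & CFLGS_OFF_SLAB) {
		/* management data lives outside the slab pages */
		mgmt_size = 0;
		nr_objs = slab_size / buffer_size;
	} else {
		/* struct slab plus one kmem_bufctl_t per object at the head */
		nr_objs = (slab_size - sizeof(struct slab)) /
			  (buffer_size + sizeof(kmem_bufctl_t));
		mgmt_size = ALIGN(sizeof(struct slab) +
				  nr_objs * sizeof(kmem_bufctl_t), align);
	}
	*num = nr_objs;
	*left_over = slab_size - nr_objs * buffer_size - mgmt_size;
}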
sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
					sizes[INDEX_AC].cs_size,
					ARCH_KMALLOC_MINALIGN,
					ARCH_KMALLOC_FLAGS|SLAB_PANIC,
					NULL);

if (INDEX_AC != INDEX_L3) {
	sizes[INDEX_L3].cs_cachep =
		kmem_cache_create(names[INDEX_L3].name,
			sizes[INDEX_L3].cs_size,
			ARCH_KMALLOC_MINALIGN,
			ARCH_KMALLOC_FLAGS|SLAB_PANIC,
			NULL);
}
Create the caches for struct arraycache_init and struct kmem_list3 up front, so that the kmalloc calls made later in this function can allocate them.
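INDEX_AC and INDEX_L3 are simply the indices of the first general cache whose size fits each bootstrap structure (a sketch of the definitions in mm/slab.c; index_of() resolves at compile time):

#define INDEX_AC index_of(sizeof(struct arraycache_init))
#define INDEX_L3 index_of(sizeof(struct kmem_list3))

If both structures happen to fall into the same size class, INDEX_AC equals INDEX_L3 and a single cache serves both, which is exactly the case the if (INDEX_AC != INDEX_L3) test above skips.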
while (sizes->cs_size != ULONG_MAX) {
	/*
	 * For performance, all the general caches are L1 aligned.
	 * This should be particularly beneficial on SMP boxes, as it
	 * eliminates "false sharing".
	 * Note for systems short on memory removing the alignment will
	 * allow tighter packing of the smaller caches.
	 */
	if (!sizes->cs_cachep) {
		sizes->cs_cachep = kmem_cache_create(names->name,
					sizes->cs_size,
					ARCH_KMALLOC_MINALIGN,
					ARCH_KMALLOC_FLAGS|SLAB_PANIC,
					NULL);
	}
#ifdef CONFIG_ZONE_DMA
	sizes->cs_dmacachep = kmem_cache_create(
					names->name_dma,
					sizes->cs_size,
					ARCH_KMALLOC_MINALIGN,
					ARCH_KMALLOC_FLAGS|SLAB_CACHE_DMA|
						SLAB_PANIC,
					NULL);
#endif
	sizes++;
	names++;
}
Create the remaining general-purpose (kmalloc) caches, plus the DMA variants when CONFIG_ZONE_DMA is enabled. The ULONG_MAX entry terminates the malloc_sizes[] table.
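The sizes and names cursors walk tables generated from kmalloc_sizes.h; a sketch of the mechanism (see mm/slab.c, where the names[] table is built the same way from the same header):

/* One entry per kmalloc size class (sketch from mm/slab.c). */
struct cache_sizes {
	size_t			cs_size;
	struct kmem_cache	*cs_cachep;
#ifdef CONFIG_ZONE_DMA
	struct kmem_cache	*cs_dmacachep;
#endif
};

struct cache_sizes malloc_sizes[] = {
#define CACHE(x) { .cs_size = (x) },
#include <linux/kmalloc_sizes.h>	/* expands to CACHE(32) CACHE(64) ... */
	CACHE(ULONG_MAX)		/* sentinel that ends the loop above */
#undef CACHE
};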
struct array_cache *ptr;

ptr = kmalloc(sizeof(struct arraycache_init), GFP_NOWAIT);

BUG_ON(cpu_cache_get(&cache_cache) != &initarray_cache.cache);
memcpy(ptr, cpu_cache_get(&cache_cache),
       sizeof(struct arraycache_init));
/*
 * Do not assume that spinlocks can be initialized via memcpy:
 */
spin_lock_init(&ptr->lock);

cache_cache.array[smp_processor_id()] = ptr;

ptr = kmalloc(sizeof(struct arraycache_init), GFP_NOWAIT);

BUG_ON(cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep)
       != &initarray_generic.cache);
memcpy(ptr, cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep),
       sizeof(struct arraycache_init));
/*
 * Do not assume that spinlocks can be initialized via memcpy:
 */
spin_lock_init(&ptr->lock);

malloc_sizes[INDEX_AC].cs_cachep->array[smp_processor_id()] =
		ptr;
Allocate proper array_cache objects from the freshly created caches and swap them in for the static bootstrap array_caches that cache_cache and malloc_sizes[INDEX_AC].cs_cachep have used so far.
{
	int nid;

	for_each_online_node(nid) {
		init_list(&cache_cache, &initkmem_list3[CACHE_CACHE + nid], nid);

		init_list(malloc_sizes[INDEX_AC].cs_cachep,
			  &initkmem_list3[SIZE_AC + nid], nid);

		if (INDEX_AC != INDEX_L3) {
			init_list(malloc_sizes[INDEX_L3].cs_cachep,
				  &initkmem_list3[SIZE_L3 + nid], nid);
		}
	}
}
For each online node, init_list() allocates a proper kmem_list3 object and swaps it in for the static bootstrap kmem_list3 used by cache_cache, by malloc_sizes[INDEX_AC].cs_cachep, and (when distinct) by malloc_sizes[INDEX_L3].cs_cachep.
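init_list() mirrors the array_cache replacement performed just above; a sketch paraphrased from mm/slab.c:

/* Replace a bootstrap kmem_list3 with one allocated from the new caches. */
static void init_list(struct kmem_cache *cachep, struct kmem_list3 *list,
		      int nodeid)
{
	struct kmem_list3 *ptr;

	ptr = kmalloc_node(sizeof(struct kmem_list3), GFP_NOWAIT, nodeid);
	BUG_ON(!ptr);

	memcpy(ptr, list, sizeof(struct kmem_list3));
	/* spinlocks cannot be copied with memcpy; reinitialize */
	spin_lock_init(&ptr->list_lock);

	MAKE_ALL_LISTS(cachep, ptr, nodeid);	/* fix up the list heads */
	cachep->nodelists[nodeid] = ptr;
}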
g_cpucache_up = EARLY;
Mark the bootstrap state as EARLY: the SLAB allocator is now up and ready for use.