在Linux中,对物理内存的管理是怎么实现的呢?对页面的分配和回收是如何实现的呢?
Linux中对物理内存的管理是通过zone来管理的,以X86为例,16MB以下的物理内存为DMA zone;896MB以下的区域为normal zone;然后896MB以上的区域统称为HighMemory zone;然后在分配内存的时候,可以指定区域来实现在某个特定的zone分配页面;
那么zone又是如何管理物理内存的呢?Linux同样是使用伙伴系统来管理物理内存,来尽量避免碎片的产生;对伙伴系统的原理应该很清楚,就不说了
/*
 * struct zone — per-zone memory-management state (DMA / Normal / HighMem).
 * Each zone owns its own buddy allocator free lists and LRU reclaim lists.
 */
struct zone {
/* Fields commonly accessed by the page allocator */
/* zone watermarks, access with *_wmark_pages(zone) macros */
unsigned long watermark[NR_WMARK];
/*
 * We don't know if the memory that we're going to allocate will be freeable
 * or/and it will be released eventually, so to avoid totally wasting several
 * GB of ram we must reserve some of the lower zone memory (otherwise we risk
 * to run OOM on the lower zones despite there's tons of freeable ram
 * on the higher zones). This array is recalculated at runtime if the
 * sysctl_lowmem_reserve_ratio sysctl changes.
 */
unsigned long lowmem_reserve[MAX_NR_ZONES];
/* per-CPU page caches, so single-page allocs can avoid taking zone->lock */
struct per_cpu_pageset __percpu *pageset;
/*
 * free areas of different sizes
 */
spinlock_t lock;	/* protects free_area[] (the buddy free lists) */
struct free_area free_area[MAX_ORDER];	/* one entry per allocation order */
/* Fields commonly accessed by the page reclaim scanner */
spinlock_t lru_lock;	/* protects lruvec and pages_scanned */
struct lruvec lruvec;	/* LRU lists walked by the reclaim scanner */
unsigned long pages_scanned; /* since last reclaim */
unsigned long flags; /* zone flags, see below */
/*
 * Hashed table of wait queues; presumably used by tasks sleeping on
 * per-page events (wait_on_page_bit and friends) — confirm against
 * the kernel version this was taken from.
 */
wait_queue_head_t * wait_table;
unsigned long wait_table_hash_nr_entries;	/* number of queues in the table */
unsigned long wait_table_bits;	/* table size expressed as a power of two */
/*
 * spanned_pages is the total pages spanned by the zone, including
 * holes, which is calculated as:
 * spanned_pages = zone_end_pfn - zone_start_pfn;
 *
 * present_pages is physical pages existing within the zone, which
 * is calculated as:
 * present_pages = spanned_pages - absent_pages(pages in holes);
 *
 * managed_pages is present pages managed by the buddy system, which
 * is calculated as (reserved_pages includes pages allocated by the
 * bootmem allocator):
 * managed_pages = present_pages - reserved_pages;
 *
 * So present_pages may be used by memory hotplug or memory power
 * management logic to figure out unmanaged pages by checking
 * (present_pages - managed_pages). And managed_pages should be used
 * by page allocator and vm scanner to calculate all kinds of watermarks
 * and thresholds.
 */
unsigned long spanned_pages;
unsigned long present_pages;
unsigned long managed_pages;
/*
 * rarely used fields:
 */
const char *name;	/* zone name, e.g. "DMA", "Normal", "HighMem" */
};
其中和伙伴系统有关的是:
struct free_area free_area[MAX_ORDER]; //MAX_ORDER=11
11个free_area 分别为 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 和 1024个页面,所以最大的为4MB;
/*
 * One order of the buddy allocator: tracks every free block of 2^order
 * contiguous pages in this zone.
 * NOTE(review): free_list is per migrate type here, but the __rmqueue
 * excerpt below indexes it as a single list — the two snippets appear to
 * come from different kernel versions.
 */
struct free_area {
struct list_head free_list[MIGRATE_TYPES];	/* free blocks, one list per migrate type */
unsigned long nr_free;	/* total number of free blocks of this order */
};
所有的空闲页面都挂在free_list下;伙伴系统的回收和分配方式很容易理解,就不说了,至于代码就不贴在这里了
伙伴系统是物理内存页分配的基础,在分配页面的时候,最主要的函数是__alloc_pages(),而它的大体过程为
/*
 * Simplified core of __alloc_pages(): walk the zonelist in preference
 * order; the first zone whose free memory is above its watermark gets
 * to satisfy the request via buffered_rmqueue().
 */
for (i = 0; (z=zonelist->zones[i]) != NULL; i++) {
/* watermark check: is it safe to take 2^order pages from this zone? */
if (zone_watermark_ok(z, order, ...)) {
page = buffered_rmqueue(z, order, gfp_mask);
if (page)
return page;	/* success — stop scanning further zones */
}
}
zone_watermark_ok(z, order, ...)函数,与zone的临界值有关,用来检测内存分配的安全性,以后还会详细的讨论。
buffered_rmqueue()函数是在给定的zone分配页面,它继续调用__rmqueue()到伙伴系统中分配页面;
struct free_area *area;
unsigned int current_order;
for (current_order=order; current_order<11; ++current_order) {
area = zone->free_area + current_order;
if (!list_empty(&area->free_list))
goto block_found;
}
block_found:
page = list_entry(area->free_list.next, struct page, lru);
list_del(&page->lru);
ClearPagePrivate(page);
page->private = 0;
area->nr_free--;
zone->free_pages -= 1UL << order;
size = 1 << curr_order;
while (curr_order > order) {
area--;
curr_order--;
size >>= 1;
buddy = page + size;
/* insert buddy as first element in the list */
list_add(&buddy->lru, &area->free_list);
area->nr_free++;
buddy->private = curr_order;
SetPagePrivate(buddy);
}
return page;
return NULL;
这样就可以完成对物理页面的分配了,所有的物理页面的分配,例如slab的分配,都是基于上面的过程实现的!