Each zone contains an array of free lists, and every page owned by the buddy system is linked onto one of them. MAX_ORDER is normally 11.
struct zone {
...........................
struct free_area free_area[MAX_ORDER];
..........................
};
struct free_area {
struct list_head free_list[MIGRATE_TYPES];
unsigned long nr_free;
};
enum {
MIGRATE_UNMOVABLE,
MIGRATE_RECLAIMABLE,
MIGRATE_MOVABLE,
MIGRATE_PCPTYPES, /* the number of types on the pcp lists */
MIGRATE_RESERVE = MIGRATE_PCPTYPES,
#ifdef CONFIG_CMA
MIGRATE_CMA,
#endif
#ifdef CONFIG_MEMORY_ISOLATION
MIGRATE_ISOLATE, /* can't allocate from here */
#endif
MIGRATE_TYPES
};
As you can see, free_area is itself an array, so zone->free_area can be treated as a two-dimensional table: the first index is the order, and every block hanging off free_area[order] consists of 2^order pages.
free_area[i].free_list is again an array, indexed by migrate type; the small sketch below models this layout.
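The indexing can be pictured with a short piece of plain C. This is only a userspace model of the data-structure shape, not kernel code; the MIGRATE_TYPES value of 6 is an assumption that depends on CONFIG_CMA/CONFIG_MEMORY_ISOLATION.
/* Userspace model of zone->free_area indexing (illustrative only). */
#include <stdio.h>

#define MAX_ORDER     11
#define MIGRATE_TYPES 6            /* assumed: CMA and ISOLATE both enabled */

struct list_head { struct list_head *next, *prev; };

struct free_area {
    struct list_head free_list[MIGRATE_TYPES]; /* one list per migrate type */
    unsigned long nr_free;                     /* free blocks of this order */
};

struct zone_model {
    struct free_area free_area[MAX_ORDER];     /* index = order, block = 2^order pages */
};

int main(void)
{
    struct zone_model z;

    /* free_area behaves like a 2D table: [order][migratetype] -> list head */
    for (int order = 0; order < MAX_ORDER; order++) {
        for (int mt = 0; mt < MIGRATE_TYPES; mt++) {
            struct list_head *head = &z.free_area[order].free_list[mt];
            head->next = head->prev = head;    /* empty circular list */
        }
        z.free_area[order].nr_free = 0;
    }

    printf("order 3, migratetype 2: list head %p, block size %d pages\n",
           (void *)&z.free_area[3].free_list[2], 1 << 3);
    return 0;
}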
Freeing/releasing pages into the buddy system
start_kernel->mm_init->mem_init->free_all_bootmem
unsigned long __init free_all_bootmem(void)
{
unsigned long pages;
reset_all_zones_managed_pages();
pages = free_low_memory_core_early();
totalram_pages += pages;
return pages;
}
static unsigned long __init free_low_memory_core_early(void)
{
unsigned long count = 0;
phys_addr_t start, end;
u64 i;
for_each_free_mem_range(i, NUMA_NO_NODE, &start, &end, NULL)
{
printk(KERN_EMERG "\r\n%s, %d, start %lx, end %lx\n", __FUNCTION__, __LINE__, start, end);
count += __free_memory_core(start, end);
}
return count;
}
Why are there so many mem ranges? for_each_free_mem_range() walks every free region recorded in memblock, so each region is handed to __free_memory_core() separately.
__free_memory_core-->__free_pages_memory
static void __init __free_pages_memory(unsigned long start, unsigned long end)
{
int order;
while (start < end) {
order = min(MAX_ORDER - 1UL, __ffs(start));
while (start + (1UL << order) > end)
order--;
printk(KERN_EMERG "\r\n%s, %d, start 0x%lx, order 0x%lx\n", __FUNCTION__, __LINE__, start, order);
__free_pages_bootmem(pfn_to_page(start), order);
start += (1UL << order);
}
}
Below is part of the output. Pages are added to the buddy system in steps of 2^order, where each step's order is determined by the position of the lowest set bit of the starting page frame number. For example:
in the first step 0x60656 = (110 0000 0110 0101 0110)b has its lowest set bit at position 1, so order = 1 and 2^1 pages are added;
in the second step 0x60658 = (110 0000 0110 0101 1000)b has its lowest set bit at position 3, so order = 3 and 2^3 pages are added.
__free_pages_memory, 90 new round start 0x60656, end 0x68000
__free_pages_memory, 99, start 0x60656, order 0x1
__free_pages_memory, 99, start 0x60658, order 0x3
__free_pages_memory, 99, start 0x60660, order 0x5
__free_pages_memory, 99, start 0x60680, order 0x7
__free_pages_memory, 99, start 0x60700, order 0x8
__free_pages_memory, 99, start 0x60800, order 0xa
__free_pages_memory, 99, start 0x60c00, order 0xa
__free_pages_memory, 99, start 0x61000, order 0xa
__free_pages_memory, 99, start 0x61400, order 0xa
__free_pages_memory, 99, start 0x61800, order 0xa
__free_pages_memory, 99, start 0x61c00, order 0xa
__free_pages_memory, 99, start 0x62000, order 0xa
__free_pages_memory, 99, start 0x62400, order 0xa
__free_pages_memory, 99, start 0x62800, order 0xa
__free_pages_memory, 99, start 0x62c00, order 0xa
__free_pages_memory, 99, start 0x63000, order 0xa
__free_pages_memory, 99, start 0x63400, order 0xa
__free_pages_memory, 99, start 0x63800, order 0xa
__free_pages_memory, 99, start 0x63c00, order 0xa
__free_pages_memory, 99, start 0x64000, order 0xa
__free_pages_memory, 99, start 0x64400, order 0xa
__free_pages_memory, 99, start 0x64800, order 0xa
__free_pages_memory, 99, start 0x64c00, order 0xa
__free_pages_memory, 99, start 0x65000, order 0xa
__free_pages_memory, 99, start 0x65400, order 0xa
__free_pages_memory, 99, start 0x65800, order 0xa
__free_pages_memory, 99, start 0x65c00, order 0xa
__free_pages_memory, 99, start 0x66000, order 0xa
__free_pages_memory, 99, start 0x66400, order 0xa
__free_pages_memory, 99, start 0x66800, order 0xa
__free_pages_memory, 99, start 0x66c00, order 0xa
__free_pages_memory, 99, start 0x67000, order 0xa
__free_pages_memory, 99, start 0x67400, order 0xa
__free_pages_memory, 99, start 0x67800, order 0xa
__free_pages_memory, 99, start 0x67c00, order 0xa
__free_pages_memory, 90 new round start 0x9f7dc, end 0x9f7dd
__free_pages_memory, 99, start 0x9f7dc, order 0x0
__free_pages_memory, 90 new round start 0x9f7ee, end 0x9f7ef
__free_pages_memory, 99, start 0x9f7ee, order 0x0
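The stepping can be reproduced with a small userspace sketch. It only replays the loop above for the first range in the log, assuming __ffs() behaves like GCC's __builtin_ctzl() and MAX_ORDER is 11.
/* Userspace replay of the __free_pages_memory stepping (illustrative only). */
#include <stdio.h>

#define MAX_ORDER 11UL

static unsigned long min_ul(unsigned long a, unsigned long b)
{
    return a < b ? a : b;
}

int main(void)
{
    unsigned long start = 0x60656, end = 0x68000;   /* first range from the log */

    while (start < end) {
        /* order is limited by the lowest set bit of start and by MAX_ORDER - 1 */
        unsigned long order = min_ul(MAX_ORDER - 1,
                                     (unsigned long)__builtin_ctzl(start));
        while (start + (1UL << order) > end)
            order--;
        printf("start 0x%lx, order %lu (adds %lu pages)\n",
               start, order, 1UL << order);
        start += 1UL << order;
    }
    return 0;
}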
__free_pages_bootmem() adds the 2^order pages starting at page into the buddy system:
void __init __free_pages_bootmem(struct page *page, unsigned int order)
{
unsigned int nr_pages = 1 << order;
struct page *p = page;
unsigned int loop;
prefetchw(p);
for (loop = 0; loop < (nr_pages - 1); loop++, p++) {
prefetchw(p + 1);
__ClearPageReserved(p);
set_page_count(p, 0);
}
__ClearPageReserved(p);
set_page_count(p, 0);
page_zone(page)->managed_pages += nr_pages;
set_page_refcounted(page);
__free_pages(page, order);
}
void __free_pages(struct page *page, unsigned int order)
{
if (put_page_testzero(page)) {
if (order == 0)
free_hot_cold_page(page, false);
else
__free_pages_ok(page, order);
}
}
static void __free_pages_ok(struct page *page, unsigned int order)
{
unsigned long flags;
int migratetype;
unsigned long pfn = page_to_pfn(page);
if (!free_pages_prepare(page, order))
return;
migratetype = get_pfnblock_migratetype(page, pfn);
local_irq_save(flags);
__count_vm_events(PGFREE, 1 << order);
set_freepage_migratetype(page, migratetype);
free_one_page(page_zone(page), page, pfn, order, migratetype);
local_irq_restore(flags);
}
static void free_one_page(struct zone *zone,
struct page *page, unsigned long pfn,
unsigned int order,
int migratetype)
{
spin_lock(&zone->lock);
zone->pages_scanned = 0;
__free_one_page(page, pfn, zone, order, migratetype);
if (unlikely(!is_migrate_isolate(migratetype)))
__mod_zone_freepage_state(zone, 1 << order, migratetype);
spin_unlock(&zone->lock);
}
__free_one_page() is the function that actually links the page into zone->free_area[order].free_list[migratetype]. It starts at the given order and, as long as the block's buddy is also free, merges the two and moves up one order; when no further merge is possible, the resulting block is added to the appropriate list.
static inline void __free_one_page(struct page *page,
unsigned long pfn,
struct zone *zone, unsigned int order,
int migratetype)
{
unsigned long page_idx;
unsigned long combined_idx;
unsigned long uninitialized_var(buddy_idx);
struct page *buddy;
VM_BUG_ON(!zone_is_initialized(zone));
if (unlikely(PageCompound(page)))
if (unlikely(destroy_compound_page(page, order)))
return;
VM_BUG_ON(migratetype == -1);
/* take the low 11 bits of the pfn; what do these low bits stand for? (discussed after the code) */
page_idx = pfn & ((1 << MAX_ORDER) - 1);
VM_BUG_ON_PAGE(page_idx & ((1 << order) - 1), page);
VM_BUG_ON_PAGE(bad_range(zone, page), page);
printk(KERN_EMERG "\r\nYYYYYYYYYYYYY page 0x%p, pfn 0x%lx, page_idx 0x%lx, sizeof(struct page) 0x%lx\n", page, pfn, page_idx, sizeof(struct page));
while (order < MAX_ORDER-1) {
buddy_idx = __find_buddy_index(page_idx, order);
/*
 * The struct pages can be viewed as one big array, so page + (buddy_idx - page_idx)
 * moves (buddy_idx - page_idx) * sizeof(struct page) bytes from page's address.
 */
buddy = page + (buddy_idx - page_idx);
printk(KERN_EMERG "\r\page 0x%p, page_idx 0x%lx,buddy 0x%p buddy_idx 0x%lx, order0x%lx\n",\
page, page_idx, buddy, buddy_idx, order);
/* check whether this block and its buddy can be merged; if not, stop */
if (!page_is_buddy(page, buddy, order))
break;
/*
* Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page,
* merge with it and move up one order.
*/
if (page_is_guard(buddy)) {
clear_page_guard_flag(buddy);
set_page_private(page, 0);
__mod_zone_freepage_state(zone, 1 << order,
migratetype);
} else { /* the buddy is free: take it off its free list so the two can merge */
list_del(&buddy->lru);
zone->free_area[order].nr_free--;
rmv_page_order(buddy);
}
combined_idx = buddy_idx & page_idx;//combined_idx指向两者较小的地址
page = page + (combined_idx - page_idx);
page_idx = combined_idx;
order++;
}
set_page_order(page, order);
/*
* If this is not the largest possible page, check if the buddy
* of the next-highest order is free. If it is, it's possible
* that pages are being freed that will coalesce soon. In case,
* that is happening, add the free page to the tail of the list
* so it's less likely to be used soon and more likely to be merged
* as a higher order page
*/
if ((order < MAX_ORDER-2) && pfn_valid_within(page_to_pfn(buddy))) {
struct page *higher_page, *higher_buddy;
combined_idx = buddy_idx & page_idx;
higher_page = page + (combined_idx - page_idx);
buddy_idx = __find_buddy_index(combined_idx, order + 1);
higher_buddy = higher_page + (buddy_idx - combined_idx);
if (page_is_buddy(higher_page, higher_buddy, order + 1)) {
list_add_tail(&page->lru,
&zone->free_area[order].free_list[migratetype]);
goto out;
}
}
/* at this point the (possibly merged) block is linked into free_area */
list_add(&page->lru, &zone->free_area[order].free_list[migratetype]);
out:
zone->free_area[order].nr_free++;
}
The debug printk above produces output such as the line quoted below. From one run:
when pfn = 0x9e852, page_idx is 0x52 (its low 11 bits) and buddy_idx = 0x50, i.e. the buddy lies in front of it.
At that point order is 1, so the buddy block covers 2^1 = 2 pages. The printout also shows that sizeof(struct page) is 0x20 bytes.
The struct page passed in (it describes the first page of the block, or put differently, the block's first page is managed by it) sits at virtual address 0xbffcca40. Since its buddy is in front and the block is 2 pages long, the buddy's struct page is at 0xbffcca40 - 0x20 * 2 = 0xbffcca00.
Likewise, for page 0xbffcca00 the buddy lies behind it and the block is 8 pages long, so the buddy's struct page is 0xbffcca00 + 0x20 * 8 = 0xbffccb00.
The page argument is a virtual address: at init time the kernel works out how much memory there is and how many 4 KB pages that amounts to, and hence how many struct page descriptors are needed to manage the physical memory. These struct pages are laid out contiguously; this is essentially mem_map.
page 0xbffcca00, page_idx 0x50,buddy 0xbffccb00 buddy_idx 0x58, order0x3
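This pointer arithmetic can be double-checked with a trivial calculation, assuming sizeof(struct page) is 0x20 as the debug output reports (the addresses are the ones from this particular run):
/* Re-check of the struct page pointer arithmetic from the log (illustrative only). */
#include <stdio.h>

int main(void)
{
    unsigned long page = 0xbffcca40;   /* struct page * for page_idx 0x52 */
    unsigned long sz   = 0x20;         /* sizeof(struct page) on this build */

    /* order 1: buddy_idx 0x50 lies in front, i.e. 2 struct pages back */
    printf("buddy at order 1: 0x%lx\n", page - 2 * sz);          /* 0xbffcca00 */

    /* after merging, page 0xbffcca00 (page_idx 0x50) has its order-3 buddy
     * at buddy_idx 0x58, i.e. 8 struct pages ahead */
    printf("buddy at order 3: 0x%lx\n", 0xbffcca00UL + 8 * sz);  /* 0xbffccb00 */
    return 0;
}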
Why is page_idx made of the low 11 bits of the pfn?
/* take the low 11 bits of the pfn */
page_idx = pfn & ((1 << MAX_ORDER) - 1);
The free_area array has 11 entries, for orders 0 to 10, i.e. block sizes from 1 to 1024 pages. A pfn's buddy pfn2 must therefore lie between pfn1 - 1024 and pfn1 + 1024, so the low 11 bits of the pfn are enough to compute buddy_idx. (That said, it looks as if the buddy's index could also be obtained without masking off the low 11 bits.)
Reference: 内存分配[三] - Linux中Buddy系统的实现 - 知乎
The buddy allocator manages memory at page granularity, while slab manages it at byte granularity: slab takes large blocks allocated from the buddy system and subdivides them into small objects.
MAX_ORDER normally defaults to 11. free_area is an array of free lists, and the array index and the block size on that list are related by a power of two: the list at index i holds blocks of 2^i pages (the buddy system's basic unit is the page).
Two page blocks on the same list are buddies if all three conditions hold:
1. The two blocks have the same size, say b = 2^i (blocks on the same list all have that size anyway).
2. The two blocks are physically contiguous; only then can they be merged.
3. The physical address of the first block is a multiple of 2 * b * PAGE_SIZE, i.e. its starting page frame number is a multiple of 2 * b. In other words a buddy pair always starts at an even multiple of the block size, so [0, b-1] and [b, 2*b-1] form one pair.
If the list's order is n and block 1's page number is page_id, its buddy's page number is found by:
buddy_id = page_id ^ (1 << n);
Here page_id is the low 11 bits of the physical page frame number (page_idx = pfn & ((1 << MAX_ORDER) - 1)), which guarantees the block indexed this way belongs to one of the lists.
For example, with order = 3 and block page number 0, the conditions above give a buddy page number of 8: 8 = 0 ^ 8 (XOR).
Similarly, for page number 24 (a multiple of 8, since order is 3), the buddy is 24 ^ 8 = 16.
In other words, if the index (in units of the block size) is even we look forward for the buddy, and if it is odd we look backward. The short sketch below checks this arithmetic.
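A minimal sketch of the buddy-index arithmetic, in plain userspace C, mirroring __find_buddy_index() and the combined_idx step in __free_one_page():
/* Userspace check of buddy_idx = page_idx ^ (1 << order) and
 * combined_idx = buddy_idx & page_idx (illustrative only). */
#include <stdio.h>

static unsigned long find_buddy_index(unsigned long page_idx, unsigned int order)
{
    return page_idx ^ (1UL << order);
}

int main(void)
{
    /* order 3: block 0 pairs with block 8, block 24 pairs with block 16 */
    printf("buddy of 0  at order 3: %lu\n", find_buddy_index(0, 3));   /* 8  */
    printf("buddy of 24 at order 3: %lu\n", find_buddy_index(24, 3));  /* 16 */

    /* the merged block always starts at the lower of the two indexes */
    unsigned long page_idx = 24, buddy_idx = find_buddy_index(24, 3);
    printf("combined_idx: %lu\n", buddy_idx & page_idx);               /* 16 */
    return 0;
}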
Buddy system memory allocation
The kernel's commonly used interface for allocating physical memory is alloc_pages(), which allocates one or more physically contiguous pages.
#define alloc_pages(gfp_mask, order) \
alloc_pages_node(numa_node_id(), gfp_mask, order)
gfp stands for "get free pages".
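A typical use of this interface looks roughly like the sketch below. It is only an illustration written in kernel-module context; the function name grab_pages() and the order value 2 are made up.
/* Sketch of alloc_pages()/__free_pages() usage (kernel context, illustrative only). */
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/errno.h>

static int grab_pages(void)
{
    struct page *page;
    void *buf;

    /* ask the buddy system for 2^2 = 4 physically contiguous pages */
    page = alloc_pages(GFP_KERNEL, 2);
    if (!page)
        return -ENOMEM;

    buf = page_address(page);   /* lowmem pages have a direct kernel virtual address */
    (void)buf;                  /* ... use the buffer here ... */

    __free_pages(page, 2);      /* hand the block back to the buddy system */
    return 0;
}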
gfp_mask is the allocation mask. In the kernel it falls into two groups: zone modifiers and action modifiers.
Zone modifiers select which zone the pages come from; the low 4 bits of the mask are __GFP_DMA, __GFP_HIGHMEM, __GFP_DMA32 and __GFP_MOVABLE:
#define __GFP_DMA ((__force gfp_t)___GFP_DMA)
#define __GFP_HIGHMEM ((__force gfp_t)___GFP_HIGHMEM)
#define __GFP_DMA32 ((__force gfp_t)___GFP_DMA32)
#define __GFP_MOVABLE ((__force gfp_t)___GFP_MOVABLE) /* Page is movable */
#define ___GFP_DMA 0x01u
#define ___GFP_HIGHMEM 0x02u
#define ___GFP_DMA32 0x04u
#define ___GFP_MOVABLE 0x08u
Action modifiers change the allocator's behaviour; they are defined as follows:
#define __GFP_WAIT ((__force gfp_t)___GFP_WAIT) /* Can wait and reschedule? */
#define __GFP_HIGH ((__force gfp_t)___GFP_HIGH) /* Should access emergency pools? */
#define __GFP_IO ((__force gfp_t)___GFP_IO) /* Can start physical IO? */
#define __GFP_FS ((__force gfp_t)___GFP_FS) /* Can call down to low-level FS? */
............................................
/* many more not shown here */
#define ___GFP_WAIT 0x10u
#define ___GFP_HIGH 0x20u
#define ___GFP_IO 0x40u
#define ___GFP_FS 0x80u
............................................
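For orientation, the familiar composite masks are built out of these bits; in kernels of this era they are defined in include/linux/gfp.h roughly as follows (quoted for illustration, check the exact tree being read):
/* Common composite GFP masks (3.x-era definitions) */
#define GFP_ATOMIC  (__GFP_HIGH)
#define GFP_NOIO    (__GFP_WAIT)
#define GFP_NOFS    (__GFP_WAIT | __GFP_IO)
#define GFP_KERNEL  (__GFP_WAIT | __GFP_IO | __GFP_FS)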
alloc_pages-->alloc_pages_node(numa_node_id(), gfp_mask, order)-->
__alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask))->
__alloc_pages_nodemask(gfp_mask, order, zonelist, NULL)
struct page *
__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
struct zonelist *zonelist, nodemask_t *nodemask)
{
enum zone_type high_zoneidx = gfp_zone(gfp_mask); /* derive the highest usable zone index from gfp_mask */
struct zone *preferred_zone;
struct zoneref *preferred_zoneref;
struct page *page = NULL;
int migratetype = allocflags_to_migratetype(gfp_mask);
unsigned int cpuset_mems_cookie;
int alloc_flags = ALLOC_WMARK_LOW|ALLOC_CPUSET|ALLOC_FAIR;
int classzone_idx;
gfp_mask &= gfp_allowed_mask;
lockdep_trace_alloc(gfp_mask);
might_sleep_if(gfp_mask & __GFP_WAIT);
if (should_fail_alloc_page(gfp_mask, order))
return NULL;
/*
* Check the zones suitable for the gfp_mask contain at least one
* valid zone. It's possible to have an empty zonelist as a result
* of GFP_THISNODE and a memoryless node
*/
if (unlikely(!zonelist->_zonerefs->zone))
return NULL;
retry_cpuset:
cpuset_mems_cookie = read_mems_allowed_begin();
/* The preferred zone is used for statistics later */
/* starting from high_zoneidx, find the zone to allocate from */
preferred_zoneref = first_zones_zonelist(zonelist, high_zoneidx,
nodemask ? : &cpuset_current_mems_allowed,
&preferred_zone);
if (!preferred_zone)
goto out;
classzone_idx = zonelist_zone_idx(preferred_zoneref);
#ifdef CONFIG_CMA
if (allocflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
alloc_flags |= ALLOC_CMA;
#endif
retry:
/* First allocation attempt */
/* first allocation attempt; if it fails, __alloc_pages_slowpath is called */
page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order,
zonelist, high_zoneidx, alloc_flags,
preferred_zone, classzone_idx, migratetype);
if (unlikely(!page)) {
/*
* The first pass makes sure allocations are spread
* fairly within the local node. However, the local
* node might have free pages left after the fairness
* batches are exhausted, and remote zones haven't
* even been considered yet. Try once more without
* fairness, and include remote zones now, before
* entering the slowpath and waking kswapd: prefer
* spilling to a remote zone over swapping locally.
*/
if (alloc_flags & ALLOC_FAIR) {
reset_alloc_batches(zonelist, high_zoneidx,
preferred_zone);
alloc_flags &= ~ALLOC_FAIR;
goto retry;
}
/*
* Runtime PM, block IO and its error handling path
* can deadlock because I/O on the device might not
* complete.
*/
gfp_mask = memalloc_noio_flags(gfp_mask);
page = __alloc_pages_slowpath(gfp_mask, order,
zonelist, high_zoneidx, nodemask,
preferred_zone, classzone_idx, migratetype);
}
trace_mm_page_alloc(page, order, gfp_mask, migratetype);
out:
/*
* When updating a task's mems_allowed, it is possible to race with
* parallel threads in such a way that an allocation can fail while
* the mask is being updated. If a page allocation is about to fail,
* check if the cpuset changed during allocation and if so, retry.
*/
if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
goto retry_cpuset;
return page;
}
static struct page *
get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order,
struct zonelist *zonelist, int high_zoneidx, int alloc_flags,
struct zone *preferred_zone, int classzone_idx, int migratetype)
{
struct zoneref *z;
struct page *page = NULL;
struct zone *zone;
nodemask_t *allowednodes = NULL;/* zonelist_cache approximation */
int zlc_active = 0; /* set if using zonelist_cache */
int did_zlc_setup = 0; /* just call zlc_setup() one time */
bool consider_zone_dirty = (alloc_flags & ALLOC_WMARK_LOW) &&
(gfp_mask & __GFP_WRITE);
zonelist_scan:
/*
* Scan zonelist, looking for a zone with enough free.
* See also __cpuset_node_allowed_softwall() comment in kernel/cpuset.c.
*/
for_each_zone_zonelist_nodemask(zone, z, zonelist,
high_zoneidx, nodemask) {
unsigned long mark;
if (IS_ENABLED(CONFIG_NUMA) && zlc_active &&
!zlc_zone_worth_trying(zonelist, z, allowednodes))
continue;
if (cpusets_enabled() &&
(alloc_flags & ALLOC_CPUSET) &&
!cpuset_zone_allowed_softwall(zone, gfp_mask))
continue;
/*
* Distribute pages in proportion to the individual
* zone size to ensure fair page aging. The zone a
* page was allocated in should have no effect on the
* time the page has in memory before being reclaimed.
*/
if (alloc_flags & ALLOC_FAIR) {
if (!zone_local(preferred_zone, zone))
continue;
if (zone_page_state(zone, NR_ALLOC_BATCH) <= 0)
continue;
}
/*
* When allocating a page cache page for writing, we
* want to get it from a zone that is within its dirty
* limit, such that no single zone holds more than its
* proportional share of globally allowed dirty pages.
* The dirty limits take into account the zone's
* lowmem reserves and high watermark so that kswapd
* should be able to balance it without having to
* write pages from its LRU list.
*
* This may look like it could increase pressure on
* lower zones by failing allocations in higher zones
* before they are full. But the pages that do spill
* over are limited as the lower zones are protected
* by this very same mechanism. It should not become
* a practical burden to them.
*
* XXX: For now, allow allocations to potentially
* exceed the per-zone dirty limit in the slowpath
* (ALLOC_WMARK_LOW unset) before going into reclaim,
* which is important when on a NUMA setup the allowed
* zones are together not big enough to reach the
* global limit. The proper fix for these situations
* will require awareness of zones in the
* dirty-throttling and the flusher threads.
*/
if (consider_zone_dirty && !zone_dirty_ok(zone))
continue;
mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
if (!zone_watermark_ok(zone, order, mark,
classzone_idx, alloc_flags)) {
int ret;
/* Checked here to keep the fast path fast */
BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
if (alloc_flags & ALLOC_NO_WATERMARKS)
goto try_this_zone;
if (IS_ENABLED(CONFIG_NUMA) &&
!did_zlc_setup && nr_online_nodes > 1) {
/*
* we do zlc_setup if there are multiple nodes
* and before considering the first zone allowed
* by the cpuset.
*/
allowednodes = zlc_setup(zonelist, alloc_flags);
zlc_active = 1;
did_zlc_setup = 1;
}
if (zone_reclaim_mode == 0 ||
!zone_allows_reclaim(preferred_zone, zone))
goto this_zone_full;
/*
* As we may have just activated ZLC, check if the first
* eligible zone has failed zone_reclaim recently.
*/
if (IS_ENABLED(CONFIG_NUMA) && zlc_active &&
!zlc_zone_worth_trying(zonelist, z, allowednodes))
continue;
ret = zone_reclaim(zone, gfp_mask, order);
switch (ret) {
case ZONE_RECLAIM_NOSCAN:
/* did not scan */
continue;
case ZONE_RECLAIM_FULL:
/* scanned but unreclaimable */
continue;
default:
/* did we reclaim enough */
if (zone_watermark_ok(zone, order, mark,
classzone_idx, alloc_flags))
goto try_this_zone;
/*
* Failed to reclaim enough to meet watermark.
* Only mark the zone full if checking the min
* watermark or if we failed to reclaim just
* 1<<order pages or else the page allocator
* fastpath will prematurely mark zones full
* when the watermark is between the low and
* min watermarks.
*/
if (((alloc_flags & ALLOC_WMARK_MASK) == ALLOC_WMARK_MIN) ||
ret == ZONE_RECLAIM_SOME)
goto this_zone_full;
continue;
}
}
try_this_zone:
page = buffered_rmqueue(preferred_zone, zone, order,
gfp_mask, migratetype);
if (page)
break;
this_zone_full:
if (IS_ENABLED(CONFIG_NUMA) && zlc_active)
zlc_mark_zone_full(zonelist, z);
}
if (unlikely(IS_ENABLED(CONFIG_NUMA) && page == NULL && zlc_active)) {
/* Disable zlc cache for second zonelist scan */
zlc_active = 0;
goto zonelist_scan;
}
if (page)
/*
* page->pfmemalloc is set when ALLOC_NO_WATERMARKS was
* necessary to allocate the page. The expectation is
* that the caller is taking steps that will free more
* memory. The caller should avoid the page being used
* for !PFMEMALLOC purposes.
*/
page->pfmemalloc = !!(alloc_flags & ALLOC_NO_WATERMARKS);
return page;
}

#define for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \
for (z = first_zones_zonelist(zlist, highidx, nodemask, &zone); \
zone; \
z = next_zones_zonelist(++z, highidx, nodemask, &zone)) \
As you can see, get_page_from_freelist() walks the zones one by one, starting from high_zoneidx, and tries to allocate pages from each.
The actual allocation from the buddy system is finally done by buffered_rmqueue():
static inline
struct page *buffered_rmqueue(struct zone *preferred_zone,
struct zone *zone, unsigned int order,
gfp_t gfp_flags, int migratetype)
{
unsigned long flags;
struct page *page;
bool cold = ((gfp_flags & __GFP_COLD) != 0);
again:
/* order == 0: a single page is allocated from the per-CPU lists (zone->pageset) */
if (likely(order == 0)) {
struct per_cpu_pages *pcp;
struct list_head *list;
local_irq_save(flags);
pcp = &this_cpu_ptr(zone->pageset)->pcp;
list = &pcp->lists[migratetype];
if (list_empty(list)) {
pcp->count += rmqueue_bulk(zone, 0,
pcp->batch, list,
migratetype, cold);
if (unlikely(list_empty(list)))
goto failed;
}
if (cold)
page = list_entry(list->prev, struct page, lru);
else
page = list_entry(list->next, struct page, lru);
list_del(&page->lru);
pcp->count--;
} else { /* order > 0: allocate directly from the buddy free lists */
if (unlikely(gfp_flags & __GFP_NOFAIL)) {
WARN_ON_ONCE(order > 1);
}
spin_lock_irqsave(&zone->lock, flags);
page = __rmqueue(zone, order, migratetype);
spin_unlock(&zone->lock);
if (!page)
goto failed;
__mod_zone_freepage_state(zone, -(1 << order),
get_freepage_migratetype(page));
}
__mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order));
__count_zone_vm_events(PGALLOC, zone, 1 << order);
zone_statistics(preferred_zone, zone, gfp_flags);
local_irq_restore(flags);
VM_BUG_ON_PAGE(bad_range(zone, page), page);
if (prep_new_page(page, order, gfp_flags))
goto again;
return page;
failed:
local_irq_restore(flags);
return NULL;
}
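So for order == 0 the pages come from a per-CPU cache that rmqueue_bulk() refills in batches from the buddy free lists. The sketch below is only a userspace model of that idea; pcp_model, refill_from_buddy() and the batch size are invented names for illustration, not kernel code.
/* Userspace model of the per-CPU (pcp) order-0 fast path (illustrative only). */
#include <stdio.h>

#define BATCH 4

struct pcp_model {
    int count;       /* pages currently cached for this CPU */
    int pages[64];   /* stand-in for the per-migratetype pcp lists */
};

/* stand-in for rmqueue_bulk(): pull BATCH order-0 pages out of the buddy system */
static void refill_from_buddy(struct pcp_model *pcp)
{
    static int next_pfn = 0x1000;
    for (int i = 0; i < BATCH; i++)
        pcp->pages[pcp->count++] = next_pfn++;
}

static int alloc_order0(struct pcp_model *pcp)
{
    if (pcp->count == 0)              /* list empty: refill in one batch */
        refill_from_buddy(pcp);
    return pcp->pages[--pcp->count];  /* hand out a cached page */
}

int main(void)
{
    struct pcp_model pcp = { 0 };
    for (int i = 0; i < 6; i++)
        printf("got pfn 0x%x (still cached: %d)\n", alloc_order0(&pcp), pcp.count);
    return 0;
}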
In both of the cases above, however, the allocation ultimately goes through __rmqueue -> __rmqueue_smallest:
static struct page *__rmqueue(struct zone *zone, unsigned int order,
int migratetype)
{
struct page *page;
retry_reserve:
page = __rmqueue_smallest(zone, order, migratetype);
if (unlikely(!page) && migratetype != MIGRATE_RESERVE) {
page = __rmqueue_fallback(zone, order, migratetype);
/*
* Use MIGRATE_RESERVE rather than fail an allocation. goto
* is used because __rmqueue_smallest is an inline function
* and we want just one call site
*/
if (!page) {
migratetype = MIGRATE_RESERVE;
goto retry_reserve;
}
}
trace_mm_page_alloc_zone_locked(page, order, migratetype);
return page;
}
Starting from the requested order, __rmqueue_smallest() scans the zone's free lists: if the free_area at the current order has no free block of the requested migratetype, it moves up to the next order; otherwise it takes a block off that list. expand() then puts the unused part of the block back into the buddy system (a block is always 2^order pages, while the requested size need not match the block that was found).
static inline
struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
int migratetype)
{
unsigned int current_order;
struct free_area *area;
struct page *page;
/* Find a page of the appropriate size in the preferred list */
for (current_order = order; current_order < MAX_ORDER; ++current_order) {
area = &(zone->free_area[current_order]);
if (list_empty(&area->free_list[migratetype]))
continue;
page = list_entry(area->free_list[migratetype].next,
struct page, lru);
list_del(&page->lru);
rmv_page_order(page);
area->nr_free--;
expand(zone, page, order, current_order, area, migratetype);
set_freepage_migratetype(page, migratetype);
return page;
}
return NULL;
}
Note that the low and high passed to expand() are the requested order and the order of the block that was actually taken, so the leftover pieces are guaranteed to fit back onto lists within that range.
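As a worked example: if an order-2 request (low = 2) is served from an order-5 block (high = 5), expand() hands the upper halves back as blocks of order 4, 3 and 2. The userspace sketch below merely replays that arithmetic (the starting pfn 0x61000 is an arbitrary choice); the kernel implementation follows after it.
/* Userspace replay of the splitting done by expand() (illustrative only). */
#include <stdio.h>

static void expand_model(unsigned long base_pfn, int low, int high)
{
    unsigned long size = 1UL << high;

    while (high > low) {
        high--;
        size >>= 1;
        /* the upper half (starting at base_pfn + size) goes back to order `high` */
        printf("back on free list: pfn 0x%lx, order %d (%lu pages)\n",
               base_pfn + size, high, size);
    }
    printf("allocated block: pfn 0x%lx, order %d\n", base_pfn, low);
}

int main(void)
{
    expand_model(0x61000, 2, 5);   /* order-2 request served from an order-5 block */
    return 0;
}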
static inline void expand(struct zone *zone, struct page *page,
int low, int high, struct free_area *area,
int migratetype)
{
unsigned long size = 1 << high;
while (high > low) {
area--;
high--;
size >>= 1;
VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);
#ifdef CONFIG_DEBUG_PAGEALLOC
if (high < debug_guardpage_minorder()) {
/*
* Mark as guard pages (or page), that will allow to
* merge back to allocator when buddy will be freed.
* Corresponding page table entries will not be touched,
* pages will stay not present in virtual address space
*/
INIT_LIST_HEAD(&page[size].lru);
set_page_guard_flag(&page[size]);
set_page_private(&page[size], high);
/* Guard pages are not available for any usage */
__mod_zone_freepage_state(zone, -(1 << high),
migratetype);
continue;
}
#endif
list_add(&page[size].lru, &area->free_list[migratetype]);
area->nr_free++;
set_page_order(&page[size], high);
}
}