Linux Memory Subsystem: Allocating Physical Pages (alloc_pages)
The kernel's workhorse interface for allocating physical memory pages is alloc_pages(). It allocates one or more physically contiguous pages, and the number of pages can only be a power of two. Paths such as vmalloc, get_user_pages, and page allocation in the page-fault handler all obtain their physical pages through this interface.
Allocating pages
The alloc_pages function takes two parameters: the allocation mask gfp_mask, and the allocation order order.
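Before diving into the implementation, here is a minimal usage sketch (hypothetical driver-style code, error handling trimmed): allocate 2^order contiguous pages, get their kernel virtual address with page_address(), and release them with __free_pages().
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/string.h>

static int example_alloc(void)
{
	unsigned int order = 2;		/* 2^2 = 4 contiguous pages */
	struct page *pages;
	void *buf;

	/* May sleep, so only valid in process context */
	pages = alloc_pages(GFP_KERNEL, order);
	if (!pages)
		return -ENOMEM;

	/* Lowmem pages already have a kernel linear mapping */
	buf = page_address(pages);
	memset(buf, 0, PAGE_SIZE << order);

	__free_pages(pages, order);
	return 0;
}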
[include/linux/gfp.h]
#define alloc_pages(gfp_mask, order) \
alloc_pages_node(numa_node_id(), gfp_mask, order)
#define alloc_pages_vma(gfp_mask, order, vma, addr, node, false)\
alloc_pages(gfp_mask, order)
#define alloc_hugepage_vma(gfp_mask, vma, addr, order) \
alloc_pages(gfp_mask, order)
The allocation mask is the same set of GFP flags that kmalloc uses, likewise defined in gfp.h:
/* Plain integer GFP bitmasks. Do not use this directly. */
#define ___GFP_DMA 0x01u
#define ___GFP_HIGHMEM 0x02u
#define ___GFP_DMA32 0x04u
#define ___GFP_MOVABLE 0x08u
#define ___GFP_WAIT 0x10u
#define ___GFP_HIGH 0x20u
#define ___GFP_IO 0x40u
#define ___GFP_FS 0x80u
#define ___GFP_COLD 0x100u
#define ___GFP_NOWARN 0x200u
#define ___GFP_REPEAT 0x400u
#define ___GFP_NOFAIL 0x800u
#define ___GFP_NORETRY 0x1000u
#define ___GFP_MEMALLOC 0x2000u
#define ___GFP_COMP 0x4000u
#define ___GFP_ZERO 0x8000u
#define ___GFP_NOMEMALLOC 0x10000u
#define ___GFP_HARDWALL 0x20000u
#define ___GFP_THISNODE 0x40000u
#define ___GFP_RECLAIMABLE 0x80000u
#define ___GFP_NOTRACK 0x200000u
#define ___GFP_NO_KSWAPD 0x400000u
#define ___GFP_OTHER_NODE 0x800000u
#define ___GFP_WRITE 0x1000000u
The kernel divides these masks into two categories: zone modifiers and action modifiers. Zone modifiers select the zone from which the pages should be allocated, and occupy the lowest four bits of the mask.
/*
* GFP bitmasks..
*
* Zone modifiers (see linux/mmzone.h - low three bits)
*
* Do not put any conditional on these. If necessary modify the definitions
* without the underscores and use them consistently. The definitions here may
* be used in bit comparisons.
*/
#define __GFP_DMA ((__force gfp_t)___GFP_DMA)
#define __GFP_HIGHMEM ((__force gfp_t)___GFP_HIGHMEM)
#define __GFP_DMA32 ((__force gfp_t)___GFP_DMA32)
#define __GFP_MOVABLE ((__force gfp_t)___GFP_MOVABLE) /* Page is movable */
#define GFP_ZONEMASK (__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE)
Action modifiers affect the allocation behavior and are defined as follows:
/*
* Action modifiers - doesn't change the zoning
*
* __GFP_REPEAT: Try hard to allocate the memory, but the allocation attempt
* _might_ fail. This depends upon the particular VM implementation.
*
* __GFP_NOFAIL: The VM implementation _must_ retry infinitely: the caller
* cannot handle allocation failures. This modifier is deprecated and no new
* users should be added.
*
* __GFP_NORETRY: The VM implementation must not retry indefinitely.
*
* __GFP_MOVABLE: Flag that this page will be movable by the page migration
* mechanism or reclaimed
*/
#define __GFP_WAIT ((__force gfp_t)___GFP_WAIT) /* Can wait and reschedule? */
#define __GFP_HIGH ((__force gfp_t)___GFP_HIGH) /* Should access emergency pools? */
#define __GFP_IO ((__force gfp_t)___GFP_IO) /* Can start physical IO? */
#define __GFP_FS ((__force gfp_t)___GFP_FS) /* Can call down to low-level FS? */
#define __GFP_COLD ((__force gfp_t)___GFP_COLD) /* Cache-cold page required */
#define __GFP_NOWARN ((__force gfp_t)___GFP_NOWARN) /* Suppress page allocation failure warning */
#define __GFP_REPEAT ((__force gfp_t)___GFP_REPEAT) /* See above */
#define __GFP_NOFAIL ((__force gfp_t)___GFP_NOFAIL) /* See above */
#define __GFP_NORETRY ((__force gfp_t)___GFP_NORETRY) /* See above */
#define __GFP_MEMALLOC ((__force gfp_t)___GFP_MEMALLOC)/* Allow access to emergency reserves */
#define __GFP_COMP ((__force gfp_t)___GFP_COMP) /* Add compound page metadata */
#define __GFP_ZERO ((__force gfp_t)___GFP_ZERO) /* Return zeroed page on success */
#define __GFP_NOMEMALLOC ((__force gfp_t)___GFP_NOMEMALLOC) /* Don't use emergency reserves.
* This takes precedence over the
* __GFP_MEMALLOC flag if both are
* set
*/
#define __GFP_HARDWALL ((__force gfp_t)___GFP_HARDWALL) /* Enforce hardwall cpuset memory allocs */
#define __GFP_THISNODE ((__force gfp_t)___GFP_THISNODE)/* No fallback, no policies */
#define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE) /* Page is reclaimable */
#define __GFP_NOTRACK ((__force gfp_t)___GFP_NOTRACK) /* Don't track with kmemcheck */
#define __GFP_NO_KSWAPD ((__force gfp_t)___GFP_NO_KSWAPD)
#define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */
#define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) /* Allocator intends to dirty page */
These masks can look daunting, but under normal circumstances you rarely need to care about them individually; just as with kmalloc, GFP_KERNEL is what gets passed most of the time. The ready-made composite masks are defined as follows:
/* This equals 0, but use constants in case they ever change */
#define GFP_NOWAIT (GFP_ATOMIC & ~__GFP_HIGH)
/* GFP_ATOMIC means both !wait (__GFP_WAIT not set) and use emergency pool */
#define GFP_ATOMIC (__GFP_HIGH)
#define GFP_NOIO (__GFP_WAIT)
#define GFP_NOFS (__GFP_WAIT | __GFP_IO)
#define GFP_KERNEL (__GFP_WAIT | __GFP_IO | __GFP_FS)
#define GFP_TEMPORARY (__GFP_WAIT | __GFP_IO | __GFP_FS | \
__GFP_RECLAIMABLE)
#define GFP_USER (__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
#define GFP_HIGHUSER (GFP_USER | __GFP_HIGHMEM)
#define GFP_HIGHUSER_MOVABLE (GFP_HIGHUSER | __GFP_MOVABLE)
#define GFP_IOFS (__GFP_IO | __GFP_FS)
#define GFP_TRANSHUGE (GFP_HIGHUSER_MOVABLE | __GFP_COMP | \
__GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN | \
__GFP_NO_KSWAPD)
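A rough rule of thumb for choosing among these (a sketch, not an exhaustive guide): in process context where sleeping is acceptable, use GFP_KERNEL; in atomic context (interrupt handlers, or with a spinlock held), use GFP_ATOMIC, which never sleeps but may tap the emergency reserves.
/* Process context: the allocator may block to reclaim memory */
struct page *p = alloc_pages(GFP_KERNEL, 0);

/* Interrupt context or spinlock held: must not sleep */
struct page *q = alloc_pages(GFP_ATOMIC, 0);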
The call chain of alloc_pages is as follows:
alloc_pages(gfp_mask, order)
alloc_pages_node(numa_node_id(), gfp_mask, order)
__alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
__alloc_pages_nodemask(gfp_mask, order, zonelist, NULL);
/*
* This is the 'heart' of the zoned buddy allocator.
*/
struct page *
__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
struct zonelist *zonelist, nodemask_t *nodemask)
{
struct zoneref *preferred_zoneref;
struct page *page = NULL;
unsigned int cpuset_mems_cookie;
int alloc_flags = ALLOC_WMARK_LOW|ALLOC_CPUSET|ALLOC_FAIR;
gfp_t alloc_mask; /* The gfp_t that was actually used for allocation */
struct alloc_context ac = { (1)
.high_zoneidx = gfp_zone(gfp_mask),
.nodemask = nodemask,
.migratetype = gfpflags_to_migratetype(gfp_mask), (2)
};
......
}
1) The struct alloc_context data structure holds the parameters used throughout the buddy-system allocation functions. The gfp_zone function computes the zone index from the allocation mask and stores it in the high_zoneidx member.
static inline enum zone_type gfp_zone(gfp_t flags)
{
enum zone_type z;
int bit = (__force int) (flags & GFP_ZONEMASK);
z = (GFP_ZONE_TABLE >> (bit * ZONES_SHIFT)) &
((1 << ZONES_SHIFT) - 1);
VM_BUG_ON((GFP_ZONE_BAD >> bit) & 1);
return z;
}
Feeding GFP_KERNEL into gfp_zone yields 0, i.e. high_zoneidx is 0.
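As a worked trace of that result (assuming a configuration without ZONE_DMA/ZONE_DMA32, where ZONE_NORMAL is index 0, so entry 0 of GFP_ZONE_TABLE encodes ZONE_NORMAL):
/*
 * GFP_KERNEL = __GFP_WAIT | __GFP_IO | __GFP_FS = 0xd0
 * bit = 0xd0 & GFP_ZONEMASK (0x0f) = 0
 * z = (GFP_ZONE_TABLE >> (0 * ZONES_SHIFT)) & ((1 << ZONES_SHIFT) - 1)
 *   = ZONE_NORMAL = 0
 */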
2) gfpflags_to_migratetype converts the gfp_mask allocation mask into a MIGRATE_TYPES value. For example, with GFP_KERNEL the MIGRATE_TYPES value is MIGRATE_UNMOVABLE; with GFP_HIGHUSER_MOVABLE it is MIGRATE_MOVABLE.
/* Convert GFP flags to their corresponding migrate type */
static inline int gfpflags_to_migratetype(const gfp_t gfp_flags)
{
WARN_ON((gfp_flags & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK);
if (unlikely(page_group_by_mobility_disabled))
return MIGRATE_UNMOVABLE;
/* Group based on mobility */
return (((gfp_flags & __GFP_MOVABLE) != 0) << 1) |
((gfp_flags & __GFP_RECLAIMABLE) != 0);
}
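Two worked examples of this conversion, using the MIGRATE_TYPES ordering of this kernel generation (MIGRATE_UNMOVABLE = 0, MIGRATE_RECLAIMABLE = 1, MIGRATE_MOVABLE = 2):
/*
 * GFP_KERNEL: neither __GFP_MOVABLE nor __GFP_RECLAIMABLE is set
 *   (0 << 1) | 0 = 0 = MIGRATE_UNMOVABLE
 *
 * GFP_HIGHUSER_MOVABLE: __GFP_MOVABLE set, __GFP_RECLAIMABLE clear
 *   (1 << 1) | 0 = 2 = MIGRATE_MOVABLE
 */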
Back in __alloc_pages_nodemask:
/*
* This is the 'heart' of the zoned buddy allocator.
*/
struct page *
__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
struct zonelist *zonelist, nodemask_t *nodemask)
{
struct zoneref *preferred_zoneref;
......
retry_cpuset:
cpuset_mems_cookie = read_mems_allowed_begin();
/* We set it here, as __alloc_pages_slowpath might have changed it */
ac.zonelist = zonelist;
/* The preferred zone is used for statistics later */
preferred_zoneref = first_zones_zonelist(ac.zonelist, ac.high_zoneidx,
ac.nodemask ? : &cpuset_current_mems_allowed,
&ac.preferred_zone);
if (!ac.preferred_zone)
goto out;
ac.classzone_idx = zonelist_zone_idx(preferred_zoneref);
/* First allocation attempt */
alloc_mask = gfp_mask|__GFP_HARDWALL;
page = get_page_from_freelist(alloc_mask, order, alloc_flags, &ac);
if (unlikely(!page)) {
/*
* Runtime PM, block IO and its error handling path
* can deadlock because I/O on the device might not
* complete.
*/
alloc_mask = memalloc_noio_flags(gfp_mask);
page = __alloc_pages_slowpath(alloc_mask, order, &ac);
}
if (kmemcheck_enabled && page)
kmemcheck_pagealloc_alloc(page, order, gfp_mask);
trace_mm_page_alloc(page, order, alloc_mask, ac.migratetype);
out:
/*
* When updating a task's mems_allowed, it is possible to race with
* parallel threads in such a way that an allocation can fail while
* the mask is being updated. If a page allocation is about to fail,
* check if the cpuset changed during allocation and if so, retry.
*/
if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
goto retry_cpuset;
return page;
}
get_page_from_freelist makes the first attempt to allocate physical pages; if that fails, __alloc_pages_slowpath is called. This article focuses on the former.
/*
* get_page_from_freelist goes through the zonelist trying to allocate
* a page.
*/
static struct page *
get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
const struct alloc_context *ac)
{
struct zonelist *zonelist = ac->zonelist;
struct zoneref *z;
struct page *page = NULL;
struct zone *zone;
nodemask_t *allowednodes = NULL;/* zonelist_cache approximation */
int zlc_active = 0; /* set if using zonelist_cache */
int did_zlc_setup = 0; /* just call zlc_setup() one time */
bool consider_zone_dirty = (alloc_flags & ALLOC_WMARK_LOW) &&
(gfp_mask & __GFP_WRITE);
int nr_fair_skipped = 0;
bool zonelist_rescan;
zonelist_scan:
zonelist_rescan = false;
/*
* Scan zonelist, looking for a zone with enough free.
* See also __cpuset_node_allowed() comment in kernel/cpuset.c.
*/
for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->high_zoneidx,
ac->nodemask) {
The function first has to decide which zone the memory can be allocated from. The for_each_zone_zonelist_nodemask macro scans the memory node's zonelist looking for a zone suitable for the allocation.
#define for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \
for (z = first_zones_zonelist(zlist, highidx, nodemask, &zone); \
zone; \
z = next_zones_zonelist(++z, highidx, nodemask), \
zone = zonelist_zone(z))
for_each_zone_zonelist_nodemask starts with first_zones_zonelist, which searches from the given zone index highidx, the value computed earlier by the gfp_zone function.
static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
enum zone_type highest_zoneidx,
nodemask_t *nodes,
struct zone **zone)
{
struct zoneref *z = next_zones_zonelist(zonelist->_zonerefs,
highest_zoneidx, nodes);
*zone = zonelist_zone(z);
return z;
}
first_zones_zonelist calls next_zones_zonelist to locate the zoneref, and finally returns the zone data structure.
/* Returns the next zone at or below highest_zoneidx in a zonelist */
struct zoneref *next_zones_zonelist(struct zoneref *z,
enum zone_type highest_zoneidx,
nodemask_t *nodes)
{
/*
* Find the next suitable zone to use for the allocation.
* Only filter based on nodemask if it's set
*/
if (likely(nodes == NULL))
while (zonelist_zone_idx(z) > highest_zoneidx)
z++;
else
while (zonelist_zone_idx(z) > highest_zoneidx ||
(z->zone && !zref_in_nodemask(z, nodes)))
z++;
return z;
}
The heart of the zone selection is the next_zones_zonelist function, where highest_zoneidx is the value gfp_zone computed from the allocation mask. A zonelist contains an array of zoneref entries; each zoneref has a zone member pointing to a zone data structure and a zone_idx member holding that zone's index. The kernel fills in this array during initialization, in build_zonelists_node. Leaving ZONE_DMA aside, the relationship between zone type, the _zonerefs[] array, and the zone index looks like this:
ZONE_HIGHMEM    _zonerefs[0]->zone_idx = 1
ZONE_NORMAL     _zonerefs[1]->zone_idx = 0
_zonerefs[0] refers to ZONE_HIGHMEM, whose zone index zone_idx is 1; _zonerefs[1] refers to ZONE_NORMAL, whose zone_idx is 0. In other words, allocation prefers ZONE_HIGHMEM, because ZONE_HIGHMEM sits before ZONE_NORMAL in the zonelist; the allocation mask, however, still has its say.
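This ordering is established at boot: build_zonelists_node walks the node's zones from the highest populated index downward, filling _zonerefs[] in that order. For reference (v4.0-era source, shown here for orientation):
struct zoneref {
	struct zone *zone;	/* Pointer to actual zone */
	int zone_idx;		/* zone_idx(zoneref->zone) */
};

static int build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist,
				int nr_zones)
{
	struct zone *zone;
	enum zone_type zone_type = MAX_NR_ZONES;

	do {
		zone_type--;
		zone = pgdat->node_zones + zone_type;
		if (populated_zone(zone)) {
			zoneref_set_zone(zone,
				&zonelist->_zonerefs[nr_zones++]);
			check_highest_zone(zone_type);
		}
	} while (zone_type);

	return nr_zones;
}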
With GFP_KERNEL, gfp_zone(GFP_KERNEL) returns 0, so highest_zoneidx is 0, while the node's first zone is ZONE_HIGHMEM with zone_idx 1. next_zones_zonelist therefore advances past it (z++), and first_zones_zonelist ends up returning ZONE_NORMAL. The for_each_zone_zonelist_nodemask loop can then visit only this one zone, ZONE_NORMAL.
With the GFP_HIGHUSER_MOVABLE mask, which includes __GFP_HIGHMEM, gfp_zone(GFP_HIGHUSER_MOVABLE) returns 2, so highest_zoneidx is 2. The node's first zone is ZONE_HIGHMEM with zone_idx 1, which already satisfies the condition, so next_zones_zonelist returns it directly: the first zone tried is ZONE_HIGHMEM, with zone_idx 1.
Allocation is then attempted from the HIGHMEM zone; if it cannot satisfy the request, for_each_zone_zonelist_nodemask moves on to the next zone, the NORMAL zone.
To understand for_each_zone_zonelist_nodemask correctly, two points matter.
- How highest_zoneidx is computed, i.e. how the allocation mask is parsed; that is the job of the gfp_zone function.
- Each memory node has a struct pglist_data data structure; on non-NUMA systems there is only one node. Its node_zonelists member is a struct zonelist, which holds the struct zoneref _zonerefs[] array describing the zones: ZONE_HIGHMEM comes first, with _zonerefs[0]->zone_idx = 1, and ZONE_NORMAL follows, with _zonerefs[1]->zone_idx = 0.
__alloc_pages_nodemask calls first_zones_zonelist to compute preferred_zoneref, and the corresponding zone index is saved in ac.classzone_idx; that value is used again later by the kswapd kernel thread.
Back in get_page_from_freelist: once for_each_zone_zonelist_nodemask has found a zone to allocate from, some preparatory checks are performed.
get_page_from_freelist
......
unsigned long mark;
if (IS_ENABLED(CONFIG_NUMA) && zlc_active &&
!zlc_zone_worth_trying(zonelist, z, allowednodes))
continue;
if (cpusets_enabled() &&
(alloc_flags & ALLOC_CPUSET) &&
!cpuset_zone_allowed(zone, gfp_mask))
continue;
/*
* Distribute pages in proportion to the individual
* zone size to ensure fair page aging. The zone a
* page was allocated in should have no effect on the
* time the page has in memory before being reclaimed.
*/
if (alloc_flags & ALLOC_FAIR) {
if (!zone_local(ac->preferred_zone, zone))
break;
if (test_bit(ZONE_FAIR_DEPLETED, &zone->flags)) {
nr_fair_skipped++;
continue;
}
}
Then the zone's watermark is checked to see whether there are enough free pages.
get_page_from_freelist
......
mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
if (!zone_watermark_ok(zone, order, mark,
ac->classzone_idx, alloc_flags)) {
......
ret = zone_reclaim(zone, gfp_mask, order);
switch (ret) {
case ZONE_RECLAIM_NOSCAN:
/* did not scan */
continue;
case ZONE_RECLAIM_FULL:
/* scanned but unreclaimable */
continue;
default:
/* did we reclaim enough */
if (zone_watermark_ok(zone, order, mark,
ac->classzone_idx, alloc_flags))
goto try_this_zone;
/*
* Failed to reclaim enough to meet watermark.
* Only mark the zone full if checking the min
* watermark or if we failed to reclaim just
* 1<<order pages or else the page allocator
* fastpath will prematurely mark zones full
* when the watermark is between the low and
* min watermarks.
*/
if (((alloc_flags & ALLOC_WMARK_MASK) == ALLOC_WMARK_MIN) ||
ret == ZONE_RECLAIM_SOME)
goto this_zone_full;
continue;
}
}
try_this_zone:
page = buffered_rmqueue(ac->preferred_zone, zone, order,
gfp_mask, ac->migratetype);
if (page) {
if (prep_new_page(page, order, gfp_mask, alloc_flags))
goto try_this_zone;
return page;
}
The zone data structure has a watermark member recording the zone's watermarks. Three watermarks are defined: WMARK_MIN, WMARK_LOW, and WMARK_HIGH. They are computed in the __setup_per_zone_wmarks() function.
zone_watermark_ok decides whether the zone's free space satisfies the WMARK_LOW watermark: it returns true if the free pages are above the given watermark, false otherwise.
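zone_watermark_ok is a thin wrapper around __zone_watermark_ok, whose core check (v4.0-era source, lightly abridged here, CMA handling omitted) deducts the pages the request would consume, compares against the mark plus the lowmem reserve, and halves the requirement at each lower order:
static bool __zone_watermark_ok(struct zone *z, unsigned int order,
			unsigned long mark, int classzone_idx, int alloc_flags,
			long free_pages)
{
	/* free_pages may go negative - that's OK */
	long min = mark;
	int o;

	free_pages -= (1 << order) - 1;
	if (alloc_flags & ALLOC_HIGH)
		min -= min / 2;
	if (alloc_flags & ALLOC_HARDER)
		min -= min / 4;

	if (free_pages <= min + z->lowmem_reserve[classzone_idx])
		return false;
	for (o = 0; o < order; o++) {
		/* At the next order, this order's pages become unavailable */
		free_pages -= z->free_area[o].nr_free << o;

		/* Require at least this many pages per zone */
		min >>= 1;

		if (free_pages <= min)
			return false;
	}
	return true;
}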
If the zone's free pages are found to be below the WMARK_LOW watermark, the zone_reclaim function is called to reclaim pages. Here we assume zone_watermark_ok reports that pages are plentiful, in which case buffered_rmqueue is called to allocate physical pages from the buddy system.
__alloc_pages_nodemask->get_page_from_freelist->buffered_rmqueue
/*
* Allocate a page from the given zone. Use pcplists for order-0 allocations.
*/
static inline
struct page *buffered_rmqueue(struct zone *preferred_zone,
struct zone *zone, unsigned int order,
gfp_t gfp_flags, int migratetype)
{
unsigned long flags;
struct page *page;
bool cold = ((gfp_flags & __GFP_COLD) != 0);
if (likely(order == 0)) {
struct per_cpu_pages *pcp;
struct list_head *list;
local_irq_save(flags);
pcp = &this_cpu_ptr(zone->pageset)->pcp;
list = &pcp->lists[migratetype];
if (list_empty(list)) {
pcp->count += rmqueue_bulk(zone, 0,
pcp->batch, list,
migratetype, cold);
if (unlikely(list_empty(list)))
goto failed;
}
if (cold)
page = list_entry(list->prev, struct page, lru);
else
page = list_entry(list->next, struct page, lru);
list_del(&page->lru);
pcp->count--;
} else {
if (unlikely(gfp_flags & __GFP_NOFAIL)) {
/*
* __GFP_NOFAIL is not to be used in new code.
*
* All __GFP_NOFAIL callers should be fixed so that they
* properly detect and handle allocation failures.
*
* We most definitely don't want callers attempting to
* allocate greater than order-1 page units with
* __GFP_NOFAIL.
*/
WARN_ON_ONCE(order > 1);
}
spin_lock_irqsave(&zone->lock, flags);
page = __rmqueue(zone, order, migratetype);
spin_unlock(&zone->lock);
if (!page)
goto failed;
__mod_zone_freepage_state(zone, -(1 << order),
get_freepage_migratetype(page));
}
__mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order));
if (atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]) <= 0 &&
!test_bit(ZONE_FAIR_DEPLETED, &zone->flags))
set_bit(ZONE_FAIR_DEPLETED, &zone->flags);
__count_zone_vm_events(PGALLOC, zone, 1 << order);
zone_statistics(preferred_zone, zone, gfp_flags);
local_irq_restore(flags);
VM_BUG_ON_PAGE(bad_range(zone, page), page);
return page;
failed:
local_irq_restore(flags);
return NULL;
}
The order == 0 case is treated differently from the rest: when only a single physical page is needed, it is taken from the zone's per-CPU page lists (zone->pageset); for order > 0 the allocation comes from the buddy system proper and eventually reaches the __rmqueue_smallest function.
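For reference, the per-CPU lists consulted on the order-0 path are described by struct per_cpu_pages (from this era's mmzone.h), with one list per migrate type:
struct per_cpu_pages {
	int count;		/* number of pages in the list */
	int high;		/* high watermark, emptying needed */
	int batch;		/* chunk size for buddy add/remove */

	/* Lists of pages, one per migrate type stored on the pcp-lists */
	struct list_head lists[MIGRATE_PCPTYPES];
};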
__alloc_pages_nodemask->get_page_from_freelist->buffered_rmqueue->__rmqueue->__rmqueue_smallest
static inline
struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
int migratetype)
{
unsigned int current_order;
struct free_area *area;
struct page *page;
/* Find a page of the appropriate size in the preferred list */
for (current_order = order; current_order < MAX_ORDER; ++current_order) {
area = &(zone->free_area[current_order]);
if (list_empty(&area->free_list[migratetype]))
continue;
page = list_entry(area->free_list[migratetype].next,
struct page, lru);
list_del(&page->lru);
rmv_page_order(page);
area->nr_free--;
expand(zone, page, order, current_order, area, migratetype);
set_freepage_migratetype(page, migratetype);
return page;
}
return NULL;
}
__rmqueue_smallest searches the zone's free lists starting from the requested order. If the free_area at the current order has no free block of the requested migratetype on its list, the next higher order is tried.
At boot time, late in memblock's life, free pages are handed over to the buddy system, and they land as far as possible on the MAX_ORDER-1 lists; cat /proc/pagetypeinfo gives a hint of this. When some order's free area does have a free block of the right migratetype, the block is taken off the list and the expand function is called to "cut the cake": the block removed is usually larger than what was asked for, and after cutting, the remainder has to be put back into the buddy system.
The expand function implements this cake-cutting. Its parameter high is current_order, which is usually larger than the requested order. On each iteration, area is decremented, stepping down one order level, and list_add puts the leftover half onto the free list one order lower. A concrete trace follows the code below.
__alloc_pages_nodemask->get_page_from_freelist->buffered_rmqueue->__rmqueue->__rmqueue_smallest->expand
static inline void expand(struct zone *zone, struct page *page,
int low, int high, struct free_area *area,
int migratetype)
{
unsigned long size = 1 << high;
while (high > low) {
area--;
high--;
size >>= 1;
VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);
if (IS_ENABLED(CONFIG_DEBUG_PAGEALLOC) &&
debug_guardpage_enabled() &&
high < debug_guardpage_minorder()) {
/*
* Mark as guard pages (or page), that will allow to
* merge back to allocator when buddy will be freed.
* Corresponding page table entries will not be touched,
* pages will stay not present in virtual address space
*/
set_page_guard(zone, &page[size], high, migratetype);
continue;
}
list_add(&page[size].lru, &area->free_list[migratetype]);
area->nr_free++;
set_page_order(&page[size], high);
}
}
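To make the cake-cutting concrete, here is a hypothetical trace for a request of order = 2 satisfied from a free block found at current_order = 5 (32 pages):
/*
 * expand(zone, page, low = 2, high = 5, area, migratetype), size = 32:
 *
 *   high 5 -> 4: page[16..31] (16 pages) added to free_list[4]
 *   high 4 -> 3: page[8..15]  ( 8 pages) added to free_list[3]
 *   high 3 -> 2: page[4..7]   ( 4 pages) added to free_list[2]
 *
 * The loop stops at high == low; page[0..3] (4 pages) are returned
 * to the caller.
 */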
Once the requested pages have been allocated, __rmqueue returns the struct page of the block's first page. Back in buffered_rmqueue, zone_statistics is then used to update some statistics.
Back in get_page_from_freelist, the last step is a set of checks in the prep_new_page() function.
__alloc_pages_nodemask->get_page_from_freelist->prep_new_page
static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
int alloc_flags)
{
int i;
for (i = 0; i < (1 << order); i++) {
struct page *p = page + i;
if (unlikely(check_new_page(p)))
return 1;
}
set_page_private(page, 0);
set_page_refcounted(page);
arch_alloc_page(page, order);
kernel_map_pages(page, 1 << order, 1);
kasan_alloc_pages(page, order);
if (gfp_flags & __GFP_ZERO)
prep_zero_page(page, order, gfp_flags);
if (order && (gfp_flags & __GFP_COMP))
prep_compound_page(page, order);
set_page_owner(page, order, gfp_flags);
/*
* page->pfmemalloc is set when ALLOC_NO_WATERMARKS was necessary to
* allocate the page. The expectation is that the caller is taking
* steps that will free more memory. The caller should avoid the page
* being used for !PFMEMALLOC purposes.
*/
page->pfmemalloc = !!(alloc_flags & ALLOC_NO_WATERMARKS);
return 0;
}
check_new_page first validates the struct page fields:
static inline int check_new_page(struct page *page)
{
const char *bad_reason = NULL;
unsigned long bad_flags = 0;
if (unlikely(page_mapcount(page)))
bad_reason = "nonzero mapcount";
if (unlikely(page->mapping != NULL))
bad_reason = "non-NULL mapping";
if (unlikely(atomic_read(&page->_count) != 0))
bad_reason = "nonzero _count";
if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_PREP)) {
bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag set";
bad_flags = PAGE_FLAGS_CHECK_AT_PREP;
}
#ifdef CONFIG_MEMCG
if (unlikely(page->mem_cgroup))
bad_reason = "page still charged to cgroup";
#endif
if (unlikely(bad_reason)) {
bad_page(page, bad_reason, bad_flags);
return 1;
}
return 0;
}
- A freshly allocated page's _mapcount should be 0.
- page->mapping should be NULL.
- page->_count is checked against 0. Note that a page returned by alloc_pages ends up with _count equal to 1, but at this point it is still 0; right after this check, prep_new_page calls set_page_refcounted->set_page_count to set _count to 1 (see the snippet after this list).
- page->flags is checked: PAGE_FLAGS_CHECK_AT_PREP covers roughly the low 21 bits of flags, all of which should be clear at allocation time. If any of them is set, the allocation is suspect; most likely the page was used before and its flags were not cleared when it was freed.
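The refcount fixup mentioned above is tiny (v4.0-era helpers, shown for reference):
static inline void set_page_count(struct page *page, int v)
{
	atomic_set(&page->_count, v);
}

static inline void set_page_refcounted(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	VM_BUG_ON_PAGE(atomic_read(&page->_count) != 0, page);
	set_page_count(page, 1);
}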
After prep_new_page fills in a few more fields, the newly allocated pages pass inspection and are ready to leave the factory for use by the kernel's various subsystems. Of course, the corresponding memory can only be accessed once page-table mappings for it are in place.