4. Allocating Physical Pages (Buddy System Allocation), Linux 4.0

Review questions

  • Briefly describe how, in the ideal case, the Linux kernel page allocator allocates contiguous physical pages.

Answer: The kernel allocates contiguous physical pages through alloc_pages(), which takes two parameters: the allocation mask (gfp_mask) and the buddy-system order. The mask splits into zone modifiers and action modifiers; the zone modifiers decide which zone is scanned first, and the mask also encodes the page's migration type (e.g. __GFP_MOVABLE). The zone index, the migration type and the order together select the allocation path. If no contiguous pages can be found for the requested zone, migration type and order, the allocator falls back to other zones, other migration types and other orders.

  • In the page allocator, how does the allocation mask (gfp_mask) determine which zones memory may be allocated from?

Answer: The low 4 bits of gfp_mask select the zone, but the zone order is fixed when the zonelists are built; allocation normally proceeds in the order HIGHMEM -> NORMAL -> DMA32 -> DMA.

  • In which direction does the page allocator scan the zones?

Answer: Zones are normally scanned from high to low: HIGHMEM -> NORMAL -> DMA32 -> DMA.

  • When allocating physical memory for a user process, should the allocation mask be GFP_KERNEL or GFP_HIGHUSER_MOVABLE?

Answer: GFP_HIGHUSER_MOVABLE should be used, because pages allocated with it are movable, whereas GFP_KERNEL allocates an unmovable memory type.

Buddy system memory allocation:

    The most commonly used kernel interface for allocating physical pages is alloc_pages(), which allocates one or more contiguous physical pages; the number of pages must be a power of two. Compared with making many separate single-page allocations, allocating contiguous pages in one request helps reduce memory fragmentation, which is otherwise a persistent headache. alloc_pages() takes two parameters: the allocation mask gfp_mask and the allocation order.

include/linux/gfp.h

#define alloc_pages(gfp_mask, order) \
        alloc_pages_node(numa_node_id(), gfp_mask, order)
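
For orientation, here is a minimal kernel-module-style sketch (my own illustration, not from the original text; alloc_demo_init/alloc_demo_exit are hypothetical names, and error handling is reduced to the bare minimum) showing how a caller might use this interface:

#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/mm.h>

static struct page *demo_page;

static int __init alloc_demo_init(void)
{
    /* ask the buddy system for 2^2 = 4 contiguous physical pages */
    demo_page = alloc_pages(GFP_KERNEL, 2);
    if (!demo_page)
        return -ENOMEM;

    /* GFP_KERNEL pages come from lowmem, so they have a kernel virtual address */
    pr_info("got 4 pages at %p\n", page_address(demo_page));
    return 0;
}

static void __exit alloc_demo_exit(void)
{
    /* give the pages back to the buddy system with the same order */
    __free_pages(demo_page, 2);
}

module_init(alloc_demo_init);
module_exit(alloc_demo_exit);
MODULE_LICENSE("GPL");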

The allocation mask is a very important parameter; it is also defined in the gfp.h header.

/* Plain integer GFP bitmasks. Do not use this directly. */
#define ___GFP_DMA      0x01u
#define ___GFP_HIGHMEM      0x02u
#define ___GFP_DMA32        0x04u
#define ___GFP_MOVABLE      0x08u
#define ___GFP_WAIT     0x10u /* The request may sleep: the scheduler is free to run another task while the
                                request is serviced, or the request may be interrupted by a more important event.
                                The allocator may also wait on a queue for an event before returning memory. */
#define ___GFP_HIGH     0x20u /* Set when the request is very important, i.e. the kernel urgently needs memory and
                                a failed allocation could have severe consequences (threatening system stability
                                or crashing the system). */
#define ___GFP_IO       0x40u /* The kernel may perform I/O while looking for free memory; in practice this means
                                that pages selected for swap-out may only be written to disk if this flag is set. */
#define ___GFP_FS       0x80u    /* The kernel may perform VFS operations. Must be cleared in kernel subsystems tied
                                to the VFS layer, since it could cause recursive calls. */
#define ___GFP_COLD     0x100u /* Set when a cache-cold page (one not in the CPU cache) is wanted. */
#define ___GFP_NOWARN       0x200u /* Suppress the kernel warning on allocation failure. Rarely useful. */
#define ___GFP_REPEAT       0x400u /* Retry automatically after a failed allocation, but give up after a few attempts. */
#define ___GFP_NOFAIL       0x800u /* Keep retrying after a failed allocation until it succeeds. */
#define ___GFP_NORETRY      0x1000u
#define ___GFP_MEMALLOC     0x2000u
#define ___GFP_COMP     0x4000u
#define ___GFP_ZERO     0x8000u     /* On success, return a page filled with zero bytes. */
#define ___GFP_NOMEMALLOC   0x10000u
#define ___GFP_HARDWALL     0x20000u /* Only meaningful on NUMA: restrict the allocation to the nodes associated with
                                the CPUs the current process may run on. */
#define ___GFP_THISNODE     0x40000u /* Only meaningful on NUMA: if set, other nodes may not be used as fallback
                                when the allocation fails on this node. */
#define ___GFP_RECLAIMABLE  0x80000u
#define ___GFP_NOTRACK      0x200000u
#define ___GFP_NO_KSWAPD    0x400000u
#define ___GFP_OTHER_NODE   0x800000u
#define ___GFP_WRITE        0x1000000u

In the kernel the allocation mask flags fall into two groups: zone modifiers and action modifiers. Zone modifiers specify which zone the pages should come from; they occupy the lowest 4 bits of the mask and are __GFP_DMA, __GFP_HIGHMEM, __GFP_DMA32 and __GFP_MOVABLE.

/*
 * GFP bitmasks..
 *
 * Zone modifiers (see linux/mmzone.h - low three bits)
 *
 * Do not put any conditional on these. If necessary modify the definitions
 * without the underscores and use them consistently. The definitions here may
 * be used in bit comparisons.
 */
#define __GFP_DMA   ((__force gfp_t)___GFP_DMA)
#define __GFP_HIGHMEM   ((__force gfp_t)___GFP_HIGHMEM)
#define __GFP_DMA32 ((__force gfp_t)___GFP_DMA32)
#define __GFP_MOVABLE   ((__force gfp_t)___GFP_MOVABLE)  /* Page is movable */
#define GFP_ZONEMASK    (__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE)

Action modifiers do not restrict which zone memory is allocated from, but they change the allocator's behaviour. They are defined as follows:

/*
 * Action modifiers - doesn't change the zoning
 *
 * __GFP_REPEAT: Try hard to allocate the memory, but the allocation attempt
 * _might_ fail.  This depends upon the particular VM implementation.
 *
 * __GFP_NOFAIL: The VM implementation _must_ retry infinitely: the caller
 * cannot handle allocation failures.  This modifier is deprecated and no new
 * users should be added.
 *
 * __GFP_NORETRY: The VM implementation must not retry indefinitely.
 *
 * __GFP_MOVABLE: Flag that this page will be movable by the page migration
 * mechanism or reclaimed
 */
#define __GFP_WAIT  ((__force gfp_t)___GFP_WAIT)    /* Can wait and reschedule? */
#define __GFP_HIGH  ((__force gfp_t)___GFP_HIGH)    /* Should access emergency pools? */
#define __GFP_IO    ((__force gfp_t)___GFP_IO)  /* Can start physical IO? */
#define __GFP_FS    ((__force gfp_t)___GFP_FS)  /* Can call down to low-level FS? */
#define __GFP_COLD  ((__force gfp_t)___GFP_COLD)    /* Cache-cold page required */
#define __GFP_NOWARN    ((__force gfp_t)___GFP_NOWARN)  /* Suppress page allocation failure warning */
#define __GFP_REPEAT    ((__force gfp_t)___GFP_REPEAT)  /* See above */
#define __GFP_NOFAIL    ((__force gfp_t)___GFP_NOFAIL)  /* See above */
#define __GFP_NORETRY   ((__force gfp_t)___GFP_NORETRY) /* See above */
#define __GFP_MEMALLOC  ((__force gfp_t)___GFP_MEMALLOC)/* Allow access to emergency reserves */
#define __GFP_COMP  ((__force gfp_t)___GFP_COMP)    /* Add compound page metadata */
#define __GFP_ZERO  ((__force gfp_t)___GFP_ZERO)    /* Return zeroed page on success */
#define __GFP_NOMEMALLOC ((__force gfp_t)___GFP_NOMEMALLOC) /* Don't use emergency reserves.
                             * This takes precedence over the
                             * __GFP_MEMALLOC flag if both are
                             * set
                             */
#define __GFP_HARDWALL   ((__force gfp_t)___GFP_HARDWALL) /* Enforce hardwall cpuset memory allocs */
#define __GFP_THISNODE  ((__force gfp_t)___GFP_THISNODE)/* No fallback, no policies */
#define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE) /* Page is reclaimable */
#define __GFP_NOTRACK   ((__force gfp_t)___GFP_NOTRACK)  /* Don't track with kmemcheck */

#define __GFP_NO_KSWAPD ((__force gfp_t)___GFP_NO_KSWAPD)
#define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */
#define __GFP_WRITE ((__force gfp_t)___GFP_WRITE)   /* Allocator intends to dirty page */

Taking GFP_KERNEL as an example, let us see how, in the ideal case, alloc_pages() allocates physical memory.

page = alloc_pages(GFP_KERNEL, order)

The GFP_KERNEL mask is defined in the gfp.h header and is a combination of flags. The commonly used combinations are:

/* This equals 0, but use constants in case they ever change */
#define GFP_NOWAIT  (GFP_ATOMIC & ~__GFP_HIGH)
/* GFP_ATOMIC means both !wait (__GFP_WAIT not set) and use emergency pool */
#define GFP_ATOMIC  (__GFP_HIGH)
#define GFP_NOIO    (__GFP_WAIT)
#define GFP_NOFS    (__GFP_WAIT | __GFP_IO)
#define GFP_KERNEL  (__GFP_WAIT | __GFP_IO | __GFP_FS)
#define GFP_TEMPORARY   (__GFP_WAIT | __GFP_IO | __GFP_FS | \
             __GFP_RECLAIMABLE)
#define GFP_USER    (__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
#define GFP_HIGHUSER    (GFP_USER | __GFP_HIGHMEM)
#define GFP_HIGHUSER_MOVABLE    (GFP_HIGHUSER | __GFP_MOVABLE)
#define GFP_IOFS    (__GFP_IO | __GFP_FS)
#define GFP_TRANSHUGE   (GFP_HIGHUSER_MOVABLE | __GFP_COMP | \
             __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN | \
             __GFP_NO_KSWAPD)

So the GFP_KERNEL mask contains the three flags __GFP_WAIT, __GFP_IO and __GFP_FS, i.e. 0x10 | 0x40 | 0x80 = 0xd0.
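
To make the bit arithmetic concrete, here is a small user-space sketch (my own illustration, with the flag values copied from the listing above) that recomputes GFP_KERNEL and GFP_HIGHUSER_MOVABLE:

#include <stdio.h>

/* values mirrored from the ___GFP_* listing above */
#define GFP_WAIT      0x10u
#define GFP_IO        0x40u
#define GFP_FS        0x80u
#define GFP_HIGHMEM   0x02u
#define GFP_MOVABLE   0x08u
#define GFP_HARDWALL  0x20000u

int main(void)
{
    unsigned int gfp_kernel = GFP_WAIT | GFP_IO | GFP_FS;
    unsigned int gfp_user   = gfp_kernel | GFP_HARDWALL;
    unsigned int gfp_highuser_movable = gfp_user | GFP_HIGHMEM | GFP_MOVABLE;

    printf("GFP_KERNEL           = 0x%x\n", gfp_kernel);            /* 0xd0 */
    printf("GFP_HIGHUSER_MOVABLE = 0x%x\n", gfp_highuser_movable);  /* 0x200da */
    printf("zone bits (low 4)    = 0x%x\n", gfp_highuser_movable & 0xf); /* 0x0a: __GFP_HIGHMEM | __GFP_MOVABLE */
    return 0;
}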

    alloc_pages() eventually calls __alloc_pages_nodemask(), the core function of the buddy system.

alloc_pages()->alloc_pages_node()->__alloc_pages()->__alloc_pages_nodemask()

#define alloc_pages(gfp_mask, order) \
        alloc_pages_node(numa_node_id(), gfp_mask, order)

static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
                        unsigned int order)
{
    /* Unknown node is current node */
    if (nid < 0)
        nid = numa_node_id();

    return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
}

static inline struct page *
__alloc_pages(gfp_t gfp_mask, unsigned int order,
        struct zonelist *zonelist)
{
    return __alloc_pages_nodemask(gfp_mask, order, zonelist, NULL);
}

/*
 * This is the 'heart' of the zoned buddy allocator.
 */
struct page *
__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
            struct zonelist *zonelist, nodemask_t *nodemask)
{
    struct zoneref *preferred_zoneref;
    struct page *page = NULL;
    unsigned int cpuset_mems_cookie;
    int alloc_flags = ALLOC_WMARK_LOW|ALLOC_CPUSET|ALLOC_FAIR;
    gfp_t alloc_mask; /* The gfp_t that was actually used for allocation */

    /* struct alloc_context holds the parameters used throughout the buddy allocator.
     * gfp_zone() derives the zone index from the allocation mask and stores it in
     * the high_zoneidx member. The general principle is to allocate from cheap
     * memory before expensive memory; the index is looked up from a table encoded
     * from the zone bits of the mask.
     */
    struct alloc_context ac = {
        .high_zoneidx = gfp_zone(gfp_mask),
        .nodemask = nodemask,
        .migratetype = gfpflags_to_migratetype(gfp_mask),/* Convert the gfp mask to a MIGRATE_TYPES value:
            for GFP_KERNEL the result is MIGRATE_UNMOVABLE, for GFP_HIGHUSER_MOVABLE
            it is MIGRATE_MOVABLE.
        */
    };

    gfp_mask &= gfp_allowed_mask;

    lockdep_trace_alloc(gfp_mask);

    might_sleep_if(gfp_mask & __GFP_WAIT);

    if (should_fail_alloc_page(gfp_mask, order))
        return NULL;

    /*
     * Check the zones suitable for the gfp_mask contain at least one
     * valid zone. It's possible to have an empty zonelist as a result
     * of GFP_THISNODE and a memoryless node
     */
    if (unlikely(!zonelist->_zonerefs->zone))
        return NULL;

    if (IS_ENABLED(CONFIG_CMA) && ac.migratetype == MIGRATE_MOVABLE)
        alloc_flags |= ALLOC_CMA;

retry_cpuset:
    cpuset_mems_cookie = read_mems_allowed_begin();

    /* We set it here, as __alloc_pages_slowpath might have changed it */
    ac.zonelist = zonelist;
    /* The preferred zone is used for statistics later */
    preferred_zoneref = first_zones_zonelist(ac.zonelist, ac.high_zoneidx,
                ac.nodemask ? : &cpuset_current_mems_allowed,
                &ac.preferred_zone);
    if (!ac.preferred_zone)
        goto out;
    ac.classzone_idx = zonelist_zone_idx(preferred_zoneref);

    /* First allocation attempt */
    alloc_mask = gfp_mask|__GFP_HARDWALL;
    /* get_page_from_freelist() attempts the actual allocation. If it fails, __alloc_pages_slowpath()
        is called; that function handles many special cases. See the implementation of
        get_page_from_freelist() below. */
    page = get_page_from_freelist(alloc_mask, order, alloc_flags, &ac);
    if (unlikely(!page)) {
        /*
         * Runtime PM, block IO and its error handling path
         * can deadlock because I/O on the device might not
         * complete.
         */
        alloc_mask = memalloc_noio_flags(gfp_mask);

        page = __alloc_pages_slowpath(alloc_mask, order, &ac);
    }

    if (kmemcheck_enabled && page)
        kmemcheck_pagealloc_alloc(page, order, gfp_mask);

    trace_mm_page_alloc(page, order, alloc_mask, ac.migratetype);

out:
    /*
     * When updating a task's mems_allowed, it is possible to race with
     * parallel threads in such a way that an allocation can fail while
     * the mask is being updated. If a page allocation is about to fail,
     * check if the cpuset changed during allocation and if so, retry.
     */
    if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
        goto retry_cpuset;

    return page;
}

get_page_from_freelist() is another important helper of the buddy system. It uses the flag set and the allocation order to decide whether an allocation can be made and, if so, performs the actual allocation.

/*
 * get_page_from_freelist goes through the zonelist trying to allocate
 * a page.
 */
static struct page *
get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
                        const struct alloc_context *ac)
{
    struct zonelist *zonelist = ac->zonelist;
    struct zoneref *z;
    struct page *page = NULL;
    struct zone *zone;
    nodemask_t *allowednodes = NULL;/* zonelist_cache approximation */
    int zlc_active = 0;     /* set if using zonelist_cache */
    int did_zlc_setup = 0;      /* just call zlc_setup() one time */
    bool consider_zone_dirty = (alloc_flags & ALLOC_WMARK_LOW) &&
                (gfp_mask & __GFP_WRITE);
    int nr_fair_skipped = 0;
    bool zonelist_rescan;

zonelist_scan:
    zonelist_rescan = false;

    /*
     * Scan zonelist, looking for a zone with enough free.
     * See also __cpuset_node_allowed() comment in kernel/cpuset.c.
     * (cpusets bind processes to resources, which improves locality.)
     */
    /*
     * First decide which zone to allocate from. The for_each_zone_zonelist_nodemask macro scans the
     * node's zonelist looking for a zone suitable for this allocation,
     * i.e. a zone with enough free memory.
     */

     /* The ALLOC_* flags are explained further below. cpuset_zone_allowed() is another helper that checks
     ** whether the zone belongs to a node the process is allowed to run on. zone_watermark_ok() then checks
     ** whether the zone has enough free pages and tries to find a contiguous block. If either condition fails,
     ** i.e. there are not enough free pages or no contiguous block can satisfy the request, the loop moves on
     ** to the next zone in the fallback list and performs the same checks.
     ** If the zone is suitable for the current request, buffered_rmqueue() tries to allocate the required number of pages.
     */
    for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->high_zoneidx,
                                ac->nodemask) {
        unsigned long mark;

        if (IS_ENABLED(CONFIG_NUMA) && zlc_active &&
            !zlc_zone_worth_trying(zonelist, z, allowednodes))
                continue;
        if (cpusets_enabled() &&
            (alloc_flags & ALLOC_CPUSET) &&
            !cpuset_zone_allowed(zone, gfp_mask))
                continue;
        /*
         * Distribute pages in proportion to the individual
         * zone size to ensure fair page aging.  The zone a
         * page was allocated in should have no effect on the
         * time the page has in memory before being reclaimed.
         */
        if (alloc_flags & ALLOC_FAIR) {
            if (!zone_local(ac->preferred_zone, zone))
                break;
            if (test_bit(ZONE_FAIR_DEPLETED, &zone->flags)) {
                nr_fair_skipped++;
                continue;
            }
        }
        /*
         * When allocating a page cache page for writing, we
         * want to get it from a zone that is within its dirty
         * limit, such that no single zone holds more than its
         * proportional share of globally allowed dirty pages.
         * The dirty limits take into account the zone's
         * lowmem reserves and high watermark so that kswapd
         * should be able to balance it without having to
         * write pages from its LRU list.
         *
         * This may look like it could increase pressure on
         * lower zones by failing allocations in higher zones
         * before they are full.  But the pages that do spill
         * over are limited as the lower zones are protected
         * by this very same mechanism.  It should not become
         * a practical burden to them.
         *
         * XXX: For now, allow allocations to potentially
         * exceed the per-zone dirty limit in the slowpath
         * (ALLOC_WMARK_LOW unset) before going into reclaim,
         * which is important when on a NUMA setup the allowed
         * zones are together not big enough to reach the
         * global limit.  The proper fix for these situations
         * will require awareness of zones in the
         * dirty-throttling and the flusher threads.
         */
        if (consider_zone_dirty && !zone_dirty_ok(zone))
            continue;

        mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
        if (!zone_watermark_ok(zone, order, mark,
                       ac->classzone_idx, alloc_flags)) {
            int ret;

            /* Checked here to keep the fast path fast */
            BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
            if (alloc_flags & ALLOC_NO_WATERMARKS)
                goto try_this_zone;

            if (IS_ENABLED(CONFIG_NUMA) &&
                    !did_zlc_setup && nr_online_nodes > 1) {
                /*
                 * we do zlc_setup if there are multiple nodes
                 * and before considering the first zone allowed
                 * by the cpuset.
                 */
                allowednodes = zlc_setup(zonelist, alloc_flags);
                zlc_active = 1;
                did_zlc_setup = 1;
            }

            if (zone_reclaim_mode == 0 ||
                !zone_allows_reclaim(ac->preferred_zone, zone))
                goto this_zone_full;

            /*
             * As we may have just activated ZLC, check if the first
             * eligible zone has failed zone_reclaim recently.
             */
            if (IS_ENABLED(CONFIG_NUMA) && zlc_active &&
                !zlc_zone_worth_trying(zonelist, z, allowednodes))
                continue;

            ret = zone_reclaim(zone, gfp_mask, order);
            switch (ret) {
            case ZONE_RECLAIM_NOSCAN:
                /* did not scan */
                continue;
            case ZONE_RECLAIM_FULL:
                /* scanned but unreclaimable */
                continue;
            default:
                /* did we reclaim enough */
                if (zone_watermark_ok(zone, order, mark,
                        ac->classzone_idx, alloc_flags))
                    goto try_this_zone;

                /*
                 * Failed to reclaim enough to meet watermark.
                 * Only mark the zone full if checking the min
                 * watermark or if we failed to reclaim just
                 * 1<<order pages or else the page allocator
                 * fastpath will prematurely mark zones full
                 * when the watermark is between the low and
                 * min watermarks.
                 */
                if (((alloc_flags & ALLOC_WMARK_MASK) == ALLOC_WMARK_MIN) ||
                    ret == ZONE_RECLAIM_SOME)
                    goto this_zone_full;

                continue;
            }
        }

try_this_zone:
        page = buffered_rmqueue(ac->preferred_zone, zone, order,
                        gfp_mask, ac->migratetype);
        if (page) {
            if (prep_new_page(page, order, gfp_mask, alloc_flags))
                goto try_this_zone;
            return page;
        }
this_zone_full:
        if (IS_ENABLED(CONFIG_NUMA) && zlc_active)
            zlc_mark_zone_full(zonelist, z);
    }

    /*
     * The first pass makes sure allocations are spread fairly within the
     * local node.  However, the local node might have free pages left
     * after the fairness batches are exhausted, and remote zones haven't
     * even been considered yet.  Try once more without fairness, and
     * include remote zones now, before entering the slowpath and waking
     * kswapd: prefer spilling to a remote zone over swapping locally.
     */
    if (alloc_flags & ALLOC_FAIR) {
        alloc_flags &= ~ALLOC_FAIR;
        if (nr_fair_skipped) {
            zonelist_rescan = true;
            reset_alloc_batches(ac->preferred_zone);
        }
        if (nr_online_nodes > 1)
            zonelist_rescan = true;
    }

    if (unlikely(IS_ENABLED(CONFIG_NUMA) && zlc_active)) {
        /* Disable zlc cache for second zonelist scan */
        zlc_active = 0;
        zonelist_rescan = true;
    }

    if (zonelist_rescan)
        goto zonelist_scan;

    return NULL;
}
/**
 * for_each_zone_zonelist_nodemask - helper macro to iterate over valid zones in a zonelist at or below a given zone index and within a nodemask
 * @zone - The current zone in the iterator
 * @z - The current pointer within zonelist->zones being iterated
 * @zlist - The zonelist being iterated
 * @highidx - The zone index of the highest zone to return
 * @nodemask - Nodemask allowed by the allocator
 *
 * This iterator iterates though all zones at or below a given zone index and
 * within a given nodemask
 */
#define for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \
    for (z = first_zones_zonelist(zlist, highidx, nodemask, &zone);    \
            zone;                                                      \
            z = next_zones_zonelist(++z, highidx, nodemask),           \
            zone = zonelist_zone(z))
/* The macro first searches from the given zone index via first_zones_zonelist(); that index is highidx,
   which was obtained earlier from gfp_zone(). */

first_zones_zonelist() calls next_zones_zonelist() to compute the zoneref, and finally the zone data structure is returned.

/**
 * first_zones_zonelist - Returns the first zone at or below highest_zoneidx within the allowed nodemask in a zonelist
 * @zonelist - The zonelist to search for a suitable zone
 * @highest_zoneidx - The zone index of the highest zone to return
 * @nodes - An optional nodemask to filter the zonelist with
 * @zone - The first suitable zone found is returned via this parameter
 *
 * This function returns the first zone at or below a given zone index that is
 * within the allowed nodemask. The zoneref returned is a cursor that can be
 * used to iterate the zonelist with next_zones_zonelist by advancing it by
 * one before calling.
 */
static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
                    enum zone_type highest_zoneidx,
                    nodemask_t *nodes,
                    struct zone **zone)
{
    struct zoneref *z = next_zones_zonelist(zonelist->_zonerefs,
                            highest_zoneidx, nodes);
    *zone = zonelist_zone(z);
    return z;
}
/* Returns the next zone at or below highest_zoneidx in a zonelist */
struct zoneref *next_zones_zonelist(struct zoneref *z,
                    enum zone_type highest_zoneidx,
                    nodemask_t *nodes)
{
    /*
     * Find the next suitable zone to use for the allocation.
     * Only filter based on nodemask if it's set
     */
    if (likely(nodes == NULL))
        while (zonelist_zone_idx(z) > highest_zoneidx)
            z++;
    else
        while (zonelist_zone_idx(z) > highest_zoneidx ||
                (z->zone && !zref_in_nodemask(z, nodes)))
            z++;

    return z;
}


static inline int zonelist_zone_idx(struct zoneref *zoneref)
{
    return zoneref->zone_idx;
}

The core of the zone selection is next_zones_zonelist(); the highest_zoneidx used here is the value gfp_zone() computed from the allocation mask. A zonelist contains an array of struct zoneref; each zoneref has a zone pointer to the zone structure and a zone_idx member holding the zone's number. The kernel initializes this array when the zonelists are built, in build_zonelists_node(). On the ARM Vexpress platform the relationship between zone type, the zoneref[] array and the zone index is:

ZONE_HIGHMEM _zonerefs[0]->zone_index = 1
ZONE_NORMAL  _zonerefs[1]->zone_index = 0

_zonerefs[0] represents ZONE_HIGHMEM and its zone number zone_index is 1; _zonerefs[1] represents ZONE_NORMAL and its zone number is 0. In other words, the zone-based design deliberately prefers ZONE_HIGHMEM when allocating physical pages, because ZONE_HIGHMEM comes before ZONE_NORMAL in the zonelist.

    Back to our earlier example: gfp_zone(GFP_KERNEL) returns 0, so highest_zoneidx is 0. The first zone of this node is ZONE_HIGHMEM, whose zone_index is 1, so next_zones_zonelist() advances (z++) and first_zones_zonelist() ends up returning ZONE_NORMAL. The for_each_zone_zonelist_nodemask() loop therefore only visits ZONE_NORMAL, since the Vexpress platform has only HIGHMEM and NORMAL.

    

    As a second example, take the GFP_HIGHUSER_MOVABLE mask, which includes __GFP_HIGHMEM. Which zone will next_zones_zonelist() return?

    GFP_HIGHUSER_MOVABLE has the value 0x200da, so gfp_zone(GFP_HIGHUSER_MOVABLE) is 2, i.e. highest_zoneidx is 2, while the first zone of this node is ZONE_HIGHMEM with zone_index 1.

  • In first_zones_zonelist(), since the first zone's zone_index is less than highest_zoneidx, ZONE_HIGHMEM is returned.

  • In for_each_zone_zonelist_nodemask(), next_zones_zonelist(++z, highidx, nodemask) then returns ZONE_NORMAL.

  • The loop therefore visits both ZONE_HIGHMEM and ZONE_NORMAL, with ZONE_HIGHMEM scanned first and ZONE_NORMAL second (a toy model of this scan is sketched below).
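
The following user-space sketch (my own simplified model, not kernel code) mimics the zoneref array of the Vexpress example and the skip logic of next_zones_zonelist(), showing which zones are visited for highest_zoneidx = 0 (GFP_KERNEL) and highest_zoneidx = 2 (GFP_HIGHUSER_MOVABLE):

#include <stdio.h>

struct zoneref { const char *name; int zone_idx; };

/* _zonerefs[] as described above: ZONE_HIGHMEM first (index 1), then ZONE_NORMAL (index 0);
 * a NULL name terminates the list */
static struct zoneref zonerefs[] = {
    { "ZONE_HIGHMEM", 1 },
    { "ZONE_NORMAL",  0 },
    { NULL, 0 },
};

/* skip entries whose zone_idx is above highest_zoneidx, like next_zones_zonelist() */
static struct zoneref *next_zone(struct zoneref *z, int highest_zoneidx)
{
    while (z->name && z->zone_idx > highest_zoneidx)
        z++;
    return z;
}

static void scan(int highest_zoneidx)
{
    printf("highest_zoneidx=%d:", highest_zoneidx);
    for (struct zoneref *z = next_zone(zonerefs, highest_zoneidx);
         z->name;
         z = next_zone(++z, highest_zoneidx))
        printf(" %s", z->name);
    printf("\n");
}

int main(void)
{
    scan(0);    /* GFP_KERNEL: only ZONE_NORMAL is visited */
    scan(2);    /* GFP_HIGHUSER_MOVABLE: ZONE_HIGHMEM first, then ZONE_NORMAL */
    return 0;
}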

To understand the behaviour of for_each_zone_zonelist_nodemask() correctly, two things must be understood.

  • How highest_zoneidx is computed, i.e. how the allocation mask is parsed; this is the job of gfp_zone().

  • Each memory node has a struct pglist_data whose node_zonelists member is a struct zonelist; the zonelist contains a struct zoneref _zonerefs[] array describing the zones. ZONE_HIGHMEM comes first with _zonerefs[0]->zone_index = 1, and ZONE_NORMAL comes after it with _zonerefs[1]->zone_index = 0.

    This design may feel convoluted, but it is the foundation for correctly understanding zone-based physical page allocation.

    __alloc_pages_nodemask() calls first_zones_zonelist() to compute preferred_zoneref and stores its index in ac.classzone_idx; this value is later used by the kswapd kernel thread. With GFP_KERNEL as the mask, for example, preferred_zone is ZONE_NORMAL and ac.classzone_idx is 0.

Understanding the functions above requires a few helper definitions.

First, a set of flags controls the behaviour when the various watermarks are reached.

enum zone_watermarks {
    WMARK_MIN,
    WMARK_LOW,
    WMARK_HIGH,
    NR_WMARK
};
/* The ALLOC_WMARK bits are used as an index to zone->watermark */
#define ALLOC_WMARK_MIN     WMARK_MIN /* use the WMARK_MIN watermark */
#define ALLOC_WMARK_LOW     WMARK_LOW /* use the WMARK_LOW watermark */
#define ALLOC_WMARK_HIGH    WMARK_HIGH /* use the WMARK_HIGH watermark */
#define ALLOC_NO_WATERMARKS 0x04 /* don't check watermarks at all */

/* Mask to get the watermark bits */
#define ALLOC_WMARK_MASK    (ALLOC_NO_WATERMARKS-1)

#define ALLOC_HARDER        0x10 /* try to alloc harder, i.e. relax the limits */
#define ALLOC_HIGH      0x20 /* __GFP_HIGH set */
#define ALLOC_CPUSET        0x40 /* check for correct cpuset: only use nodes the process's cpuset allows */
#define ALLOC_CMA       0x80 /* allow allocations from CMA areas */
#define ALLOC_FAIR      0x100 /* fair zone allocation policy */

The first few flags indicate which watermark to use when deciding whether pages can be allocated. By default (i.e. without pressure from other factors that require more memory), pages may only be allocated when the zone contains at least zone->watermark[WMARK_HIGH] pages; this corresponds to the ALLOC_WMARK_HIGH flag. To use the lower (zone->watermark[WMARK_LOW]) or minimum (zone->watermark[WMARK_MIN]) setting, ALLOC_WMARK_LOW or ALLOC_WMARK_MIN must be set accordingly. ALLOC_HARDER relaxes the limits further, and finally ALLOC_CPUSET tells the kernel that memory may only come from the nodes associated with the CPUs the current process may run on; this option is only meaningful on NUMA systems.

These flags are checked in zone_watermark_ok(), which decides, based on the flags, whether memory can be allocated from a given zone.

bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
              int classzone_idx, int alloc_flags)
{
    return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
                    zone_page_state(z, NR_FREE_PAGES));
}
/*
 * Return true if free pages are above 'mark'. This takes into account the order
 * of the allocation.
 */
static bool __zone_watermark_ok(struct zone *z, unsigned int order,
            unsigned long mark, int classzone_idx, int alloc_flags,
            long free_pages)
{
    /* free_pages may go negative - that's OK */
    long min = mark;    /* mark is the watermark value */
    int o;
    long free_cma = 0;

    free_pages -= (1 << order) - 1;
    if (alloc_flags & ALLOC_HIGH)
        min -= min / 2;
    if (alloc_flags & ALLOC_HARDER)
        min -= min / 4;
#ifdef CONFIG_CMA
    /* If allocation can't use CMA areas don't use free CMA pages */
    if (!(alloc_flags & ALLOC_CMA))
        free_cma = zone_page_state(z, NR_FREE_CMA_PAGES);
#endif

    if (free_pages - free_cma <= min + z->lowmem_reserve[classzone_idx])
        return false;
    for (o = 0; o < order; o++) {
        /* At the next order, this order's pages become unavailable */
        free_pages -= z->free_area[o].nr_free << o;

        /* Require fewer higher order pages to be free */
        min >>= 1;

        if (free_pages <= min)
            return false;
    /* The purpose of this loop:
        Iterate order by order, checking whether enough large (higher-order) blocks remain free.
        In each pass the free pages of the current order are subtracted from the total, because we only care
        whether enough larger blocks are left. Since part of the free pages has been removed from the total,
        the threshold is relaxed accordingly; how much is decided by how far min is shifted right.
    Example: a request for order 1 with 100 free pages, of which 96 are order-0 pages and 4 are order-1
    blocks, and min after the adjustments is 16. In the first pass free_pages drops to about 4; with min
    shifted right by one it becomes 8, so the watermark check fails.
    If the requirement were relaxed further, say min shifted right by 4 to become 1 in the first pass,
    the free pages would satisfy the watermark.
    */
    }
    }
    return true;
}

zone_page_state(z, NR_FREE_PAGES) returns the number of free pages.

    After handling the ALLOC_HIGH and ALLOC_HARDER flags (which lower the minimum watermark to 1/2 or 1/4 of its value, making the allocation try harder or even harder), the function checks whether the number of free pages is below the sum of the minimum value and the emergency reserve given by lowmem_reserve. If it is not, the code iterates over all allocation orders below the requested one, subtracting from free_pages all free pages of the current order (the left shift by o is needed because nr_free records the number of blocks of that order, not single pages). At the same time, the required minimum of free pages is halved for each higher order. If, after walking through all the lower orders, the kernel finds that there is not enough memory, the allocation is refused.
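
To see the loop in action, here is a small user-space sketch (my own simplified re-implementation, using the assumptions of the example in the comment above: mark = 16, 100 free pages, 96 free order-0 pages, 4 free order-1 blocks, no CMA and no lowmem reserve):

#include <stdbool.h>
#include <stdio.h>

#define MAX_ORDER 11

/* free block counts per order, as in the example: 96 order-0 pages, 4 order-1 blocks */
static long nr_free[MAX_ORDER] = { 96, 4 };

static bool watermark_ok(unsigned int order, long mark, long free_pages)
{
    long min = mark;
    unsigned int o;

    free_pages -= (1 << order) - 1;
    if (free_pages <= min)          /* no lowmem_reserve in this toy model */
        return false;
    for (o = 0; o < order; o++) {
        free_pages -= nr_free[o] << o;  /* pages of this order do not help a larger request */
        min >>= 1;                      /* but the required reserve is relaxed */
        if (free_pages <= min)
            return false;
    }
    return true;
}

int main(void)
{
    /* an order-1 request with 100 free pages and mark 16 fails, as the comment explains */
    printf("order 1, mark 16: %s\n", watermark_ok(1, 16, 100) ? "ok" : "below watermark");
    /* an order-0 request is fine: 100 free pages against a mark of 16 */
    printf("order 0, mark 16: %s\n", watermark_ok(0, 16, 100) ? "ok" : "below watermark");
    return 0;
}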

    The system defines three watermarks (WMARK_MIN / WMARK_LOW / WMARK_HIGH). They are computed in __setup_per_zone_wmarks().

static void __setup_per_zone_wmarks(void)
{
    unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
    unsigned long lowmem_pages = 0;
    struct zone *zone;
    unsigned long flags;

    /* Calculate total number of !ZONE_HIGHMEM pages */
    for_each_zone(zone) {
        if (!is_highmem(zone))
            lowmem_pages += zone->managed_pages;
    }

    for_each_zone(zone) {
        u64 tmp;

        spin_lock_irqsave(&zone->lock, flags);
        tmp = (u64)pages_min * zone->managed_pages;
        do_div(tmp, lowmem_pages);
        if (is_highmem(zone)) {
            /*
             * __GFP_HIGH and PF_MEMALLOC allocations usually don't
             * need highmem pages, so cap pages_min to a small
             * value here.
             *
             * The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN)
             * deltas controls asynch page reclaim, and so should
             * not be capped for highmem.
             */
            unsigned long min_pages;

            min_pages = zone->managed_pages / 1024;
            min_pages = clamp(min_pages, SWAP_CLUSTER_MAX, 128UL);
            zone->watermark[WMARK_MIN] = min_pages;
        } else {
            /*
             * If it's a lowmem zone, reserve a number of pages
             * proportionate to the zone's size.
             */
            zone->watermark[WMARK_MIN] = tmp;
        }

        zone->watermark[WMARK_LOW]  = min_wmark_pages(zone) + (tmp >> 2);
        zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1);

        __mod_zone_page_state(zone, NR_ALLOC_BATCH,
            high_wmark_pages(zone) - low_wmark_pages(zone) -
            atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]));

        setup_zone_migrate_reserve(zone);
        spin_unlock_irqrestore(&zone->lock, flags);
    }

    /* update totalreserve_pages */
    calculate_totalreserve_pages();
}

The watermark computation uses min_free_kbytes, which is derived at boot time from the amount of free memory in the system, in init_per_zone_wmark_min(). It can also be tuned at run time through the sysctl node /proc/sys/vm/min_free_kbytes. The buddy system and the kswapd kernel thread use these watermarks later.
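
As a rough illustration of the arithmetic above (my own user-space sketch; min_free_kbytes = 4096 and the zone sizes are made-up example values, with PAGE_SHIFT = 12):

#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
    unsigned long min_free_kbytes = 4096;                            /* example value */
    unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);  /* KB -> pages: 4096 KB = 1024 pages */

    unsigned long long lowmem_pages = 190000;    /* made-up total managed pages of all !HIGHMEM zones */
    unsigned long long zone_managed = 190000;    /* made-up managed pages of the (lowmem) zone being set up */

    /* tmp = pages_min * zone->managed_pages / lowmem_pages, as in __setup_per_zone_wmarks() */
    unsigned long tmp = (unsigned long)(pages_min * zone_managed / lowmem_pages);

    unsigned long wmark_min  = tmp;
    unsigned long wmark_low  = wmark_min + (tmp >> 2);  /* min + 25% */
    unsigned long wmark_high = wmark_min + (tmp >> 1);  /* min + 50% */

    printf("WMARK_MIN=%lu WMARK_LOW=%lu WMARK_HIGH=%lu (pages)\n",
           wmark_min, wmark_low, wmark_high);
    return 0;
}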

    

    Assuming zone_watermark_ok() decides that free pages are plentiful, buffered_rmqueue() is called next to allocate physical pages from the buddy system.

If the kernel has found a suitable zone with enough free pages, two things remain to be done. First, it must check that the pages are actually contiguous (so far it only knows that many free pages exist). Second, the pages must be removed from the free_lists in buddy-system fashion, which may require splitting and rearranging memory blocks.

    The kernel delegates this work to buffered_rmqueue(). The call path is:

__alloc_pages_nodemask()->get_page_from_freelist()->buffered_rmqueue()

/*
 * Allocate a page from the given zone. Use pcplists for order-0 allocations.
 */
static inline
struct page *buffered_rmqueue(struct zone *preferred_zone,
            struct zone *zone, unsigned int order,
            gfp_t gfp_flags, int migratetype)
{
    unsigned long flags;
    struct page *page;
    bool cold = ((gfp_flags & __GFP_COLD) != 0);    /* was a cold page requested? */

    if (likely(order == 0)) {   /* single-page allocation */
        struct per_cpu_pages *pcp;
        struct list_head *list;

        local_irq_save(flags);  /* disable local interrupts, saving the previous state */
        pcp = &this_cpu_ptr(zone->pageset)->pcp;    /* the per-CPU page cache */
        list = &pcp->lists[migratetype];    /* the cache's free list for this migrate type */
        if (list_empty(list)) { /* cache empty, possibly because the previous request used a different migrate type */
            /* see the implementation of rmqueue_bulk() below */
            pcp->count += rmqueue_bulk(zone, 0,
                    pcp->batch, list,
                    migratetype, cold); /* refill the per-CPU cache with a batch of pages taken from the buddy system */
            if (unlikely(list_empty(list)))
                goto failed;
        }

        if (cold)
            page = list_entry(list->prev, struct page, lru);
        else
            page = list_entry(list->next, struct page, lru);

        list_del(&page->lru);
        pcp->count--;
    } else {
        if (unlikely(gfp_flags & __GFP_NOFAIL)) {
            /*
             * __GFP_NOFAIL is not to be used in new code.
             *
             * All __GFP_NOFAIL callers should be fixed so that they
             * properly detect and handle allocation failures.
             *
             * We most definitely don't want callers attempting to
             * allocate greater than order-1 page units with
             * __GFP_NOFAIL.
             */
            WARN_ON_ONCE(order > 1);
        }
        spin_lock_irqsave(&zone->lock, flags);
        page = __rmqueue(zone, order, migratetype);
        spin_unlock(&zone->lock);
        if (!page)
            goto failed;
        __mod_zone_freepage_state(zone, -(1 << order),
                      get_freepage_migratetype(page));
    }

    __mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order));
    if (atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]) <= 0 &&
        !test_bit(ZONE_FAIR_DEPLETED, &zone->flags))
        set_bit(ZONE_FAIR_DEPLETED, &zone->flags);

    __count_zone_vm_events(PGALLOC, zone, 1 << order);
    zone_statistics(preferred_zone, zone, gfp_flags);
    local_irq_restore(flags);

    VM_BUG_ON_PAGE(bad_range(zone, page), page);
    return page;

failed:
    local_irq_restore(flags);
    return NULL;
}

If only a single page is requested (order 0, 2^0 = 1), the kernel optimizes: the page is not taken from the buddy system directly but from the per-CPU page cache (see 深入Linux内核架构, p. 184). Recall that this cache provides per-CPU lists of hot and cold pages; zone->pageset is an array implementing the per-CPU hot/cold page-frame lists, and the kernel uses these lists to hold "fresh" pages for satisfying requests. Hot and cold page frames differ in their CPU-cache state: some frames are likely still in the CPU cache and can be accessed quickly, hence "hot"; uncached frames are "cold".

    The single-page case:

        If the allocation flags include __GFP_COLD, a cold page must be taken from the per-CPU cache, provided one is available.
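
A tiny user-space sketch of the hot/cold convention (my own model, not kernel code): the per-CPU list keeps hot pages at the head and cold pages at the tail, so a hot request takes list->next and a cold request takes list->prev, just as buffered_rmqueue() does above:

#include <stdio.h>
#include <stdbool.h>

/* the per-CPU list modelled as an array: index 0 is the head (hottest), the last index is the tail (coldest) */
static int pcp_list[] = { 101, 102, 103, 104 };
static int head = 0, tail = 3;

static int take_page(bool cold)
{
    return cold ? pcp_list[tail--] : pcp_list[head++];  /* cold: from the tail; hot: from the head */
}

int main(void)
{
    printf("hot request  -> page %d\n", take_page(false)); /* 101, taken from the head */
    printf("cold request -> page %d\n", take_page(true));  /* 104, taken from the tail */
    return 0;
}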

Implementation of rmqueue_bulk():

[__alloc_pages_nodemask()->get_page_from_freelist()->buffered_rmqueue()->rmqueue_bulk()]

/*
 * Obtain a specified number of elements from the buddy allocator, all under
 * a single hold of the lock, for efficiency.  Add them to the supplied list.
 * Returns the number of new pages which were placed at *list.
 */
static int rmqueue_bulk(struct zone *zone, unsigned int order,
            unsigned long count, struct list_head *list,
            int migratetype, bool cold)
{
    int i;

    spin_lock(&zone->lock);
    for (i = 0; i < count; ++i) {   /* handle one block at a time */
        /* see the implementation of __rmqueue() below */
        struct page *page = __rmqueue(zone, order, migratetype);    /* allocate a block of the requested migrate type */
        if (unlikely(page == NULL))
            break;

        /*
         * Split buddy pages returned by expand() are received here
         * in physical page order. The page is added to the callers and
         * list and the list head then moves forward. From the callers
         * perspective, the linked list is ordered by page number in
         * some conditions. This is useful for IO devices that can
         * merge IO requests if the physical pages are ordered
         * properly.
         */
        if (likely(!cold))
            list_add(&page->lru, list); /* hot pages are added at the head of the list */
        else
            list_add_tail(&page->lru, list);    /* cold pages are added at the tail */
        list = &page->lru;
        if (is_migrate_cma(get_freepage_migratetype(page)))
            __mod_zone_page_state(zone, NR_FREE_CMA_PAGES,
                          -(1 << order));
    }
    __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order)); /* update the free-page counter */
    spin_unlock(&zone->lock);
    return i;   /* number of blocks added to the per-CPU cache list */
}

Whether one page or multiple pages are requested, both paths end up in the following function:

Implementation of __rmqueue():

[__alloc_pages_nodemask()->get_page_from_freelist()->buffered_rmqueue()->rmqueue_bulk()->__rmqueue()]

/*
 * Do the hard work of removing an element from the buddy allocator.
 * Call me with the zone->lock already held.
 * Allocates a block of the requested migrate type.
 */
static struct page *__rmqueue(struct zone *zone, unsigned int order,
                        int migratetype)
{
    struct page *page;

retry_reserve:
    /* see the implementation of this function below */
    page = __rmqueue_smallest(zone, order, migratetype);/* normal case: allocate a block of the requested
        migrate type from this zone; scan the free lists for the given order, zone and migrate type until a
        suitable contiguous block is found */

    if (unlikely(!page) && migratetype != MIGRATE_RESERVE) {/* nothing was found, and this is not yet an emergency (reserve) allocation */
        page = __rmqueue_fallback(zone, order, migratetype);/* fall back to the other migrate lists */

        /*
         * Use MIGRATE_RESERVE rather than fail an allocation. goto
         * is used because __rmqueue_smallest is an inline function
         * and we want just one call site
         */
        if (!page) {/* still nothing: switch to MIGRATE_RESERVE, i.e. an emergency allocation */
            migratetype = MIGRATE_RESERVE;
            goto retry_reserve;
        }
    }

    trace_mm_page_alloc_zone_locked(page, order, migratetype);
    return page;
}

Implementation of __rmqueue_smallest():

[__alloc_pages_nodemask()->get_page_from_freelist()->buffered_rmqueue()->rmqueue_bulk()->__rmqueue()->__rmqueue_smallest()]

/*
 * Go through the free lists for the given migratetype and remove
 * the smallest available page from the freelists
 */
static inline
struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
                        int migratetype)
{
    unsigned int current_order;
    struct free_area *area;
    struct page *page;

    /* Find a page of the appropriate size in the preferred list.
        Smaller blocks are useless, because the allocated pages must be contiguous. Recall that the free pages
        of each order are split into per-migrate-type lists, and the correct list has to be chosen.
        Checking for a block of suitable size is simple: if the list contains an element, it can be used,
        because it holds the required number of contiguous pages. Otherwise the kernel moves on to the next
        higher order and searches there.
     */
    for (current_order = order; current_order < MAX_ORDER; ++current_order) {
        area = &(zone->free_area[current_order]);
        if (list_empty(&area->free_list[migratetype]))
            continue;

        page = list_entry(area->free_list[migratetype].next,
                            struct page, lru);
        list_del(&page->lru);
        rmv_page_order(page);/* clear the buddy marker, i.e. set page->_mapcount = -1 */
        area->nr_free--;/* after removing a block with list_del, the nr_free count of struct free_area must be decremented */
        /* see the implementation of expand() below */
        expand(zone, page, order, current_order, area, migratetype);
        set_freepage_migratetype(page, migratetype);
        return page;
    }

    return NULL;
}

    In __rmqueue_smallest() the search starts at order and walks upwards through the zone's free lists; if the free_area of the current order has no block of the requested migratetype, the next higher order is tried.

    Why? Because at boot time free pages are placed, as far as possible, on the MAX_ORDER-1 lists, which can be seen right after boot with "cat /proc/pagetypeinfo". Once a free block of the requested migratetype is found at some order, it is taken off its list and expand() is called to "cut the cake": the block taken is usually larger than needed, and after cutting, the remainder must be put back into the buddy system.
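
A minimal user-space model of that search (my own sketch; the nr_free[] values are invented to resemble a freshly booted system where most memory sits on the highest-order lists):

#include <stdio.h>

#define MAX_ORDER 11

/* invented free-block counts per order for one migrate type: only the high orders are populated */
static int nr_free[MAX_ORDER] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 7 };

/* return the first order >= the requested order that has a free block, or -1 */
static int find_order(unsigned int order)
{
    for (unsigned int cur = order; cur < MAX_ORDER; cur++)
        if (nr_free[cur] > 0)
            return (int)cur;
    return -1;
}

int main(void)
{
    int got = find_order(3);
    if (got >= 0)
        printf("order-3 request satisfied from an order-%d block; expand() splits off the rest\n", got);
    else
        printf("no block found; fall back to other migrate types\n");
    return 0;
}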

get_page_from_freelist()->buffered_rmqueue()->__rmqueue()->__rmqueue_smallest()->expand()

/*
 * The order of subdivision here is critical for the IO subsystem.
 * Please do not alter this order without good reasons and regression
 * testing. Specifically, as large blocks of memory are subdivided,
 * the order in which smaller blocks are delivered depends on the order
 * they're subdivided in this function. This is the primary factor
 * influencing the order in which pages are delivered to the IO
 * subsystem according to empirical testing, and this is also justified
 * by considering the behavior of a buddy system containing a single
 * large block of memory acted on by a series of small allocations.
 * This behavior is a critical factor in sglist merging's success.
 *
 * -- nyc
 */
/* The function takes a set of parameters; the semantics of page, zone and area are obvious. low is the
   requested allocation order, high is the order the block was actually taken from, and migratetype is the
   migration type. */
static inline void expand(struct zone *zone, struct page *page,
    int low, int high, struct free_area *area,
    int migratetype)
{
    unsigned long size = 1 << high;

    while (high > low) {
        area--;
        high--;
        size >>= 1;
        VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);

        if (IS_ENABLED(CONFIG_DEBUG_PAGEALLOC) &&
            debug_guardpage_enabled() &&
            high < debug_guardpage_minorder()) {
            /*
             * Mark as guard pages (or page), that will allow to
             * merge back to allocator when buddy will be freed.
             * Corresponding page table entries will not be touched,
             * pages will stay not present in virtual address space
             */
            set_page_guard(zone, &page[size], high, migratetype);
            continue;
        }
        list_add(&page[size].lru, &area->free_list[migratetype]);
        area->nr_free++;
        set_page_order(&page[size], high);
    }
}

    It is best to step through the code. Assume the following situation: a block of order 3 is to be allocated, no block of that size is available, so the kernel picks a block of order 5. The function is therefore called with the following parameters (a toy simulation of this split follows the numbered steps below):

expand(zone, page, low = 3, high = 5, area, migratetype)


    The kernel always works with the free_area lists specific to a migration type and does not change a page's migration type during this processing. (The figure from the original, showing mem_map and the split blocks, is not reproduced here.)

  1. size is initialized to 2^5 = 32. The block being allocated has already been removed from its free_area list in __rmqueue().

  2. In the first loop iteration the kernel moves to the next lower order with the same migration type, i.e. order 4, and the block length drops to 16 (computed by size >>= 1). The second half of the original block is inserted into the order-4 free_area list. The buddy system only needs the first page instance of a block for management purposes; the block's length can be derived automatically from the list it is on.

  3. The address of that second half is computed as &page[size], while the page pointer keeps pointing to the start of the originally allocated block and never changes.

  4. The next iteration places the second half of the remaining 16 pages onto the free_area list for size = 8, with page still unchanged. The remaining block now has the requested length and page can be returned as the result. The first 8 pages of the initial block are used; all the other pages go onto the appropriate free_area lists of the buddy system.
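
The same split can be simulated in user space (my own sketch reproducing the low = 3, high = 5 example; it only prints which half would be returned to which free list):

#include <stdio.h>

/* mimic expand(): split an order-`high` block down to order-`low`,
 * returning the upper half to the free list of each intermediate order */
static void expand_sim(unsigned long first_pfn, int low, int high)
{
    unsigned long size = 1UL << high;

    while (high > low) {
        high--;
        size >>= 1;
        /* &page[size] in the kernel corresponds to first_pfn + size here */
        printf("put pfn %lu..%lu back on the order-%d free list\n",
               first_pfn + size, first_pfn + 2 * size - 1, high);
    }
    printf("return pfn %lu..%lu (order %d) to the caller\n",
           first_pfn, first_pfn + (1UL << low) - 1, low);
}

int main(void)
{
    /* an order-5 block (32 pages) starting at pfn 1000, requested order 3 (8 pages) */
    expand_sim(1000, 3, 5);
    return 0;
}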


    If no contiguous block is available on the list for the requested migration type, __rmqueue_smallest() returns a NULL pointer. The kernel then tries, in a defined fallback order, the lists of the other migration types to satisfy the request. That task is delegated to __rmqueue_fallback(); the fallback order of the migration types is defined in the fallbacks array.

/*
 * This array describes the order lists are fallen back to when
 * the free lists for the desirable migrate type are depleted
 */
static int fallbacks[MIGRATE_TYPES][4] = {
    [MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,     MIGRATE_RESERVE },
    [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,     MIGRATE_RESERVE },
#ifdef CONFIG_CMA
    [MIGRATE_MOVABLE]     = { MIGRATE_CMA,         MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
    [MIGRATE_CMA]         = { MIGRATE_RESERVE }, /* Never used */
#else
    [MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE,   MIGRATE_RESERVE },
#endif
    [MIGRATE_RESERVE]     = { MIGRATE_RESERVE }, /* Never used */
#ifdef CONFIG_MEMORY_ISOLATION
    [MIGRATE_ISOLATE]     = { MIGRATE_RESERVE }, /* Never used */
#endif
};
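
To make the fallback order tangible, here is a small user-space sketch (my own model; CONFIG_CMA is assumed off, so the non-CMA rows of the array above apply, and the MIGRATE_* values are re-declared locally just for printing):

#include <stdio.h>

enum { MIGRATE_UNMOVABLE, MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE, MIGRATE_RESERVE, MIGRATE_TYPES };

static const char *names[MIGRATE_TYPES] = {
    "UNMOVABLE", "RECLAIMABLE", "MOVABLE", "RESERVE",
};

/* fallback order without CONFIG_CMA, mirrored from the array above */
static const int fallbacks[MIGRATE_TYPES][3] = {
    [MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,   MIGRATE_RESERVE },
    [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,   MIGRATE_RESERVE },
    [MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
    [MIGRATE_RESERVE]     = { MIGRATE_RESERVE },
};

int main(void)
{
    /* __rmqueue_fallback() stops when it reaches MIGRATE_RESERVE, which is handled separately */
    for (int t = MIGRATE_UNMOVABLE; t < MIGRATE_RESERVE; t++) {
        printf("%-11s falls back to:", names[t]);
        for (int i = 0; i < 3 && fallbacks[t][i] != MIGRATE_RESERVE; i++)
            printf(" %s", names[fallbacks[t][i]]);
        printf(", then RESERVE (emergency, handled separately)\n");
    }
    return 0;
}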

First, the function iterates over the lists of the allocation orders once more:

/* Remove an element from the buddy allocator from the fallback list */
/* Not only the same migration type is considered, but also the other types named in the fallback list.
   Note that this function walks the allocation orders from large to small - the opposite of the usual
   strategy (except for MIGRATE_RESERVE). The kernel's reasoning: if it cannot avoid taking a block of a
   different migration type, it should take as large a block as possible. Preferring smaller blocks would
   spread fragmentation into the other lists, because blocks of different migration types would become mixed,
   which is exactly what we want to avoid. The special list MIGRATE_RESERVE holds memory for emergency
   allocations and needs separate handling. If the free list of the migration type currently under
   consideration contains free blocks, memory is allocated from it. */
static inline struct page *
__rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype)
{
    struct free_area *area;
    unsigned int current_order;
    struct page *page;

    /* Find the largest possible block of pages in the other lists */
    for (current_order = MAX_ORDER-1;
                current_order >= order && current_order <= MAX_ORDER-1;
                --current_order) {/* unlike the walk for the requested migrate type, this starts from the
                                     largest order and works downwards, precisely to limit fragmentation */
        int i;
        for (i = 0;; i++) {
            int migratetype = fallbacks[start_migratetype][i];
            int buddy_type = start_migratetype;

            /* MIGRATE_RESERVE handled later if necessary */
            if (migratetype == MIGRATE_RESERVE)
                break;

            area = &(zone->free_area[current_order]);/* the free_area entry for this (high) order */
            if (list_empty(&area->free_list[migratetype])) /* no free block of this fallback type at this order; try the next fallback type */
                continue;

            page = list_entry(area->free_list[migratetype].next,
                    struct page, lru);/* a free block was found */
            area->nr_free--;    /* so the count of free blocks at this order is decremented */

            if (!is_migrate_cma(migratetype)) {/* not a CMA area */
                try_to_steal_freepages(zone, page,
                            start_migratetype,
                            migratetype);
            } else {
                /*
                 * When borrowing from MIGRATE_CMA, we need to
                 * release the excess buddy pages to CMA
                 * itself, and we do not try to steal extra
                 * free pages.
                 */
                buddy_type = migratetype;
            }

            /* Remove the page from the freelists */
            list_del(&page->lru);
            rmv_page_order(page);/* clear the buddy marker: this page no longer belongs to the buddy system */

            expand(zone, page, order, current_order, area,
                    buddy_type);

            /*
             * The freepage_migratetype may differ from pageblock's
             * migratetype depending on the decisions in
             * try_to_steal_freepages(). This is OK as long as it
             * does not differ for MIGRATE_CMA pageblocks. For CMA
             * we need to make sure unallocated pages flushed from
             * pcp lists are returned to the correct freelist.
             */
            set_freepage_migratetype(page, buddy_type);

            trace_mm_page_alloc_extfrag(page, order, current_order,
                start_migratetype, migratetype);

            return page;
        }
    }

    return NULL;
}

/*
 * When we are falling back to another migratetype during allocation, try to
 * steal extra free pages from the same pageblocks to satisfy further
 * allocations, instead of polluting multiple pageblocks.
 *
 * If we are stealing a relatively large buddy page, it is likely there will
 * be more free pages in the pageblock, so try to steal them all. For
 * reclaimable and unmovable allocations, we steal regardless of page size,
 * as fragmentation caused by those allocations polluting movable pageblocks
 * is worse than movable allocations stealing from unmovable and reclaimable
 * pageblocks.
 *
 * If we claim more than half of the pageblock, change pageblock's migratetype
 * as well.
 */
static void try_to_steal_freepages(struct zone *zone, struct page *page,
                  int start_type, int fallback_type)
{
    int current_order = page_order(page);

    /* Take ownership for orders >= pageblock_order.
       The notion of what counts as a "large" block is given by the global pageblock_order, which defines
       the order of a large block. If a free block taken from another migration list has to be split, the
       kernel must decide what to do with the remaining pages. If the remainder is itself a fairly large
       block, it makes sense to move the whole block over to the migration list of the current allocation
       type, which reduces fragmentation.
       When allocating reclaimable memory, the kernel is more aggressive about moving free pages from one
       migration list to another: such allocations often come in bursts and would otherwise scatter many
       small reclaimable blocks across all migration lists. To avoid that, the remaining pages of a
       MIGRATE_RECLAIMABLE allocation are always moved to the reclaimable migration list. */
    if (current_order >= pageblock_order) { /* pageblock_order equals MAX_ORDER-1 here */
        change_pageblock_range(page, current_order, start_type);
        return;
    }

    if (current_order >= pageblock_order / 2 || /* a large block: move all of it over to start_type */
        start_type == MIGRATE_RECLAIMABLE || /* for reclaimable pages, be more aggressive about converting the migrate type */
        start_type == MIGRATE_UNMOVABLE ||
        page_group_by_mobility_disabled) {
        int pages;

        pages = move_freepages_block(zone, page, start_type);/* move these pages over to the start_type migrate lists */

        /* Claim the whole block if over half of it is free */
        if (pages >= (1 << (pageblock_order-1)) ||
                page_group_by_mobility_disabled)
            set_pageblock_migratetype(page, start_type);/* set the migrate type of the whole pageblock; move_freepages_block() above changed it per free block */
    }
}

/* Move a whole pageblock's worth of pages onto the free lists of the given migratetype */
int move_freepages_block(struct zone *zone, struct page *page,
                int migratetype)
{
    unsigned long start_pfn, end_pfn;
    struct page *start_page, *end_page;

    start_pfn = page_to_pfn(page);/* page frame number */
    /* pageblock_nr_pages is the number of pages in a "large" block: pageblock_nr_pages = (1UL << pageblock_order) = 1024 here */
    start_pfn = start_pfn & ~(pageblock_nr_pages-1);
    start_page = pfn_to_page(start_pfn);
    /* prepare to move pageblock_nr_pages pages: when a migrate type is converted, a whole pageblock of
       contiguous pages is normally converted at once, which reduces fragmentation */
    end_page = start_page + pageblock_nr_pages - 1;
    end_pfn = start_pfn + pageblock_nr_pages - 1;

    /* Do not cross zone boundaries */
    if (!zone_spans_pfn(zone, start_pfn))
        start_page = page;
    if (!zone_spans_pfn(zone, end_pfn))/* the range to be moved must lie entirely within one zone */
        return 0;

    return move_freepages(zone, start_page, end_page, migratetype);/* hand the page range over to move_freepages() for the actual conversion */
}
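
The pfn alignment above is plain bit arithmetic; a quick user-space check (my own sketch, using pageblock_nr_pages = 1024 as in the comment):

#include <stdio.h>

int main(void)
{
    unsigned long pageblock_nr_pages = 1024;    /* 1 << pageblock_order, as noted above */
    unsigned long pfn = 21317;                  /* arbitrary example page frame number */

    /* round down to the start of the pageblock, exactly like move_freepages_block() */
    unsigned long start_pfn = pfn & ~(pageblock_nr_pages - 1);
    unsigned long end_pfn   = start_pfn + pageblock_nr_pages - 1;

    printf("pfn %lu lies in pageblock %lu..%lu\n", pfn, start_pfn, end_pfn);
    return 0;
}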

/*
 * Move the free pages in a range to the free lists of the requested type.
 * Note that start_page and end_pages are not aligned on a pageblock
 * boundary. If alignment is required, use move_freepages_block()
 */
int move_freepages(struct zone *zone,
              struct page *start_page, struct page *end_page,
              int migratetype)
{
    struct page *page;
    unsigned long order;
    int pages_moved = 0;

#ifndef CONFIG_HOLES_IN_ZONE
    /*
     * page_zone is not safe to call in this context when
     * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant
     * anyway as we check zone boundaries in move_freepages_block().
     * Remove at a later date when no bug reports exist related to
     * grouping pages by mobility
     */
    VM_BUG_ON(page_zone(start_page) != page_zone(end_page));
#endif

    for (page = start_page; page <= end_page;) {
        /* Make sure we are not inadvertently changing nodes */
        VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);

        if (!pfn_valid_within(page_to_pfn(page))) {
            page++;
            continue;
        }

        if (!PageBuddy(page)) {/* skip pages that are not free buddy-system pages */
            page++;
            continue;
        }

        order = page_order(page);/* the order of this free block */
        list_move(&page->lru,
              &zone->free_area[order].free_list[migratetype]);/* move the block onto the list of the requested migrate type */
        set_freepage_migratetype(page, migratetype);/* record the migrate type of the block: page->index = migratetype */
        page += 1 << order;/* 2^order pages are converted in one step */
        pages_moved += 1 << order;
    }

    return pages_moved;/* after the whole range has been processed, return how many pages were actually moved */
}

Once the requested pages have been allocated, __rmqueue() returns the struct page of the first page of the block. Back in buffered_rmqueue(), zone_statistics() finally updates some statistics.

Back in get_page_from_freelist(), the page must still pass a few checks in prep_new_page() before it can "leave the factory".

static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
                                int alloc_flags)
{
    int i;

    for (i = 0; i < (1 << order); i++) {
        struct page *p = page + i;
        if (unlikely(check_new_page(p)))
            return 1;
    }

    set_page_private(page, 0);
    set_page_refcounted(page);

    arch_alloc_page(page, order);
    kernel_map_pages(page, 1 << order, 1);
    kasan_alloc_pages(page, order);

    if (gfp_flags & __GFP_ZERO)
        prep_zero_page(page, order, gfp_flags);

    if (order && (gfp_flags & __GFP_COMP))
        prep_compound_page(page, order);

    set_page_owner(page, order, gfp_flags);

    /*
     * page->pfmemalloc is set when ALLOC_NO_WATERMARKS was necessary to
     * allocate the page. The expectation is that the caller is taking
     * steps that will free more memory. The caller should avoid the page
     * being used for !PFMEMALLOC purposes.
     */
    page->pfmemalloc = !!(alloc_flags & ALLOC_NO_WATERMARKS);

    return 0;
}


/*
 * This page is about to be returned from the page allocator
 */
static inline int check_new_page(struct page *page)
{
    const char *bad_reason = NULL;
    unsigned long bad_flags = 0;

    if (unlikely(page_mapcount(page))) /* a freshly allocated page must have _mapcount == 0 */
        bad_reason = "nonzero mapcount";
    if (unlikely(page->mapping != NULL)) /* page->mapping must still be NULL at this point */
        bad_reason = "non-NULL mapping";
    if (unlikely(atomic_read(&page->_count) != 0)) /* _count must be 0 here. Note that a page returned by alloc_pages()
            has _count == 1, but only because set_page_refcounted()->set_page_count() is called afterwards to set it to 1 */
        bad_reason = "nonzero _count";
    if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_PREP)) { /* PAGE_FLAGS_CHECK_AT_PREP was cleared when the page was freed;
            if any of these flags is set now, something went wrong during allocation */
        bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag set";
        bad_flags = PAGE_FLAGS_CHECK_AT_PREP;
    }
#ifdef CONFIG_MEMCG
    if (unlikely(page->mem_cgroup))
        bad_reason = "page still charged to cgroup";
#endif
    if (unlikely(bad_reason)) {
        bad_page(page, bad_reason, bad_flags);
        return 1;
    }
    return 0;
}
