zone_reclaim

int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
{
        int node_id;
        int ret;

        /*
         * Zone reclaim reclaims unmapped file backed pages and
         * slab pages if we are over the defined limits.
         */
        if (zone_pagecache_reclaimable(zone) <= zone->min_unmapped_pages &&
            zone_page_state(zone, NR_SLAB_RECLAIMABLE) <= zone->min_slab_pages)
                return ZONE_RECLAIM_FULL;

        if (!zone_reclaimable(zone))
                return ZONE_RECLAIM_FULL;

        /*
         * Do not scan if the allocation should not be delayed.
         */
        if (!gfpflags_allow_blocking(gfp_mask) || (current->flags & PF_MEMALLOC))
                return ZONE_RECLAIM_NOSCAN;

        /*
         * Only run zone reclaim on the local zone or on zones that do not
         * have associated processors. This will favor the local processor
         * over remote processors and spread off node memory allocations
         * as wide as possible.
         */
        node_id = zone_to_nid(zone);
        if (node_state(node_id, N_CPU) && node_id != numa_node_id())
                return ZONE_RECLAIM_NOSCAN;

        /* Only one reclaim pass may run against a zone at a time. */
        if (test_and_set_bit(ZONE_RECLAIM_LOCKED, &zone->flags))
                return ZONE_RECLAIM_NOSCAN;

        ret = __zone_reclaim(zone, gfp_mask, order);
        clear_bit(ZONE_RECLAIM_LOCKED, &zone->flags);

        if (!ret)
                count_vm_event(PGSCAN_ZONE_RECLAIM_FAILED);

        return ret;
}
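
For reference, the return codes and mode bits used above are defined elsewhere in mm/ in kernels of this era (the ZONE_RECLAIM_* return codes in mm/internal.h, the RECLAIM_* bits in mm/vmscan.c; zone reclaim was reworked into node reclaim in v4.8). The snippet below is a sketch of those definitions for orientation, with my own descriptive comments, not part of the function itself:

/* Return values from zone_reclaim() */
#define ZONE_RECLAIM_NOSCAN     -2      /* did not scan at all */
#define ZONE_RECLAIM_FULL       -1      /* nothing left worth reclaiming */
#define ZONE_RECLAIM_SOME       0       /* scanned but did not free enough */
#define ZONE_RECLAIM_SUCCESS    1       /* freed the requested pages */

/* Bits accepted by the vm.zone_reclaim_mode sysctl */
#define RECLAIM_OFF     0
#define RECLAIM_ZONE    (1<<0)  /* run zone reclaim at all */
#define RECLAIM_WRITE   (1<<1)  /* allow writing out dirty pages */
#define RECLAIM_UNMAP   (1<<2)  /* allow unmapping mapped pages */
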
/*
 * Try to free up some pages from this zone through reclaim.
 */
static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
{
        /* Minimum pages needed in order to stay on node */
        const unsigned long nr_pages = 1 << order;
        struct task_struct *p = current;
        struct reclaim_state reclaim_state;
        struct scan_control sc = {
                .nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX),
                .gfp_mask = (gfp_mask = memalloc_noio_flags(gfp_mask)),
                .order = order,
                .priority = ZONE_RECLAIM_PRIORITY,
                .may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE),
                .may_unmap = !!(zone_reclaim_mode & RECLAIM_UNMAP),
                .may_swap = 1,
        };

        cond_resched();
        /*
         * We need to be able to allocate from the reserves of the node
         * and we also need to be able to write out pages for RECLAIM_WRITE
         * and RECLAIM_UNMAP.
         */
        p->flags |= PF_MEMALLOC | PF_SWAPWRITE;
        lockdep_set_current_reclaim_state(gfp_mask);
        reclaim_state.reclaimed_slab = 0;
        p->reclaim_state = &reclaim_state;

        if (zone_pagecache_reclaimable(zone) > zone->min_unmapped_pages) {
                /*
                 * Free memory by calling shrink zone with increasing
                 * priorities until we have enough memory freed.
                 */
                do {
                        shrink_zone(zone, &sc, true);
                } while (sc.nr_reclaimed < nr_pages && --sc.priority >= 0);
        }

        p->reclaim_state = NULL;
        current->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE);
        lockdep_clear_current_reclaim_state();
        return sc.nr_reclaimed >= nr_pages;
}
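
sc.priority starts at ZONE_RECLAIM_PRIORITY, and the do/while loop above lowers it after each pass that did not free enough. Since shrink_zone() scans on the order of lru_pages >> priority, each retry roughly doubles the scan window until priority 0 scans everything. The standalone program below (not kernel code; the 1M-page LRU size is made up) models that escalation, assuming ZONE_RECLAIM_PRIORITY is 4 as in this era's mm/vmscan.c:

#include <stdio.h>

#define ZONE_RECLAIM_PRIORITY 4         /* starting priority, as in mm/vmscan.c */

int main(void)
{
        unsigned long lru_pages = 1UL << 20;    /* pretend 1M pages on the file LRU */
        int priority;

        /* Each failed pass halves the divisor: 1/16, 1/8, 1/4, 1/2, then all. */
        for (priority = ZONE_RECLAIM_PRIORITY; priority >= 0; priority--)
                printf("priority %d: scan up to %lu pages\n",
                       priority, lru_pages >> priority);
        return 0;
}
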
/* Work out how many page cache pages we can reclaim in this reclaim_mode */
static unsigned long zone_pagecache_reclaimable(struct zone *zone)
{
        unsigned long nr_pagecache_reclaimable;
        unsigned long delta = 0;

        /*
         * If RECLAIM_UNMAP is set, then all file pages are considered
         * potentially reclaimable. Otherwise, we have to worry about
         * pages like swapcache and zone_unmapped_file_pages() provides
         * a better estimate.
         */
        if (zone_reclaim_mode & RECLAIM_UNMAP)
                nr_pagecache_reclaimable = zone_page_state(zone, NR_FILE_PAGES);
        else
                nr_pagecache_reclaimable = zone_unmapped_file_pages(zone);

        /* If we can't clean pages, remove dirty pages from consideration */
        if (!(zone_reclaim_mode & RECLAIM_WRITE))
                delta += zone_page_state(zone, NR_FILE_DIRTY);

        /* Watch for any possible underflows due to delta */
        if (unlikely(delta > nr_pagecache_reclaimable))
                delta = nr_pagecache_reclaimable;

        return nr_pagecache_reclaimable - delta;
}
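
As a worked example of the accounting above (all counter values made up): with 1000 pages on the file LRU lists, 300 file-mapped pages, and 150 dirty pages, and with neither RECLAIM_UNMAP nor RECLAIM_WRITE set, the estimate is (1000 - 300) - 150 = 550 reclaimable pages. Below is a standalone sketch of that calculation, where struct zone_counters and pagecache_reclaimable() are hypothetical stand-ins for the real zone counters:

#include <stdio.h>

/* Hypothetical per-zone counters, stand-ins for zone_page_state(). */
struct zone_counters {
        unsigned long file_lru;    /* NR_INACTIVE_FILE + NR_ACTIVE_FILE */
        unsigned long file_mapped; /* NR_FILE_MAPPED */
        unsigned long file_dirty;  /* NR_FILE_DIRTY */
};

/* Mirrors zone_pagecache_reclaimable() for the default mode, where
 * neither RECLAIM_UNMAP nor RECLAIM_WRITE is set. */
static unsigned long pagecache_reclaimable(const struct zone_counters *c)
{
        /* unmapped file pages, clamped at zero as in zone_unmapped_file_pages() */
        unsigned long base = c->file_lru > c->file_mapped ?
                             c->file_lru - c->file_mapped : 0;
        unsigned long delta = c->file_dirty;    /* can't clean, so don't count dirty */

        if (delta > base)       /* watch for underflow, as above */
                delta = base;
        return base - delta;
}

int main(void)
{
        struct zone_counters c = {
                .file_lru = 1000, .file_mapped = 300, .file_dirty = 150,
        };

        printf("%lu\n", pagecache_reclaimable(&c));     /* prints 550 */
        return 0;
}
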
static inline unsigned long zone_unmapped_file_pages(struct zone *zone)
{
        unsigned long file_mapped = zone_page_state(zone, NR_FILE_MAPPED);
        unsigned long file_lru = zone_page_state(zone, NR_INACTIVE_FILE) +
                zone_page_state(zone, NR_ACTIVE_FILE);

        /*
         * It's possible for there to be more file mapped pages than
         * accounted for by the pages on the file LRU lists because
         * tmpfs pages accounted for as ANON can also be FILE_MAPPED
         */
        return (file_lru > file_mapped) ? (file_lru - file_mapped) : 0;
}
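
Finally, the RECLAIM_* bits tested throughout come from the vm.zone_reclaim_mode sysctl. Below is a minimal userspace sketch of switching plain zone reclaim on; the procfs path is real, but the program itself is only illustrative (it needs root, and applies to kernels before v4.8, where zone reclaim became node reclaim):

#include <stdio.h>

int main(void)
{
        /* 1 = RECLAIM_ZONE, 2 = also write out dirty pages (RECLAIM_WRITE),
         * 4 = also unmap mapped pages (RECLAIM_UNMAP). */
        FILE *f = fopen("/proc/sys/vm/zone_reclaim_mode", "w");

        if (!f) {
                perror("zone_reclaim_mode");
                return 1;
        }
        fprintf(f, "1\n");      /* enable plain zone reclaim */
        fclose(f);
        return 0;
}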
