Linux Kernel Memory Management: CMA Allocation and Release

Allocating CMA memory:

/**
 * cma_alloc() - allocate pages from contiguous area
 * @cma:   Contiguous memory region for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 *
 * This function allocates part of contiguous memory on specific
 * contiguous memory area.
 */
struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align)
{
	unsigned long mask, offset;
	unsigned long pfn = -1;
	unsigned long start = 0;
	unsigned long bitmap_maxno, bitmap_no, bitmap_count;
	struct page *page = NULL;
	int ret;

	if (!cma || !cma->count)
		return NULL;

	pr_debug("%s(cma %p, count %zu, align %d)\n", __func__, (void *)cma,
		 count, align);

	if (!count)
		return NULL;

	mask = cma_bitmap_aligned_mask(cma, align);
	offset = cma_bitmap_aligned_offset(cma, align);
	bitmap_maxno = cma_bitmap_maxno(cma);
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	if (bitmap_count > bitmap_maxno)
		return NULL;

	for (;;) {
		mutex_lock(&cma->lock);
		bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap,
				bitmap_maxno, start, bitmap_count, mask,
				offset);
		if (bitmap_no >= bitmap_maxno) {
			mutex_unlock(&cma->lock);
			break;
		}
		bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
		/*
		 * It's safe to drop the lock here. We've marked this region for
		 * our exclusive use. If the migration fails we will take the
		 * lock again and unmark it.
		 */
		mutex_unlock(&cma->lock);

		pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
		mutex_lock(&cma_mutex);
		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
		mutex_unlock(&cma_mutex);
		if (ret == 0) {
			page = pfn_to_page(pfn);
			break;
		}

		cma_clear_bitmap(cma, pfn, count);
		if (ret != -EBUSY)
			break;

		pr_debug("%s(): memory range at %p is busy, retrying\n",
			 __func__, pfn_to_page(pfn));
		/* try again with a bit different memory target */
		start = bitmap_no + mask + 1;
	}

	trace_cma_alloc(pfn, page, count, align);

	pr_debug("%s(): returned %p\n", __func__, page);
	return page;
}
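Free space is tracked in cma->bitmap, where one bit covers 2^order_per_bit pages: cma_bitmap_aligned_mask() turns the requested alignment into a search mask and cma_bitmap_pages_to_bits() rounds the page count up to whole bits. The standalone program below is only an illustration of that arithmetic (the struct and sample values are made up, not kernel code), so the numbers the allocator feeds into the bitmap search can be inspected directly:

#include <stdio.h>

/* Illustrative stand-in for the struct cma fields used by the bitmap math. */
struct cma_demo {
	unsigned long base_pfn;       /* first PFN of the CMA area */
	unsigned long count;          /* size of the area in pages */
	unsigned int  order_per_bit;  /* one bitmap bit covers 1 << order_per_bit pages */
};

/* Same arithmetic as cma_bitmap_aligned_mask(): alignment expressed in bitmap bits. */
static unsigned long bitmap_aligned_mask(const struct cma_demo *cma, unsigned int align)
{
	if (align <= cma->order_per_bit)
		return 0;
	return (1UL << (align - cma->order_per_bit)) - 1;
}

/* Same arithmetic as cma_bitmap_pages_to_bits(): pages rounded up to whole bits. */
static unsigned long bitmap_pages_to_bits(const struct cma_demo *cma, unsigned long pages)
{
	unsigned long pages_per_bit = 1UL << cma->order_per_bit;

	return (pages + pages_per_bit - 1) / pages_per_bit;
}

int main(void)
{
	/* A hypothetical 64 MiB area with 4 KiB pages and one bit per page. */
	struct cma_demo cma = { .base_pfn = 0x80000, .count = 16384, .order_per_bit = 0 };
	unsigned long count = 300;   /* pages requested */
	unsigned int align = 4;      /* request 2^4-page (64 KiB) alignment */

	printf("bitmap_maxno = %lu\n", cma.count >> cma.order_per_bit);
	printf("mask         = %#lx\n", bitmap_aligned_mask(&cma, align));
	printf("bitmap_count = %lu\n", bitmap_pages_to_bits(&cma, count));
	return 0;
}

With order_per_bit = 0 and align = 4, for instance, the mask comes out as 0xf, which forces bitmap_find_next_zero_area_off() to place the allocation on a 16-page boundary.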

 

/**
 * alloc_contig_range() -- tries to allocate given range of pages
 * @start:	start PFN to allocate
 * @end:	one-past-the-last PFN to allocate
 * @migratetype:	migratetype of the underlaying pageblocks (either
 *			#MIGRATE_MOVABLE or #MIGRATE_CMA).  All pageblocks
 *			in range must have the same migratetype and it must
 *			be either of the two.
 *
 * The PFN range does not have to be pageblock or MAX_ORDER_NR_PAGES
 * aligned, however it's the caller's responsibility to guarantee that
 * we are the only thread that changes migrate type of pageblocks the
 * pages fall in.
 *
 * The PFN range must belong to a single zone.
 *
 * Returns zero on success or negative error code.  On success all
 * pages which PFN is in [start, end) are allocated for the caller and
 * need to be freed with free_contig_range().
 */
int alloc_contig_range(unsigned long start, unsigned long end,
		       unsigned migratetype)
{
	unsigned long outer_start, outer_end;
	unsigned int order;
	int ret = 0;

	struct compact_control cc = {
		.nr_migratepages = 0,
		.order = -1,
		.zone = page_zone(pfn_to_page(start)),
		.mode = MIGRATE_SYNC,
		.ignore_skip_hint = true,
	};
	INIT_LIST_HEAD(&cc.migratepages);

	/*
	 * What we do here is we mark all pageblocks in range as
	 * MIGRATE_ISOLATE.  Because pageblock and max order pages may
	 * have different sizes, and due to the way page allocator
	 * work, we align the range to biggest of the two pages so
	 * that page allocator won't try to merge buddies from
	 * different pageblocks and change MIGRATE_ISOLATE to some
	 * other migration type.
	 *
	 * Once the pageblocks are marked as MIGRATE_ISOLATE, we
	 * migrate the pages from an unaligned range (ie. pages that
	 * we are interested in).  This will put all the pages in
	 * range back to page allocator as MIGRATE_ISOLATE.
	 *
	 * When this is done, we take the pages in range from page
	 * allocator removing them from the buddy system.  This way
	 * page allocator will never consider using them.
	 *
	 * This lets us mark the pageblocks back as
	 * MIGRATE_CMA/MIGRATE_MOVABLE so that free pages in the
	 * aligned range but not in the unaligned, original range are
	 * put back to page allocator so that buddy can use them.
	 */

	ret = start_isolate_page_range(pfn_max_align_down(start),
				       pfn_max_align_up(end), migratetype,
				       false);
	if (ret)
		return ret;

	/*
	 * In case of -EBUSY, we'd like to know which page causes problem.
	 * So, just fall through. We will check it in test_pages_isolated().
	 */
	ret = __alloc_contig_migrate_range(&cc, start, end);
	if (ret && ret != -EBUSY)
		goto done;

	/*
	 * Pages from [start, end) are within a MAX_ORDER_NR_PAGES
	 * aligned blocks that are marked as MIGRATE_ISOLATE.  What's
	 * more, all pages in [start, end) are free in page allocator.
	 * What we are going to do is to allocate all pages from
	 * [start, end) (that is remove them from page allocator).
	 *
	 * The only problem is that pages at the beginning and at the
	 * end of interesting range may be not aligned with pages that
	 * page allocator holds, ie. they can be part of higher order
	 * pages.  Because of this, we reserve the bigger range and
	 * once this is done free the pages we are not interested in.
	 *
	 * We don't have to hold zone->lock here because the pages are
	 * isolated thus they won't get removed from buddy.
	 */

	lru_add_drain_all();
	drain_all_pages(cc.zone);

	order = 0;
	outer_start = start;
	while (!PageBuddy(pfn_to_page(outer_start))) {
		if (++order >= MAX_ORDER) {
			outer_start = start;
			break;
		}
		outer_start &= ~0UL << order;
	}

	if (outer_start != start) {
		order = page_order(pfn_to_page(outer_start));

		/*
		 * outer_start page could be small order buddy page and
		 * it doesn't include start page. Adjust outer_start
		 * in this case to report failed page properly
		 * on tracepoint in test_pages_isolated()
		 */
		if (outer_start + (1UL << order) <= start)
			outer_start = start;
	}

	/* Make sure the range is really isolated. */
	if (test_pages_isolated(outer_start, end, false)) {
		pr_info("%s: [%lx, %lx) PFNs busy\n",
			__func__, outer_start, end);
		ret = -EBUSY;
		goto done;
	}

	/* Grab isolated pages from freelists. */
	outer_end = isolate_freepages_range(&cc, outer_start, end);
	if (!outer_end) {
		ret = -EBUSY;
		goto done;
	}

	/* Free head and tail (if any) */
	if (start != outer_start)
		free_contig_range(outer_start, start - outer_start);
	if (end != outer_end)
		free_contig_range(end, outer_end - end);

done:
	undo_isolate_page_range(pfn_max_align_down(start),
				pfn_max_align_up(end), migratetype);
	return ret;
}
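As the comment block above notes, pages obtained through alloc_contig_range() are handed back with free_contig_range(), which simply returns each page to the buddy allocator one PFN at a time. The sketch below follows mm/page_alloc.c of a kernel version close to the one quoted here; exact details may differ between releases:

void free_contig_range(unsigned long pfn, unsigned nr_pages)
{
	unsigned int count = 0;

	for (; nr_pages--; pfn++) {
		struct page *page = pfn_to_page(pfn);

		/* A refcount other than 1 means someone still holds the page. */
		count += page_count(page) != 1;
		__free_page(page);
	}
	WARN(count != 0, "%d pages are still in use!\n", count);
}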

 

Releasing CMA memory:

/**
 * cma_release() - release allocated pages
 * @cma:   Contiguous memory region for which the allocation is performed.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by cma_alloc().
 * It returns false when provided pages do not belong to contiguous area and
 * true otherwise.
 */
bool cma_release(struct cma *cma, const struct page *pages, unsigned int count)
{
	unsigned long pfn;

	if (!cma || !pages)
		return false;

	pr_debug("%s(page %p)\n", __func__, (void *)pages);

	pfn = page_to_pfn(pages);

	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
		return false;

	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);

	free_contig_range(pfn, count);
	cma_clear_bitmap(cma, pfn, count);
	trace_cma_release(pfn, pages, count);

	return true;
}
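Both the cma_alloc() failure path and cma_release() finish with cma_clear_bitmap(), which converts the PFN range back into bitmap coordinates and clears the bits under cma->lock, making the region available to later allocations. A sketch following mm/cma.c of a similar kernel version:

static void cma_clear_bitmap(struct cma *cma, unsigned long pfn,
			     unsigned int count)
{
	unsigned long bitmap_no, bitmap_count;

	/* Translate the PFN range back into bitmap offset and length. */
	bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit;
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	mutex_lock(&cma->lock);
	bitmap_clear(cma->bitmap, bitmap_no, bitmap_count);
	mutex_unlock(&cma->lock);
}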

 

 