内存管理---内存释放

  Linux内存释放函数之间的调用关系如下图所示

         

hi

/* Free a 2^order block of pages given its kernel virtual address. */
void free_pages(unsigned long addr, unsigned int order)
{
	if (addr == 0)
		return;

	VM_BUG_ON(!virt_addr_valid((void *)addr));
	/* translate virtual address to struct page and do the real work */
	__free_pages(virt_to_page((void *)addr), order);
}

/*
 * Drop a reference on a 2^order block; release it once the
 * reference count reaches zero.
 */
void __free_pages(struct page *page, unsigned int order)
{
	if (!put_page_testzero(page))	/* page still in use elsewhere */
		return;

	trace_mm_page_free_direct(page, order);
	if (order == 0)
		free_hot_page(page);		/* single page: per-CPU hot cache */
	else
		__free_pages_ok(page, order);	/* multi-page block: buddy system */
}

释放单个页面时,free_hot_page()会调用free_hot_cold_page()函数完成实际工作。



/*
 * Free a 0-order page
 * cold == 1 ? free a cold page : free a hot page
 *
 * NOTE(review): in the original text the comment opened before
 * free_pcppages_bulk() accidentally commented the call out, and the
 * trailing comment on local_irq_restore() was missing its opening
 * slash; both are restored here.
 */
void free_hot_cold_page(struct page *page, int cold)
{
	struct zone *zone = page_zone(page);
	struct per_cpu_pages *pcp;
	unsigned long flags;
	int migratetype;
	int wasMlocked = __TestClearPageMlocked(page);

	if (!free_pages_prepare(page, 0))
		return;

	migratetype = get_pageblock_migratetype(page);
	set_page_private(page, migratetype);
	local_irq_save(flags);
	if (unlikely(wasMlocked))
		free_page_mlock(page);
	__count_vm_event(PGFREE);

	/*
	 * We only track unmovable, reclaimable and movable on pcp lists.
	 * Free ISOLATE pages back to the allocator because they are being
	 * offlined but treat RESERVE as movable pages so we can get those
	 * areas back if necessary. Otherwise, we may have to free
	 * excessively into the page allocator
	 */
	if (migratetype >= MIGRATE_PCPTYPES) {
		if (unlikely(migratetype == MIGRATE_ISOLATE)) {
			/* isolated pages go straight back to the buddy system */
			free_one_page(zone, page, 0, migratetype);
			goto out;
		}
		migratetype = MIGRATE_MOVABLE;
	}

	pcp = &this_cpu_ptr(zone->pageset)->pcp; /* this CPU's pcp for the zone */
	if (cold)
		list_add_tail(&page->lru, &pcp->lists[migratetype]);
	else
		list_add(&page->lru, &pcp->lists[migratetype]);
	pcp->count++;
	if (pcp->count >= pcp->high) {
		/*
		 * The pcp list exceeded its high watermark: return
		 * pcp->batch pages to the buddy system in one go.
		 */
		free_pcppages_bulk(zone, pcp->batch, pcp);
		pcp->count -= pcp->batch;
	}

out:
	local_irq_restore(flags); /* re-enable local interrupts */
}
通过调用free_one_page()-->__free_one_page()来完成释放
/*
 * Core buddy-allocator free: insert a 2^order block and coalesce it
 * with its free buddy repeatedly, moving up one order per merge.
 *
 * Caller must hold zone->lock with interrupts disabled.
 */
static inline void __free_one_page(struct page *page,
		struct zone *zone, unsigned int order,
		int migratetype)
{
	unsigned long page_idx;

	if (unlikely(PageCompound(page)))
		if (unlikely(destroy_compound_page(page, order)))
			return;

	VM_BUG_ON(migratetype == -1);

	/* offset of this page frame within its maximal-order block */
	page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);

	VM_BUG_ON(page_idx & ((1 << order) - 1));
	VM_BUG_ON(bad_range(zone, page));

	/* as long as order < MAX_ORDER-1, a merge is still possible */
	while (order < MAX_ORDER-1) {
		unsigned long combined_idx;
		struct page *buddy;

		/* locate the buddy block of the block containing page */
		buddy = __page_find_buddy(page, page_idx, order);
		/* if the buddy is not free, no merging can be done */
		if (!page_is_buddy(page, buddy, order))
			break;

		/* Our buddy is free, merge with it and move up one order. */
		list_del(&buddy->lru);	/* unlink buddy from its free list */
		zone->free_area[order].nr_free--;
		rmv_page_order(buddy);
		/* compute the starting page-frame offset of the merged block */
		combined_idx = __find_combined_index(page_idx, order);
		/* page descriptor of the merged block's first page */
		page = page + (combined_idx - page_idx);
		page_idx = combined_idx;	/* new starting offset of the block */
		order++;	/* merge done: block is now one order larger */
	}
	/* record the final order of the (possibly merged) block */
	set_page_order(page, order);
	/* add the block to the free list matching its order/migratetype */
	list_add(&page->lru,
		&zone->free_area[order].free_list[migratetype]);
	zone->free_area[order].nr_free++;
}
__page_find_buddy()用来找到待释放块的伙伴块;如果找到了一个空闲的伙伴块,则要通过__find_combined_index()定位合并后块的起始页框——因为一个块的伙伴块既可能位于该块之前,也可能位于该块之后。这两个函数的实现非常简洁巧妙,全部通过位运算完成。
/*
 * Locate the buddy of the 2^order block whose first page is page/page_idx.
 * The buddy's index differs from page_idx in exactly bit 'order'.
 */
static inline struct page *
__page_find_buddy(struct page *page, unsigned long page_idx, unsigned int order)
{
	unsigned long buddy_idx = page_idx ^ (1 << order);

	/* step from this block's descriptor to the buddy's descriptor */
	return page - page_idx + buddy_idx;
}
/*
 * Index of the first page of the block obtained by merging a 2^order
 * block at page_idx with its buddy: clear bit 'order', since the merged
 * block always starts at the lower of the two buddies.
 */
static inline unsigned long
__find_combined_index(unsigned long page_idx, unsigned int order)
{
	return page_idx - (page_idx & (1 << order));
}

可以举例验证:这两个函数只需少量位运算即可定位伙伴块与合并块,因此非常快。

pcp中释放页面到伙伴系统中;free_pcppages_bulk()

/*
 * Frees a number of pages from the PCP lists
 * Assumes all pages on list are in same zone, and of same order.
 * count is the number of pages to free.
 *
 * If the zone was previously in an "all pages pinned" state then look to
 * see if this freeing clears that state.
 *
 * And clear the zone's pages_scanned counter, to hold off the "all pages are
 * pinned" detection logic.
 */
static void free_pcppages_bulk(struct zone *zone, int count,
					struct per_cpu_pages *pcp)
{
	int migratetype = 0;
	int batch_free = 0;
	int to_free = count;
	/*
	 * A zone belongs to one CPU node, but allocations may still cross
	 * nodes, so the zone must be protected by its spinlock here. The
	 * per-CPU page caches exist precisely to reduce contention on
	 * this lock.
	 */
	spin_lock(&zone->lock);
	zone->all_unreclaimable = 0; /* memory-pressure flag: clear, since we are freeing pages */
	zone->pages_scanned = 0;
	/*
	 * pages_scanned counts pages scanned by reclaim since the last
	 * memory shortage; reset it so the reclaim path restarts its
	 * accounting after this free.
	 */

	while (to_free) {
		struct page *page;
		struct list_head *list;

		/*
		 * Remove pages from lists in a round-robin fashion. A
		 * batch_free count is maintained that is incremented when an
		 * empty list is encountered.  This is so more pages are freed
		 * off fuller lists instead of spinning excessively around empty
		 * lists
		 */
		do {
			batch_free++;
			if (++migratetype == MIGRATE_PCPTYPES)
				migratetype = 0;
			list = &pcp->lists[migratetype];
		} while (list_empty(list)); /* pick the next non-empty pcp list */

		/* This is the only non-empty list. Free them all. */
		if (batch_free == MIGRATE_PCPTYPES)
			batch_free = to_free;

		do {
			page = list_entry(list->prev, struct page, lru);
			/* must delete as __free_one_page list manipulates */
			list_del(&page->lru);
			/* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
			/* hand one page back to the buddy system, keyed by its migrate type */
			__free_one_page(page, zone, 0, page_private(page));
			trace_mm_page_pcpu_drain(page, 0, page_private(page));
		} while (--to_free && --batch_free && !list_empty(list));
	}
	__mod_zone_page_state(zone, NR_FREE_PAGES, count);
	spin_unlock(&zone->lock);
}


/*
 * Free a 2^order block straight to the buddy allocator
 * (path taken for order > 0 frees).
 */
static void __free_pages_ok(struct page *page, unsigned int order)
{
	int was_mlocked = __TestClearPageMlocked(page);
	unsigned long flags;

	if (!free_pages_prepare(page, order))
		return;

	/* counters and the buddy insert run with local interrupts off */
	local_irq_save(flags);
	if (unlikely(was_mlocked))
		free_page_mlock(page);
	__count_vm_events(PGFREE, 1 << order);
	free_one_page(page_zone(page), page, order,
			get_pageblock_migratetype(page));
	local_irq_restore(flags);
}

/*
 * Common checks and teardown run before a 2^order block is returned
 * to the allocator.  Returns false when any constituent page fails
 * the per-page sanity checks.
 */
static bool free_pages_prepare(struct page *page, unsigned int order)
{
	unsigned int nr_pages = 1U << order;
	unsigned int i;
	int bad = 0;

	trace_mm_page_free(page, order);
	kmemcheck_free_shadow(page, order);

	if (PageAnon(page))
		page->mapping = NULL;

	/* sanity-check every page of the block (checks may have side effects) */
	for (i = 0; i < nr_pages; i++)
		bad += free_pages_check(page + i);
	if (bad)
		return false;

	if (!PageHighMem(page)) {
		debug_check_no_locks_freed(page_address(page),
					   PAGE_SIZE << order);
		debug_check_no_obj_freed(page_address(page),
					   PAGE_SIZE << order);
	}

	/* architecture / debug hooks for page teardown */
	arch_free_page(page, order);
	kernel_map_pages(page, 1 << order, 0);

	return true;
}
static void free_one_page(struct zone *zone, struct page *page, int order,
				int migratetype)
{
	 spin_lock(&zone->lock);/*获得管理区的自旋锁*/  
	zone->all_unreclaimable = 0;/* 只要是释放了页面,都需要将此两个标志清0,表明内存不再紧张的事实*/  
	zone->pages_scanned = 0;

	__free_one_page(page, zone, order, migratetype);/*释放到指定的伙伴系统类型链表*/  
	__mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);*管理区空闲页面计数*/ 
	spin_unlock(&zone->lock);
}

伙伴系统内存释放的主要流程
1,如果释放的是单个页面,需要根据页面类型考虑是否释放到伙伴系统中,同时,将其加入到pcp链表中。如果pcp链表中内存过多,调用free_pcppages_bulk()函数将大块内存放回伙伴系统中;
2,如果释放的是多个页面,直接调用__free_one_page()释放到伙伴系统中。
3,释放到伙伴系统中时,需要考虑和伙伴的合并情况。




评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值