As mentioned earlier, kmalloc is implemented on top of the slab allocator and the physical memory it returns is contiguous, but a single kmalloc call cannot allocate very large buffers. This article turns to vmalloc: the memory vmalloc returns is contiguous only in the virtual address space, it is carved out of the region reserved at memory-initialization time between VMALLOC_START and VMALLOC_END, and the virtual memory it hands out is PAGE_SIZE aligned:
void *vmalloc(unsigned long size)
{
    return __vmalloc_node_flags(size, NUMA_NO_NODE,
                    GFP_KERNEL | __GFP_HIGHMEM);
}
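Before walking through the internals, here is a minimal usage sketch (hypothetical module code, not part of the source being analyzed) showing the user-visible contract: the returned buffer is virtually contiguous, handed out in whole pages, and is released with vfree():
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

static void *buf;

static int __init vmalloc_demo_init(void)
{
    /* 3 pages + 10 bytes is rounded up to 4 pages by PAGE_ALIGN() */
    buf = vmalloc(3 * PAGE_SIZE + 10);
    if (!buf)
        return -ENOMEM;

    memset(buf, 0, 3 * PAGE_SIZE + 10);
    pr_info("vmalloc_demo: buffer at %p\n", buf);
    return 0;
}

static void __exit vmalloc_demo_exit(void)
{
    vfree(buf);    /* frees the pages and tears down the mappings */
}

module_init(vmalloc_demo_init);
module_exit(vmalloc_demo_exit);
MODULE_LICENSE("GPL");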
The call chain of vmalloc() is:
vmalloc()->__vmalloc_node_flags()->__vmalloc_node()->__vmalloc_node_range():
void *__vmalloc_node_range(unsigned long size, unsigned long align,
            unsigned long start, unsigned long end, gfp_t gfp_mask,
            pgprot_t prot, unsigned long vm_flags, int node,
            const void *caller)
{
    struct vm_struct *area;
    void *addr;
    unsigned long real_size = size;

    size = PAGE_ALIGN(size);------------------(1)
    if (!size || (size >> PAGE_SHIFT) > totalram_pages)
        goto fail;

    area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED |
                vm_flags, start, end, node, gfp_mask, caller);-------(2)
    if (!area)
        goto fail;

    addr = __vmalloc_area_node(area, gfp_mask, prot, node);--------------(3)
    if (!addr)
        return NULL;

    /*
     * In this function, newly allocated vm_struct has VM_UNINITIALIZED
     * flag. It means that vm_struct is not fully initialized.
     * Now, it is fully initialized, so remove this flag here.
     */
    clear_vm_uninitialized_flag(area);

    /*
     * A ref_count = 2 is needed because vm_struct allocated in
     * __get_vm_area_node() contains a reference to the virtual address of
     * the vmalloc'ed block.
     */
    kmemleak_alloc(addr, real_size, 2, gfp_mask);

    return addr;

fail:
    warn_alloc_failed(gfp_mask, 0,
              "vmalloc: allocation failure: %lu bytes\n",
              real_size);
    return NULL;
}
(1) This shows that vmalloc allocates in whole, page-aligned units: even a request for just 10 bytes ends up occupying a full page (a small stand-alone illustration of the rounding follows this list).
(2) The first core helper of vmalloc: it reserves a range of vmalloc address space and initializes the vm_struct that describes it; it is covered below.
(3) The second core helper: it allocates the physical pages and establishes the mappings from the virtual addresses to those pages.
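To make (1) concrete, the rounding can be reproduced in a tiny stand-alone program (PAGE_ALIGN is re-implemented here for illustration, assuming a 4 KB page; the kernel's own definition lives in its page headers):
#include <stdio.h>

#define PAGE_SIZE   4096UL
#define PAGE_ALIGN(x)   (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
    printf("%5lu -> %lu\n", 10UL,   PAGE_ALIGN(10UL));    /* 10   -> 4096 */
    printf("%5lu -> %lu\n", 4096UL, PAGE_ALIGN(4096UL));  /* 4096 -> 4096 */
    printf("%5lu -> %lu\n", 4097UL, PAGE_ALIGN(4097UL));  /* 4097 -> 8192 */
    return 0;
}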
Let's look at __get_vm_area_node() first:
static struct vm_struct *__get_vm_area_node(unsigned long size,
        unsigned long align, unsigned long flags, unsigned long start,
        unsigned long end, int node, gfp_t gfp_mask, const void *caller)
{
    struct vmap_area *va;
    struct vm_struct *area;

    BUG_ON(in_interrupt());
    if (flags & VM_IOREMAP)
        align = 1ul << clamp(fls(size), PAGE_SHIFT, IOREMAP_MAX_ORDER);

    size = PAGE_ALIGN(size);--------------(1)
    if (unlikely(!size))
        return NULL;

    area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);--------(2)
    if (unlikely(!area))
        return NULL;

    if (!(flags & VM_NO_GUARD))
        size += PAGE_SIZE;

    va = alloc_vmap_area(size, align, start, end, node, gfp_mask);-----------------(3)
    if (IS_ERR(va)) {
        kfree(area);
        return NULL;
    }

    setup_vmalloc_vm(area, va, flags, caller);------------(4)

    return area;
}
(1) Make sure once more that size is page aligned.
(2) Allocate a vm_struct with kzalloc_node(). Note that unless VM_NO_GUARD is set, an extra PAGE_SIZE is then added to size so that the reserved range ends with a guard page.
(3) This function is the involved part: it walks a red-black tree (and the companion list) to find a suitable hole in the vmalloc area for the requested size.
(4) Fill in the vm_struct and vmap_area structures (a sketch of this helper follows the list).
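For reference, setup_vmalloc_vm() at (4) does little more than copy the reserved range into the vm_struct and link the two structures together. Roughly, as reconstructed from a kernel of this vintage (field names may differ slightly between versions):
static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
                 unsigned long flags, const void *caller)
{
    spin_lock(&vmap_area_lock);
    vm->flags = flags;
    vm->addr = (void *)va->va_start;        /* start of the reserved range */
    vm->size = va->va_end - va->va_start;   /* includes the guard page */
    vm->caller = caller;                    /* shown in /proc/vmallocinfo */
    va->vm = vm;                            /* back-pointer from the vmap_area */
    va->flags |= VM_VM_AREA;
    spin_unlock(&vmap_area_lock);
}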
alloc_vmap_area():
static struct vmap_area *alloc_vmap_area(unsigned long size,
                unsigned long align,
                unsigned long vstart, unsigned long vend,
                int node, gfp_t gfp_mask)
{
    struct vmap_area *va;
    struct rb_node *n;
    unsigned long addr;
    int purged = 0;
    struct vmap_area *first;

    BUG_ON(!size);
    BUG_ON(size & ~PAGE_MASK);
    BUG_ON(!is_power_of_2(align));

    va = kmalloc_node(sizeof(struct vmap_area),
            gfp_mask & GFP_RECLAIM_MASK, node);----------(1)
    if (unlikely(!va))
        return ERR_PTR(-ENOMEM);

    /*
     * Only scan the relevant parts containing pointers to other objects
     * to avoid false negatives.
     */
    kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask & GFP_RECLAIM_MASK);

retry:
    spin_lock(&vmap_area_lock);
    /*
     * Invalidate cache if we have more permissive parameters.
     * cached_hole_size notes the largest hole noticed _below_
     * the vmap_area cached in free_vmap_cache: if size fits
     * into that hole, we want to scan from vstart to reuse
     * the hole instead of allocating above free_vmap_cache.
     * Note that __free_vmap_area may update free_vmap_cache
     * without updating cached_hole_size or cached_align.
     */
    if (!free_vmap_cache ||
            size < cached_hole_size ||
            vstart < cached_vstart ||
            align < cached_align) {
nocache:
        cached_hole_size = 0;
        free_vmap_cache = NULL;
    }
    /* record if we encounter less permissive parameters */
    cached_vstart = vstart;
    cached_align = align;

    /* find starting point for our search */
    if (free_vmap_cache) {
        first = rb_entry(free_vmap_cache, struct vmap_area, rb_node);
        addr = ALIGN(first->va_end, align);
        if (addr < vstart)
            goto nocache;
        if (addr + size < addr)
            goto overflow;

    } else {
        addr = ALIGN(vstart, align);-------------(2)
        if (addr + size < addr)
            goto overflow;

        n = vmap_area_root.rb_node;---------(3)
        first = NULL;

        while (n) {
            struct vmap_area *tmp;
            tmp = rb_entry(n, struct vmap_area, rb_node);
            if (tmp->va_end >= addr) {----------(4)
                first = tmp;
                if (tmp->va_start <= addr)
                    break;
                n = n->rb_left;
            } else
                n = n->rb_right;
        }

        if (!first)
            goto found;
    }

    /* from the starting point, walk areas until a suitable hole is found */
    while (addr + size > first->va_start && addr + size <= vend) {-----------(5)
        if (addr + cached_hole_size < first->va_start)
            cached_hole_size = first->va_start - addr;
        addr = ALIGN(first->va_end, align);
        if (addr + size < addr)
            goto overflow;

        if (list_is_last(&first->list, &vmap_area_list))------------(6)
            goto found;

        first = list_entry(first->list.next,
                struct vmap_area, list);
    }

found:
    if (addr + size > vend)
        goto overflow;

    va->va_start = addr;
    va->va_end = addr + size;
    va->flags = 0;
    __insert_vmap_area(va);-------------------(7)
    free_vmap_cache = &va->rb_node;
    spin_unlock(&vmap_area_lock);

    BUG_ON(va->va_start & (align-1));
    BUG_ON(va->va_start < vstart);
    BUG_ON(va->va_end > vend);

    return va;

overflow:
    spin_unlock(&vmap_area_lock);
    if (!purged) {
        purge_vmap_area_lazy();
        purged = 1;
        goto retry;
    }
    if (printk_ratelimit())
        printk(KERN_WARNING
            "vmap allocation for size %lu failed: "
            "use vmalloc=<size> to increase size.\n", size);
#ifdef CONFIG_HTC_DEBUG_VMALLOC_DUMP
    if ((last_dump_jiffies == 0) || time_is_before_jiffies(last_dump_jiffies + DUMP_VMALLOC_INTERVAL)) {
        dump_vmallocinfo();
        last_dump_jiffies = jiffies;
    }
#endif
    kfree(va);
    return ERR_PTR(-EBUSY);
}
(1) Allocate a vmap_area structure.
(2) Set the initial candidate address to vstart (VMALLOC_START in the vmalloc case) rounded up to align.
(3) Search the vmap_area_root red-black tree for a block that meets the requirements; if the tree has no nodes, the system has not yet allocated anything from the vmalloc area and the search falls straight through to found.
(4) Find the area with the lowest start address whose end is not below addr. Because addr is still essentially VMALLOC_START at this point, this if condition keeps holding and the walk descends to the left until it reaches the lowest qualifying node, i.e. one whose left subtree no longer qualifies; that node becomes first, the starting point of the scan.
(5) The while loop then walks the already allocated vmalloc regions looking for a gap large enough for the current request; if such a hole exists it is used, otherwise addr keeps advancing until the allocation lands after the last allocated block (a condensed model of this search follows the list).
(6) Check whether the current vmap_area is the last node on vmap_area_list; if so, break out of the loop and allocate right after this block. Note that allocated vmalloc blocks are tracked by two structures: the vmap_area_root red-black tree and the doubly linked list vmap_area_list.
(7) Insert the newly reserved vmap_area into the red-black tree (and, inside __insert_vmap_area(), into the sorted list as well).
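The search in (3) through (6) is easier to follow as a condensed model: the allocated areas are kept sorted by address, and starting from the first area whose end is not below addr, the cursor is bumped past each occupied area until the gap in front of the next one, or the tail of the region, can hold the request. A stand-alone sketch with purely illustrative names (alignment and the free_vmap_cache logic are left out):
#include <stdio.h>

struct area { unsigned long start, end; };      /* occupied [start, end) */

static unsigned long find_hole(const struct area *v, int n,
                               unsigned long vstart, unsigned long vend,
                               unsigned long size)
{
    unsigned long addr = vstart;

    for (int i = 0; i < n; i++) {
        if (v[i].end <= addr)
            continue;                   /* area lies entirely below addr */
        if (addr + size <= v[i].start)
            return addr;                /* the gap before this area fits */
        addr = v[i].end;                /* otherwise skip past it */
    }
    return (addr + size <= vend) ? addr : 0;    /* 0 means failure */
}

int main(void)
{
    /* two occupied areas; one free page at 0x3000, then space from 0x9000 */
    struct area used[] = { { 0x1000, 0x3000 }, { 0x4000, 0x9000 } };

    printf("0x%lx\n", find_hole(used, 2, 0x1000, 0x10000, 0x1000));  /* 0x3000 */
    printf("0x%lx\n", find_hole(used, 2, 0x1000, 0x10000, 0x2000));  /* 0x9000 */
    return 0;
}
The real kernel keeps both the rb-tree (for the logarithmic starting-point lookup) and the address-sorted list (for the linear gap walk), which is why explanation (6) mentions two tracking structures.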
__vmalloc_area_node():
static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
                 pgprot_t prot, int node)
{
    const int order = 0;
    struct page **pages;
    unsigned int nr_pages, array_size, i;
    const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
    const gfp_t alloc_mask = gfp_mask | __GFP_NOWARN;

    nr_pages = get_vm_area_size(area) >> PAGE_SHIFT;---------(1)
    array_size = (nr_pages * sizeof(struct page *));---------(2)

    area->nr_pages = nr_pages;-----------(3)
    /* Please note that the recursion is strictly bounded. */
    if (array_size > PAGE_SIZE) {--------------(4)
        pages = __vmalloc_node(array_size, 1, nested_gfp|__GFP_HIGHMEM,
                PAGE_KERNEL, node, area->caller);
        area->flags |= VM_VPAGES;
    } else {
        pages = kmalloc_node(array_size, nested_gfp, node);
    }
    area->pages = pages;-----------(5)
    if (!area->pages) {
        remove_vm_area(area->addr);
        kfree(area);
        return NULL;
    }

    for (i = 0; i < area->nr_pages; i++) {----------(6)
        struct page *page;

        if (node == NUMA_NO_NODE)
            page = alloc_page(alloc_mask);
        else
            page = alloc_pages_node(node, alloc_mask, order);

        if (unlikely(!page)) {
            /* Successfully allocated i pages, free them in __vunmap() */
            area->nr_pages = i;
            goto fail;
        }
        area->pages[i] = page;--------------(7)
        if (gfp_mask & __GFP_WAIT)
            cond_resched();
    }

    if (map_vm_area(area, prot, pages))-------------(8)
        goto fail;
    return area->addr;

fail:
    warn_alloc_failed(gfp_mask, order,
              "vmalloc: allocation failure, allocated %ld of %ld bytes\n",
              (area->nr_pages*PAGE_SIZE), area->size);
    vfree(area->addr);
    return NULL;
}
(1) Compute how many pages this allocation needs.
(2) Compute the size of the array needed to track those pages, one struct page pointer per page.
(3) Initialize part of the vm_struct; nr_pages records how many pages this area manages.
(4) If the page-pointer array itself needs more than one page, it is allocated with __vmalloc_node() (the recursion is bounded, as the comment notes); otherwise kmalloc_node() is enough (see the arithmetic example after this list).
(5) Store the base address of that array in the pages member of vm_struct; each element of pages holds the address of the struct page that describes one of the allocated physical pages.
(6) The for loop allocates the physical pages one at a time with alloc_page()/alloc_pages_node().
(7) As described in (5), store each newly allocated struct page into the pages array.
(8) Establish the mappings from the allocated virtual range to the physical pages; this shows that vmalloc sets up its page-table mappings at allocation time.
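A quick calculation illustrates the branch at (4). Assuming 4 KB pages and 8-byte pointers (a 64-bit build), the page-pointer array only outgrows a single page once the request exceeds 2 MB, i.e. more than 512 pages:
#include <stdio.h>

#define PAGE_SIZE   4096UL
#define PTR_SIZE    sizeof(void *)    /* 8 bytes on a 64-bit kernel */

int main(void)
{
    unsigned long sizes[] = { 64UL << 10, 1UL << 20, 4UL << 20 };

    for (int i = 0; i < 3; i++) {
        unsigned long nr_pages   = sizes[i] / PAGE_SIZE;
        unsigned long array_size = nr_pages * PTR_SIZE;

        printf("vmalloc(%4lu KB): %4lu pages, array %5lu bytes -> %s\n",
               sizes[i] >> 10, nr_pages, array_size,
               array_size > PAGE_SIZE ? "__vmalloc_node" : "kmalloc_node");
    }
    return 0;
}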
map_vm_area()->vmap_page_range_noflush():
static int vmap_page_range_noflush(unsigned long start, unsigned long end,
                   pgprot_t prot, struct page **pages)
{
    pgd_t *pgd;
    unsigned long next;
    unsigned long addr = start;
    int err = 0;
    int nr = 0;

    BUG_ON(addr >= end);
    pgd = pgd_offset_k(addr);-----------(1)
    do {
        next = pgd_addr_end(addr, end);
        err = vmap_pud_range(pgd, addr, next, prot, pages, &nr);
        if (err)
            return err;
    } while (pgd++, addr = next, addr != end);

    return nr;
}
(1) A familiar pattern: this works on the same principle as the virtual-to-physical mappings built during memory initialization. The code walks down through the page-table levels and finally calls set_pte_at() to write each page's frame number, together with the page protection bits, into the corresponding pte; the conversion from a struct page to its page frame number is done with page_to_pfn(). The pte-level leaf of the walk is sketched below.
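For completeness, that leaf looks roughly like the following, as reconstructed from a kernel of this era (it is not shown in the excerpt above): each iteration takes the next struct page from the pages array, builds a pte with mk_pte(), and installs it with set_pte_at():
static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
        unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
    pte_t *pte;

    /* nr is a running index into the pages array */
    pte = pte_alloc_kernel(pmd, addr);
    if (!pte)
        return -ENOMEM;
    do {
        struct page *page = pages[*nr];

        if (WARN_ON(!pte_none(*pte)))
            return -EBUSY;        /* the range must not be mapped yet */
        if (WARN_ON(!page))
            return -ENOMEM;
        set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
        (*nr)++;
    } while (pte++, addr += PAGE_SIZE, addr != end);
    return 0;
}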
Once the virtual-to-physical mappings are in place, the vmalloc allocation flow is complete.