Kernel address mapping: vmap() and kmap()
vmap(). This can be used to make a long-duration mapping of multiple physical pages into a contiguous kernel virtual space. Unmapping requires global synchronization (a system-wide TLB flush).
/**
 * vmap - map an array of pages into virtually contiguous space
 * @pages: array of page pointers
 * @count: number of pages to map
 * @flags: vm_area->flags
 * @prot: page protection for the mapping
 *
 * Maps @count pages from @pages into contiguous kernel virtual space.
 * If @flags contains %VM_MAP_PUT_PAGES the ownership of the pages array itself
 * (which must be kmalloc or vmalloc memory) and one reference per page in it
 * are transferred from the caller to vmap(), and will be freed / dropped when
 * vfree() is called on the return value.
 *
 * Return: the address of the area or %NULL on failure
 */
void *vmap(struct page **pages, unsigned int count,
           unsigned long flags, pgprot_t prot)
{
        struct vm_struct *area;
        unsigned long size;             /* In bytes */

        might_sleep();

        if (count > totalram_pages())
                return NULL;

        size = (unsigned long)count << PAGE_SHIFT;
        area = get_vm_area_caller(size, flags, __builtin_return_address(0));
        if (!area)
                return NULL;

        if (map_kernel_range((unsigned long)area->addr, size, pgprot_nx(prot),
                        pages) < 0) {
                vunmap(area->addr);
                return NULL;
        }

        if (flags & VM_MAP_PUT_PAGES) {
                area->pages = pages;
                area->nr_pages = count;
        }

        return area->addr;
}
EXPORT_SYMBOL(vmap);
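A minimal usage sketch (my own, not from the kernel tree): stitch a few scattered physical pages into one contiguous kernel virtual range. map_scattered_pages() is a made-up name; vmap(), alloc_page(), kmalloc_array() and friends are the real APIs.

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

static void *map_scattered_pages(unsigned int count)
{
        struct page **pages;
        unsigned int i;
        void *vaddr;

        pages = kmalloc_array(count, sizeof(*pages), GFP_KERNEL);
        if (!pages)
                return NULL;

        for (i = 0; i < count; i++) {
                pages[i] = alloc_page(GFP_KERNEL);      /* need not be physically contiguous */
                if (!pages[i])
                        goto err_free;
        }

        /*
         * VM_MAP_PUT_PAGES transfers ownership of the array and one
         * reference per page, so a single vfree(vaddr) later frees
         * everything (see the kernel-doc above).
         */
        vaddr = vmap(pages, count, VM_MAP | VM_MAP_PUT_PAGES, PAGE_KERNEL);
        if (!vaddr)
                goto err_free;          /* the failure path above did not take ownership */
        return vaddr;

err_free:
        while (i--)
                __free_page(pages[i]);
        kfree(pages);
        return NULL;
}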
kmap(). This permits a short-duration mapping of a single page. It needs global synchronization, but the cost is amortized somewhat. It is also prone to deadlocks when used in a nested fashion, so it is not recommended for new code.
/**
 * kmap - Map a page for long term usage
 * @page: Pointer to the page to be mapped
 *
 * Returns: The virtual address of the mapping
 *
 * Can only be invoked from preemptible task context because on 32bit
 * systems with CONFIG_HIGHMEM enabled this function might sleep.
 *
 * For systems with CONFIG_HIGHMEM=n and for pages in the low memory area
 * this returns the virtual address of the direct kernel mapping.
 *
 * The returned virtual address is globally visible and valid up to the
 * point where it is unmapped via kunmap(). The pointer can be handed to
 * other contexts.
 *
 * For highmem pages on 32bit systems this can be slow as the mapping space
 * is limited and protected by a global lock. In case that there is no
 * mapping slot available the function blocks until a slot is released via
 * kunmap().
 */
static inline void *kmap(struct page *page);
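A short sketch of the classic pattern, assuming preemptible task context as the kernel-doc requires; zero_one_page() is a hypothetical helper:

#include <linux/highmem.h>
#include <linux/string.h>

static void zero_one_page(struct page *page)
{
        void *vaddr = kmap(page);       /* may block until a highmem slot frees up */

        memset(vaddr, 0, PAGE_SIZE);
        kunmap(page);                   /* release the mapping slot */
}

Given the deadlock caveat above, new code should normally reach for kmap_local_page()/kunmap_local() instead (available in recent kernels): those mappings are cheap, CPU-local, and never block.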
Mapping kernel addresses into user space: remap_vmalloc_range() and remap_pfn_range()
remap_pfn_range() remaps kernel memory (by page frame number) into a user vma; remap_vmalloc_range() maps vmalloc pages to userspace. remap_pfn_range() is shown first.
/**
 * remap_pfn_range - remap kernel memory to userspace
 * @vma: user vma to map to
 * @addr: target page aligned user address to start at
 * @pfn: page frame number of kernel physical memory address
 * @size: size of mapping area
 * @prot: page protection flags for this mapping
 *
 * Note: this is only safe if the mm semaphore is held when called.
 *
 * Return: %0 on success, negative error code otherwise.
 */
int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
                    unsigned long pfn, unsigned long size, pgprot_t prot)
{
        pgd_t *pgd;
        unsigned long next;
        unsigned long end = addr + PAGE_ALIGN(size);
        struct mm_struct *mm = vma->vm_mm;
        unsigned long remap_pfn = pfn;
        int err;

        if (WARN_ON_ONCE(!PAGE_ALIGNED(addr)))
                return -EINVAL;

        /*
         * Physically remapped pages are special. Tell the
         * rest of the world about it:
         *   VM_IO tells people not to look at these pages
         *      (accesses can have side effects).
         *   VM_PFNMAP tells the core MM that the base pages are just
         *      raw PFN mappings, and do not have a "struct page" associated
         *      with them.
         *   VM_DONTEXPAND
         *      Disable vma merging and expanding with mremap().
         *   VM_DONTDUMP
         *      Omit vma from core dump, even when VM_IO is turned off.
         *
         * There's a horrible special case to handle copy-on-write
         * behaviour that some programs depend on. We mark the "original"
         * un-COW'ed pages by matching them up with "vma->vm_pgoff".
         * See vm_normal_page() for details.
         */
        if (is_cow_mapping(vma->vm_flags)) {
                if (addr != vma->vm_start || end != vma->vm_end)
                        return -EINVAL;
                vma->vm_pgoff = pfn;
        }

        err = track_pfn_remap(vma, &prot, remap_pfn, addr, PAGE_ALIGN(size));
        if (err)
                return -EINVAL;

        vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;

        BUG_ON(addr >= end);
        pfn -= addr >> PAGE_SHIFT;
        pgd = pgd_offset(mm, addr);
        flush_cache_range(vma, addr, end);
        do {
                next = pgd_addr_end(addr, end);
                err = remap_p4d_range(mm, pgd, addr, next,
                                pfn + (addr >> PAGE_SHIFT), prot);
                if (err)
                        break;
        } while (pgd++, addr = next, addr != end);

        if (err)
                untrack_pfn(vma, remap_pfn, PAGE_ALIGN(size));

        return err;
}
EXPORT_SYMBOL(remap_pfn_range);
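A minimal sketch of the typical caller, a character driver's .mmap handler exposing a physically contiguous buffer to userspace. dev_buf_phys and dev_buf_size are hypothetical driver state; the rest is the real API.

#include <linux/fs.h>
#include <linux/mm.h>

static phys_addr_t dev_buf_phys;        /* hypothetical: set at probe time */
static size_t dev_buf_size;             /* hypothetical: buffer length */

static int demo_mmap(struct file *filp, struct vm_area_struct *vma)
{
        unsigned long size = vma->vm_end - vma->vm_start;

        if (size > dev_buf_size)
                return -EINVAL;

        /*
         * The mmap() syscall path already holds the mm semaphore,
         * satisfying the "Note" in the kernel-doc above.
         */
        return remap_pfn_range(vma, vma->vm_start,
                               dev_buf_phys >> PAGE_SHIFT,
                               size, vma->vm_page_prot);
}

For device registers rather than RAM, the mapping would usually also be marked uncached first, e.g. vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot).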
/**
 * remap_vmalloc_range - map vmalloc pages to userspace
 * @vma: vma to cover (map full range of vma)
 * @addr: vmalloc memory
 * @pgoff: number of pages into addr before first page to map
 *
 * Returns: 0 for success, -Exxx on failure
 *
 * This function checks that addr is a valid vmalloc'ed area, and
 * that it is big enough to cover the vma. Will return failure if
 * that criterion isn't met.
 *
 * Similar to remap_pfn_range() (see mm/memory.c)
 */
int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
                        unsigned long pgoff)
{
        return remap_vmalloc_range_partial(vma, vma->vm_start,
                                           addr, pgoff,
                                           vma->vm_end - vma->vm_start);
}
EXPORT_SYMBOL(remap_vmalloc_range);
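And the vmalloc-side counterpart, again only a sketch: an .mmap handler handing out a buffer allocated elsewhere with vmalloc_user(). vbuf and VBUF_SIZE are hypothetical; vmalloc_user() matters here because it zeroes the memory and sets VM_USERMAP, which remap_vmalloc_range() checks for.

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

#define VBUF_SIZE (16 * PAGE_SIZE)      /* hypothetical buffer size */

static void *vbuf;                      /* hypothetical: vbuf = vmalloc_user(VBUF_SIZE); */

static int demo_vmalloc_mmap(struct file *filp, struct vm_area_struct *vma)
{
        /*
         * remap_vmalloc_range() rejects vmas larger than the area
         * anyway, but failing early keeps the error obvious.
         */
        if (vma->vm_end - vma->vm_start > VBUF_SIZE)
                return -EINVAL;

        /* Map the whole vma, starting at the first page of vbuf. */
        return remap_vmalloc_range(vma, vbuf, 0);
}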