do_anonymous_page 函数分析
/*
 * Handle a page fault in an anonymous VMA (vmf->vma): for a read fault,
 * map the shared zero page read-only; for a write fault, allocate a new
 * zeroed page, charge it to the memcg, and map it writable.
 *
 * Returns 0 on success (or retry), or a VM_FAULT_* code
 * (VM_FAULT_SIGBUS, VM_FAULT_OOM, the result of handle_userfault(),
 * or of check_stable_address_space()).
 */
static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct mem_cgroup *memcg;
struct page *page;
vm_fault_t ret = 0;
pte_t entry;
/* A shared mapping must not reach this anonymous-fault path; refuse it. */
if (vma->vm_flags & VM_SHARED)
return VM_FAULT_SIGBUS;
/*
 * Make sure a PTE page table exists under vmf->pmd;
 * allocation failure is reported as OOM.
 */
if (pte_alloc(vma->vm_mm, vmf->pmd))
return VM_FAULT_OOM;
/*
 * If the PMD is unstable (presumably being changed concurrently,
 * e.g. by a huge-page operation -- see pmd_trans_unstable()),
 * return 0 so the fault is retried.
 */
if (unlikely(pmd_trans_unstable(vmf->pmd)))
return 0;
/*
 * Read fault: map the global zero page instead of allocating
 * memory, unless this mm forbids zero-page use. The PTE is
 * built as a special (non-refcounted) read-only mapping.
 */
if (!(vmf->flags & FAULT_FLAG_WRITE) &&
!mm_forbids_zeropage(vma->vm_mm)) {
entry = pte_mkspecial(pfn_pte(my_zero_pfn(vmf->address),
vma->vm_page_prot));
vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
vmf->address, &vmf->ptl);
/* Raced with another fault that already installed a PTE. */
if (!pte_none(*vmf->pte))
goto unlock;
ret = check_stable_address_space(vma->vm_mm);
if (ret)
goto unlock;
/* Defer missing-page handling to userfaultfd if registered. */
if (userfaultfd_missing(vma)) {
pte_unmap_unlock(vmf->pte, vmf->ptl);
return handle_userfault(vmf, VM_UFFD_MISSING);
}
goto setpte;
}
/* Write fault: allocate a private zeroed page for this address. */
if (unlikely(anon_vma_prepare(vma)))
goto oom;
page = alloc_zeroed_user_highpage_movable(vma, vmf->address);
if (!page)
goto oom;
/* Charge the new page to the memory cgroup before mapping it. */
if (mem_cgroup_try_charge_delay(page, vma->vm_mm, GFP_KERNEL, &memcg,
false))
goto oom_free_page;
/*
 * Mark the page up to date before it can become visible to
 * other threads via the page tables.
 */
__SetPageUptodate(page);
entry = mk_pte(page, vma->vm_page_prot);
if (vma->vm_flags & VM_WRITE)
entry = pte_mkwrite(pte_mkdirty(entry));
vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
&vmf->ptl);
/* Raced with another fault that already installed a PTE. */
if (!pte_none(*vmf->pte))
goto release;
ret = check_stable_address_space(vma->vm_mm);
if (ret)
goto release;
/*
 * Defer to userfaultfd if registered; undo the charge and
 * drop the freshly allocated page first.
 */
if (userfaultfd_missing(vma)) {
pte_unmap_unlock(vmf->pte, vmf->ptl);
mem_cgroup_cancel_charge(page, memcg, false);
put_page(page);
return handle_userfault(vmf, VM_UFFD_MISSING);
}
/*
 * Account the page, add it to the reverse map and LRU, and
 * commit the memcg charge -- all before installing the PTE.
 */
inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
page_add_new_anon_rmap(page, vma, vmf->address, false);
mem_cgroup_commit_charge(page, memcg, false, false);
lru_cache_add_active_or_unevictable(page, vma);
setpte:
/* Both paths (zero page and new page) join here to install the PTE. */
set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
update_mmu_cache(vma, vmf->address, vmf->pte);
unlock:
pte_unmap_unlock(vmf->pte, vmf->ptl);
return ret;
release:
/* Failure after charging: cancel the charge and free the page. */
mem_cgroup_cancel_charge(page, memcg, false);
put_page(page);
goto unlock;
oom_free_page:
put_page(page);
oom:
return VM_FAULT_OOM;
}
do_anonymous_page函数流程
确保VMA不具备VM_SHARED 属性(if (vma->vm_flags & VM_SHARED))
分配 PTE 页表并将其填入对应的 PMD 表项(pte_alloc(vma->vm_mm, vmf->pmd))
是否为写内存导致的缺页异常 (vmf->flags & FAULT_FLAG_WRITE)
N 创建只读页面
创建一个基于系统零页的特殊映射的页表项 pte_mkspecial(pfn_pte(my_zero_pfn(vmf->address), vma->vm_page_prot));
Y 创建可写页面
分配物理页面(page = alloc_zeroed_user_highpage_movable(vma, vmf->address);)
创建一个具有可写属性的PTE(pte_mkwrite(pte_mkdirty(entry));)
增加匿名页计数(inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);)
添加到RMAP和LRU链表中(page_add_new_anon_rmap)(lru_cache_add_active_or_unevictable)
设置到硬件页表中(set_pte_at)