// map kernel text executable and read-only. kvmmap(kpgtbl, KERNBASE, KERNBASE, (uint64)etext-KERNBASE, PTE_R | PTE_X);
// map kernel data and the physical RAM we'll make use of. kvmmap(kpgtbl, (uint64)etext, (uint64)etext, PHYSTOP-(uint64)etext, PTE_R | PTE_W);
// map the trampoline for trap entry/exit to // the highest virtual address in the kernel. kvmmap(kpgtbl, TRAMPOLINE, (uint64)trampoline, PGSIZE, PTE_R | PTE_X);
// map kernel stacks beneath the trampoline,
// each surrounded by invalid guard pages.
// Each slot is 2*PGSIZE: one mapped stack page plus one
// unmapped guard page below it, so a stack overflow faults
// instead of silently corrupting the neighboring stack.
#define KSTACK(p) (TRAMPOLINE - ((p)+1)* 2*PGSIZE)
// Allocate a page for each process's kernel stack. // Map it high in memory, followed by an invalid // guard page. void proc_mapstacks(pagetable_t kpgtbl) { structproc *p; for(p = proc; p < &proc[NPROC]; p++) { char *pa = kalloc(); if(pa == 0) panic("kalloc"); uint64 va = KSTACK((int) (p - proc)); kvmmap(kpgtbl, va, (uint64)pa, PGSIZE, PTE_R | PTE_W); } }
其中 kvmmap 的实现如下,它是对 mappages 的简单封装:由 mappages 遍历要 map 的虚拟地址范围,通过 walk 函数模拟 MMU 的多级页表查找,然后将对应的 PPN 和权限标记写成 PTE 形式:
// add a mapping to the kernel page table. // only used when booting. // does not flush TLB or enable paging. void kvmmap(pagetable_t kpgtbl, uint64 va, uint64 pa, uint64 sz, int perm) { if(mappages(kpgtbl, va, sz, pa, perm) != 0) panic("kvmmap"); }
// Create PTEs for virtual addresses starting at va that refer to // physical addresses starting at pa. va and size might not // be page-aligned. Returns 0 on success, -1 if walk() couldn't // allocate a needed page-table page. int mappages(pagetable_t pagetable, uint64 va, uint64 size, uint64 pa, int perm) { uint64 a, last; pte_t *pte;
if(size == 0) panic("mappages: size"); a = PGROUNDDOWN(va); last = PGROUNDDOWN(va + size - 1); for(;;){ if((pte = walk(pagetable, a, 1)) == 0) return-1; if(*pte & PTE_V) panic("mappages: remap"); *pte = PA2PTE(pa) | perm | PTE_V; if(a == last) break; a += PGSIZE; pa += PGSIZE; } return0; }
// Create a user page table for a given process, // with no user memory, but with trampoline pages. pagetable_t proc_pagetable(struct proc *p) { pagetable_t pagetable;
// map the trampoline code (for system call return) // at the highest user virtual address. // only the supervisor uses it, on the way // to/from user space, so not PTE_U. if(mappages(pagetable, TRAMPOLINE, PGSIZE, (uint64)trampoline, PTE_R | PTE_X) < 0){ uvmfree(pagetable, 0); return0; }
// map the trapframe just below TRAMPOLINE, for trampoline.S. if(mappages(pagetable, TRAPFRAME, PGSIZE, (uint64)(p->trapframe), PTE_R | PTE_W) < 0){ uvmunmap(pagetable, TRAMPOLINE, 1, 0); uvmfree(pagetable, 0); return0; }
return pagetable; }
使用 uvmalloc(kernel/exec.c:52)为每一个 ELF 段分配内存
使用 loadseg(kernel/exec.c:10)加载每一个段到内存中。
exec 通过如下代码新建一个用户栈:
// Allocate two pages at the next page boundary. // Use the second as the user stack. sz = PGROUNDUP(sz); uint64 sz1; if((sz1 = uvmalloc(pagetable, sz, sz + 2*PGSIZE)) == 0) goto bad; sz = sz1; uvmclear(pagetable, sz-2*PGSIZE); sp = sz; stackbase = sp - PGSIZE;
// Given a parent process's page table, copy // its memory into a child's page table. // Copies both the page table and the // physical memory. // returns 0 on success, -1 on failure. // frees any allocated pages on failure. int uvmcopy(pagetable_t old, pagetable_t new, uint64 sz) { pte_t *pte; uint64 pa, i; uint flags; char *mem;
for(i = 0; i < sz; i += PGSIZE){ if((pte = walk(old, i, 0)) == 0) panic("uvmcopy: pte should exist"); if((*pte & PTE_V) == 0) panic("uvmcopy: page not present"); pa = PTE2PA(*pte); flags = PTE_FLAGS(*pte); if((mem = kalloc()) == 0) goto err; memmove(mem, (char*)pa, PGSIZE); if(mappages(new, i, PGSIZE, (uint64)mem, flags) != 0){ kfree(mem); goto err; } } return0;