内存管理的基本思路来源于川合秀实先生的《30天自制操作系统》,但本管理模块基于处理器的分页机制,所以又多少有郑钢先生《操作系统真相还原》的影子。它还存在很多问题,不过还是等明天再解决吧!
内存管理模块稍稍长了一些,但是只要仔细阅读还是蛮好懂的。
// memory.h
#ifndef __MEMORY_H
#define __MEMORY_H
#include "thread.h"
#define pde_idx(vaddr) (((vaddr) & 0xffc00000) >> 22) // page-directory index of vaddr (top 10 bits)
#define pte_idx(vaddr) (((vaddr) & 0x003ff000) >> 12) // page-table index of vaddr (middle 10 bits)
// Byte offset of `member` within `struct_type` (hand-rolled offsetof; 32-bit only).
#define offset(struct_type, member) (int)(&((struct_type*)0)->member)
// Recover the enclosing struct from a pointer to its embedded list element.
#define elem2entry(struct_type, struct_member_name, elem_ptr) \
(struct_type*)((int)elem_ptr - offset(struct_type, struct_member_name))
void page_init(void);
struct mem_desc* get_mem_desc(struct mem_man* mm);
void mem_man_init(struct mem_man* mm, unsigned int mem_desc_array_base,
unsigned int mem_start, unsigned int mem_byte_size);
void mem_init(void);
// Allocators return a start address, or 0 on failure (0 is never a managed address).
unsigned int addr_alloc(struct mem_man* mm, unsigned int size);
unsigned int pg_alloc(struct mem_man* mm, unsigned int pg_cnt);
unsigned int one_pg_alloc(struct mem_man* mm);
struct mem_desc* addr_free(struct mem_man* mm, unsigned int start);
// Pointers to a virtual address's PDE/PTE via the recursive directory mapping.
unsigned int* pde_ptr(unsigned int vaddr);
unsigned int* pte_ptr(unsigned int vaddr);
void page_table_add(unsigned int vir_addr, unsigned int phy_addr);
void* malloc_page(struct mem_man* mm, unsigned int pg_cnt);
void* get_kernel_pages(unsigned int pg_cnt);
void page_table_pte_remove(unsigned int vir_addr);
unsigned int mfree_page(struct mem_man* mm, unsigned int vir_addr_start);
unsigned int vaddr2paddr(unsigned int vaddr);
void* get_user_pages(unsigned int pg_cnt);
void uservaddr_memman_init(struct mem_man* mm, unsigned int mem_start,
unsigned int mem_byte_size);
void * get_a_page(unsigned int vaddr);
#endif
// memory.c
#include "list.h"
#include "memory.h"
#include "string.h"
#include "debug.h"
#include "intr_status_op.h"
// The three static pools: kernel physical (32MB-1GB), user physical
// (1GB-2GB), and kernel virtual (32MB-2GB); see mem_init.
struct mem_man kernel_phy, user_phy, kernel_vir;
void page_init(void) {
// Build the kernel's initial page tables and turn paging on.
// Physical base of the kernel page directory; also loaded into CR3 below.
unsigned int* pde_base = (unsigned int*)0x8000;
// Kernel page tables.  NOTE(review): this is unsigned-int pointer
// arithmetic, so pde_base + 0x1000 is 0x8000 + 0x1000*4 = physical
// 0xC000, NOT directly after the 4KB directory as the original comment
// claimed — the scheme is self-consistent, but confirm 0xC000..0x14000
// (and 0x2C000..0x34000 below) are really free.
unsigned int* pte_base = pde_base + 0x1000;
// Video-memory page tables: pte_base + 0x8000 entries = physical 0x2C000.
unsigned int* video_pte_base = pte_base + 0x8000;
int i;
// Clear the whole page directory.
for(i = 0; i < 1024; i++) {
pde_base[i] = 0;
}
// The kernel owns the first 32MB of physical memory, identity-mapped:
// 8 PDEs, each pointing at one page table (flags 0x7 = P | RW | US).
for(i = 0; i < 8; i++) {
pde_base[i] = (unsigned int)pte_base + i * 0x1000 + 0x7;
}
// Video memory is identity-mapped too: virtual 0xe0000000 maps straight
// to physical 0xe0000000 (8 PDEs = 32MB).
for(i = 0; i < 8; i++) {
pde_base[0xe0000000 / 0x400000 + i] = (unsigned int)video_pte_base +
i * 0x1000 + 0x7;
}
// Entry 1022 (counting from 0) points back at the directory itself, so
// with paging enabled the directory is reachable at virtual 0xffbfe000
// and every page table appears in the 4MB window at 0xff800000
// (see pde_ptr/pte_ptr).
pde_base[1022] = (unsigned int)pde_base + 0x7;
// Fill the 8 kernel page tables: identity map of the first 32MB.
for(i = 0; i < 1024 * 8; i++) {
pte_base[i] = 0x00000000 + i * 0x1000 + 0x7;
}
// Fill the 8 video page tables: identity map of 0xe0000000..+32MB.
for(i = 0; i < 1024 * 8; i++) {
video_pte_base[i] = 0xe0000000 + i * 0x1000 + 0x7;
}
// Load CR3 with the directory's physical address, set CR0.PG, then a far
// jump to flush the prefetch queue after the mode change.
__asm__ __volatile__ ("movl $0x00008000, %%eax; movl %%eax, %%cr3":);
__asm__ __volatile__ ("movl %%cr0, %%eax;or $0x80000000, %%eax;\
movl %%eax, %%cr0":);
__asm__("ljmp $0x08, $1f;1:");
}
struct mem_desc* get_mem_desc(struct mem_man* mm) {
int i;
unsigned int cnt;
if((mm == &kernel_phy) || (mm == &kernel_vir) || (mm == &user_phy)) {
cnt = 8000;
} else {
cnt = 2000;
}
// 10000项个内存描述符,将占用49个自然页的内存。
for(i = 0; i < cnt; i++) {
if(mm->md_a[i].status == -1) {
return &mm->md_a[i];
}
}
return NULL;
}
void mem_man_init(struct mem_man* mm, unsigned int mem_desc_array_base,
unsigned int mem_start, unsigned int mem_byte_size) {
ASSERT((mem_start >= 0x02000000));
mm->md_a = (struct mem_desc*)mem_desc_array_base; // ?M
int i;
for(i = 0; i < 8000; i++) {
mm->md_a[i].status = -1;
}
list_init(&mm->free);
list_init(&mm->used);
mm->md_a[0].start = mem_start;
mm->md_a[0].size = mem_byte_size;
mm->md_a[0].status = 0;
list_append(&mm->free, &mm->md_a[0].tag);
}
/* Set up the three static pools.  Their descriptor arrays live back to
 * back in physical memory starting at 0x01f00000 (just under 32MB), each
 * 8000 entries long. */
void mem_init(void) {
    unsigned int array_base = 0x01f00000;
    unsigned int array_bytes = sizeof(struct mem_desc) * 8000;
    /* Kernel physical pool: 32MB .. 1GB. */
    mem_man_init(&kernel_phy, array_base,
                 0x02000000, 0x40000000 - 0x02000000);
    /* User physical pool: 1GB .. 2GB. */
    mem_man_init(&user_phy, array_base + array_bytes,
                 0x40000000, 0x80000000 - 0x40000000);
    /* Kernel virtual pool: 32MB .. 2GB. */
    mem_man_init(&kernel_vir, array_base + 2 * array_bytes,
                 0x02000000, 0x80000000 - 0x02000000);
    lock_init(&kernel_phy.lock);
    lock_init(&user_phy.lock);
    lock_init(&kernel_vir.lock);
}
// First-fit allocator: carve `size` bytes off the front of the first free
// block large enough, move the carved piece onto the used list, and return
// its start address.  Returns 0 on failure (0 is never a valid managed
// address; every pool starts at or above 0x02000000).
unsigned int addr_alloc(struct mem_man* mm, unsigned int size) {
ASSERT((mm != NULL));
struct list_elem* t = mm->free.head.next;
struct mem_desc* md;
ASSERT((!list_empty(&mm->free)));
// Walk the free list for the first block with size >= `size`.
while(t != &mm->free.tail) {
md = elem2entry(struct mem_desc, tag, t);
if(md->size >= size) {
break;
}
t = t->next;
}
if(t == &mm->free.tail) {
return 0;
}
// Fresh descriptor for the allocated piece (status 1 = in use).
struct mem_desc* md0 = get_mem_desc(mm);
ASSERT((md0 != NULL));
md0->start = md->start;
md0->size = size;
md0->status = 1;
// Shrink the free block from its front.
md->size -= size;
md->start += size;
list_append(&mm->used, &md0->tag);
// Fully consumed: retire the free block's descriptor (-1 = unused slot).
if(!(md->size)) {
md->status = -1;
list_remove(&md->tag);
}
return md0->start;
}
/* Page-granular wrapper around addr_alloc: allocate pg_cnt 4KB pages. */
unsigned int pg_alloc(struct mem_man* mm, unsigned int pg_cnt) {
    ASSERT((pg_cnt > 0));
    unsigned int byte_cnt = pg_cnt * 4096;
    return addr_alloc(mm, byte_cnt);
}
/* Convenience wrapper: allocate exactly one 4KB page from mm. */
unsigned int one_pg_alloc(struct mem_man* mm) {
    return addr_alloc(mm, 4096);
}
// Free the used block starting at `start` and re-insert it into the
// address-ordered free list, coalescing with adjacent free blocks where
// possible.  Returns the freed block's descriptor — possibly already
// retired (status -1) after a merge, but with its size field intact —
// or NULL when `start` is not on the used list.
struct mem_desc* addr_free(struct mem_man* mm, unsigned int start) {
struct list_elem* t = mm->used.head.next;
struct mem_desc* md;
// Locate the used block by its exact start address.
while(t != &mm->used.tail) {
md = elem2entry(struct mem_desc, tag, t);
if(md->start == start) {
break;
}
t = t->next;
}
if(t == &mm->used.tail) {
return NULL;
}
list_remove(&md->tag);
// Free list empty: md becomes its only member.
if(list_empty(&mm->free)) {
md->status = 0;
list_append(&mm->free, &md->tag);
return md;
}
struct mem_desc* md_l, *md_r;
// Find md_r = first free block lying above md (list is address-ordered).
t = mm->free.head.next;
while(t != &mm->free.tail) {
md_r = elem2entry(struct mem_desc, tag, t);
if(md_r->start > md->start) {
break;
}
t = t->next;
}
// Case 1: md belongs before the first free block — merge forward into
// md_r if adjacent, otherwise push md at the front.
if(t == mm->free.head.next) {
if(md->start + md->size == md_r->start) {
md_r->start = md->start;
md_r->size += md->size;
md->status = -1;
return md;
}
md->status = 0;
list_push(&mm->free, &md->tag);
return md;
}
md_l = elem2entry(struct mem_desc, tag, t->prev);
// Case 2: md belongs after the last free block — merge backward into
// md_l if adjacent, otherwise append md at the end.
if(t == &mm->free.tail) {
if(md_l->start + md_l->size == md->start) {
md_l->size += md->size;
md->status = -1;
return md;
}
md->status = 0;
list_append(&mm->free, &md->tag);
return md;
}
// Case 3: md sits between md_l and md_r.
// 3a: adjacent to both — fold md and md_r into md_l, retire both.
if((md_l->start + md_l->size == md->start) &&
(md->start + md->size == md_r->start))
{
md_l->size += md->size + md_r->size;
md_r->status = -1;
list_remove(&md_r->tag);
md->status = -1;
return md;
}
// 3b: adjacent only to the left neighbour.
if(md_l->start + md_l->size == md->start) {
md_l->size += md->size;
md->status = -1;
return md;
}
// 3c: adjacent only to the right neighbour.
if(md->start + md->size == md_r->start) {
md_r->start = md->start;
md_r->size += md->size;
md->status = -1;
return md;
}
// 3d: adjacent to neither — link md in before md_r to keep order.
md->status = 0;
list_insert_before(&md_r->tag, &md->tag);
return md;
}
/* Pointer to vaddr's page-directory entry.  pde[1022] points back at the
 * directory itself, so with paging on the directory is visible at virtual
 * 0xffbfe000; add the 4-byte entry offset for vaddr's top 10 bits. */
unsigned int* pde_ptr(unsigned int vaddr) {
    unsigned int dir_index = (vaddr & 0xffc00000) >> 22;
    return (unsigned int*)(0xffbfe000 + dir_index * 4);
}
/* Pointer to vaddr's page-table entry.  Through the recursive directory
 * entry (pde[1022]) all page tables appear as a flat 4MB window starting
 * at virtual 0xff800000: table i occupies 0xff800000 + i*4KB. */
unsigned int* pte_ptr(unsigned int vaddr) {
    unsigned int table_byte_off = (vaddr & 0xffc00000) >> 10; /* pde index * 4096 */
    unsigned int entry_byte_off = ((vaddr & 0x003ff000) >> 12) << 2;
    return (unsigned int*)(0xff800000 + table_byte_off + entry_byte_off);
}
// Install the mapping vir_addr -> phy_addr with flags 0x7 (present |
// writable | user), creating the page table first if the PDE is absent.
void page_table_add(unsigned int vir_addr, unsigned int phy_addr){
unsigned int* pde = pde_ptr(vir_addr);
unsigned int* pte = pte_ptr(vir_addr);
if(*pde & 0x00000001) {
// Page table already present: the target PTE must not be mapped yet.
ASSERT(!(*pte & 0x00000001));
if(!(*pte & 0x00000001)) {
*pte = phy_addr + 0x7;
} else {
// Unreachable while ASSERT is active; kept as a release-build guard.
PANIC("should not be here!, pte repeat");
}
} else {
// No page table for this 4MB region yet: back the PDE with a fresh
// kernel physical page, then zero the new table through the recursive
// mapping (page-aligned address of pte) before installing the PTE.
unsigned int page_table_phy_vaddr = one_pg_alloc(&kernel_phy);
*pde = page_table_phy_vaddr + 0x7;
memset_((void*)((unsigned int)pte & 0xfffff000), 0, 4096);
ASSERT(!(*pte & 0x00000001));
*pte = phy_addr + 0x7;
}
}
/* Allocate pg_cnt consecutive pages from the virtual pool mm, back them
 * with physical pages (kernel pool for kernel_vir, user pool otherwise),
 * and wire up the page tables.  Returns the virtual base, or NULL on
 * failure.  Caller is expected to hold the appropriate lock / have
 * interrupts disabled (see get_kernel_pages / get_user_pages). */
void* malloc_page(struct mem_man* mm, unsigned int pg_cnt) {
    unsigned vir_addr = pg_alloc(mm, pg_cnt);
    if(!vir_addr) {
        return NULL;
    }
    struct mem_man* mm0;
    if(mm != &kernel_vir) {
        mm0 = &user_phy;
    } else {
        mm0 = &kernel_phy;
    }
    void* res = (void*)vir_addr;
    unsigned int phy_addr = pg_alloc(mm0, pg_cnt);
    if(!phy_addr) {
        /* BUGFIX: the original leaked the virtual range here — give it
         * back to mm before reporting failure. */
        addr_free(mm, vir_addr);
        return NULL;
    }
    /* Map the pages one by one; page_table_add installs each PTE (and a
     * fresh page table whenever the PDE is still absent). */
    unsigned int i;
    for(i = 0; i < pg_cnt; i++) {
        page_table_add(vir_addr, phy_addr);
        vir_addr += 4096;
        phy_addr += 4096;
    }
    return res;
}
/* Allocate pg_cnt zeroed, mapped kernel pages.  Interrupts are disabled
 * around the allocation because the kernel pools are not otherwise
 * protected on this path.  Returns NULL on failure. */
void* get_kernel_pages(unsigned int pg_cnt) {
    enum intr_status old_status = intr_disable();
    void* vir_addr = malloc_page(&kernel_vir, pg_cnt);
    /* BUGFIX: the original condition was inverted (if(!vir_addr)), which
     * zeroed the pages only when allocation FAILED — skipping the clear
     * on success and handing NULL to memset_.  Zero on success instead. */
    if(vir_addr) {
        memset_(vir_addr, 0, pg_cnt * 4096);
    }
    intr_set_status(old_status);
    return vir_addr;
}
/* Allocate pg_cnt zeroed pages from the current task's user virtual pool,
 * backed by the user physical pool.  Serialised by user_phy.lock.
 * Returns NULL on failure. */
void* get_user_pages(unsigned int pg_cnt) {
    lock_acquire(&user_phy.lock);
    struct task* cur = running_thread();
    void* vir_addr = malloc_page(&cur->user_vir, pg_cnt);
    /* BUGFIX: the original condition was inverted (if(!vir_addr)), which
     * zeroed the pages only when allocation FAILED — skipping the clear
     * on success and handing NULL to memset_.  Zero on success instead. */
    if(vir_addr) {
        memset_(vir_addr, 0, pg_cnt * 4096);
    }
    lock_release(&user_phy.lock);
    return vir_addr;
}
/* Unmap vir_addr: clear the present bit of its PTE, then flush the stale
 * translation from the TLB with invlpg. */
void page_table_pte_remove(unsigned int vir_addr) {
    unsigned int* entry = pte_ptr(vir_addr);
    *entry = *entry & ~0x1;
    asm volatile("invlpg %0"::"m"(vir_addr):"memory");
}
/* Release pages previously obtained through malloc_page: free the backing
 * physical range, unmap every page, then free the virtual range.
 * Returns 1 on success, 0 when the physical block is not tracked. */
unsigned int mfree_page(struct mem_man* mm, unsigned int vir_addr_start) {
    ASSERT((mm != NULL));
    /* Same pool pairing as malloc_page. */
    struct mem_man* mm0;
    if(mm != &kernel_vir) {
        mm0 = &user_phy;
    } else {
        mm0 = &kernel_phy;
    }
    /* Resolve the physical base while the mapping still exists. */
    unsigned int phy_addr_start = vaddr2paddr(vir_addr_start);
    struct mem_desc* md = addr_free(mm0, phy_addr_start);
    if(!md) {
        return 0;
    }
    /* ROBUSTNESS: capture the block size immediately — addr_free may have
     * merged md into a neighbour and retired its descriptor, so reading
     * md->size on every loop iteration (as the original did) relied on
     * the recycled descriptor staying intact.  A dead duplicate !md check
     * after the loop was also removed. */
    unsigned int byte_size = md->size;
    unsigned int vaddr = vir_addr_start;
    unsigned int i;
    for(i = 0; i < byte_size / 4096; i++) {
        page_table_pte_remove(vaddr);
        vaddr += 4096;
    }
    addr_free(mm, vir_addr_start);
    return 1;
}
/* Translate a mapped virtual address to its physical address: the frame
 * base from the PTE's top 20 bits plus the in-page offset. */
unsigned int vaddr2paddr(unsigned int vaddr) {
    unsigned int pte_val = *pte_ptr(vaddr);
    unsigned int frame_base = pte_val & 0xfffff000;
    unsigned int page_off = vaddr & 0x00000fff;
    return frame_base + page_off;
}
void uservaddr_memman_init(struct mem_man* mm, unsigned int mem_start,
unsigned int mem_byte_size) {
ASSERT((mem_start >= 0x80000000));
mm->md_a = (struct mem_desc*)get_kernel_pages(10);
int i;
for(i = 0; i < 2000; i++) {
mm->md_a[i].status = -1;
}
list_init(&mm->free);
list_init(&mm->used);
mm->md_a[0].start = mem_start;
mm->md_a[0].size = mem_byte_size;
mm->md_a[0].status = 0;
list_append(&mm->free, &mm->md_a[0].tag);
}
/* Map the fixed virtual address vaddr for the current task, backing it
 * with two pages from the user physical pool.  For user processes
 * (cur->pgdir != NULL) the task's virtual pool bookkeeping is updated as
 * well.  Returns vaddr on success, NULL on failure. */
void * get_a_page(unsigned int vaddr) {
    lock_acquire(&user_phy.lock);
    struct task* cur = running_thread();
    struct mem_man* mm = &cur->user_vir;
    if(cur->pgdir != NULL) {
        struct list_elem* t = mm->free.head.next;
        struct mem_desc* md;
        ASSERT((!list_empty(&mm->free)));
        /* NOTE(review): elem2entry of a valid node is never NULL, so this
         * effectively picks the first free block — confirm intent. */
        while(t != &mm->free.tail) {
            md = elem2entry(struct mem_desc, tag, t);
            if(md != NULL) {
                break;
            }
            t = t->next;
        }
        if(t == &mm->free.tail) {
            /* BUGFIX: the original returned NULL while still holding
             * user_phy.lock, deadlocking every later caller. */
            lock_release(&user_phy.lock);
            return NULL;
        }
        struct mem_desc* md0 = get_mem_desc(mm);
        ASSERT((md0 != NULL));
        /* Only bookkeep the very first allocation of a pristine pool. */
        if((list_len(&mm->free) == 1) && (list_len(&mm->used) == 0)) {
            md0->start = vaddr;
            md0->size = 2 * 4096;  /* two pages, matching pg_alloc below */
            md0->status = 1;
            md->size -= 2 * 4096;
            list_append(&mm->used, &md0->tag);
        }
    }
    unsigned int paddr = pg_alloc(&user_phy, 2);
    page_table_add(vaddr, paddr);
    lock_release(&user_phy.lock);
    return (void*) vaddr;
}