struct bitmap{// bitmap: one bit per allocation unit (0 = free, 1 = in use)
uint32_t btmp_bytes_len;// length of the bitmap storage in bytes
uint8_t *bits;// start address of the bitmap storage
};
struct virtual_addr{// tracks allocation of a virtual address space
struct bitmap vaddr_bitmap;// bitmap over the virtual pages of this address space
uint32_t vaddr_start;// lowest virtual address managed by this structure
};
struct pool{// a physical memory pool (kernel pool or user pool)
struct bitmap pool_bitmap;// bitmap over the physical pages of this pool
uint32_t phy_addr_start;// starting physical address of the pool
uint32_t pool_size;// pool capacity in bytes
struct lock lock;// serializes allocation/free on this pool
};
struct pool kernel_pool, user_pool;// physical memory pools for kernel and user pages
struct virtual_addr kernel_vaddr;// allocator state for the kernel's virtual address space
用户进程在3级栈上运行时发生中断,CPU自动从TSS中取得0级栈的esp,切换到0级栈,依次压入ss、esp、eflags、cs、eip;通用寄存器和段寄存器则由中断入口程序保存,之后才进入具体的中断处理程序。
与线程切换不同的是,进程切换要先把新进程的页表物理地址加载到cr3,然后将新进程的0级栈指针更新到TSS的esp0字段,再调用switch_to;
中断过程
intr_entry_table//内核汇编中的中断处理入口函数数组
static struct gate_desc idt[IDT_DESC_CNT]//中断描述符表,要在中断初始化的时候加载到中断描述符表寄存器(IDTR)
intr_handler idt_table[IDT_DESC_CNT];//中断处理函数表,内核的中断处理函数一般会跳转到具体的处理函数
/* Build the interrupt descriptor table: install every kernel assembly
 * entry stub with DPL0, then override the last vector (int 0x80) with
 * the syscall entry at DPL3 so user code may invoke it directly.
 * Note: the syscall vector does not go through the generic idt_table
 * dispatch; it has its own entry that indexes the syscall table. */
static void idt_desc_init(void)
{
    int vec;
    for (vec = 0; vec < IDT_DESC_CNT; vec++) {
        make_idt_desc(&idt[vec], IDT_DESC_ATTR_DPL0, intr_entry_table[vec]);
    }
    /* Re-install the final descriptor as the user-callable syscall gate. */
    make_idt_desc(&idt[IDT_DESC_CNT - 1], IDT_DESC_ATTR_DPL3, syscall_handler);
    put_str(" idt_desc_init done\n");
}
堆内存管理需要的数据结构:
struct arena{// header at the start of each heap allocation region
struct mem_block_desc *desc;// descriptor of the block size this arena serves (NULL for large allocations)
uint32_t cnt;// if large: number of page frames; otherwise: number of free blocks remaining
bool large;// true when this arena is a multi-page allocation rather than a block arena
};
struct mem_block_desc k_block_descs[DESC_CNT];// kernel's descriptor array, one per block size (DESC_CNT is 7 — assumes sizes 16..1024)
struct mem_block{// a small heap block; while free, it lives on its descriptor's free_list
struct list_elem free_elem;// link element used to chain the block into the free list
};
struct mem_block_desc{// descriptor for one block size; there are only DESC_CNT (7) sizes
uint32_t block_size;// size in bytes of each block of this kind
uint32_t blocks_per_arena;// blocks one arena page can hold: (4096 - sizeof(struct arena)) / block_size
struct list free_list;// free blocks of this size across ALL arenas of this descriptor
};
开始初始化每个内存描述符
/* Initialize an array of DESC_CNT block descriptors.
 * Block sizes double per slot: 16, 32, 64, ..., 1024 bytes.
 * For each size, compute how many blocks fit in one arena page
 * (page minus the arena header) and start with an empty free list. */
void block_desc_init(struct mem_block_desc* desc_array)
{
    uint16_t idx;
    uint16_t cur_size = 16;   /* smallest block size */
    for (idx = 0; idx < DESC_CNT; idx++) {
        struct mem_block_desc *d = &desc_array[idx];
        d->block_size = cur_size;
        /* usable bytes per arena page divided by the block size */
        d->blocks_per_arena = (PG_SIZE - sizeof(struct arena)) / cur_size;
        list_init(&d->free_list);
        cur_size *= 2;   /* next descriptor handles blocks twice as big */
    }
}
进程控制块中加入每个用户进程的堆内存描述符数组
struct task_struct{// process/thread control block (PCB)
uint32_t *self_kstack;// top of this task's kernel (ring-0) stack
pid_t pid;// process id
enum task_status status;// scheduling state
uint8_t priority;// scheduling priority
char name[16];// task name, NUL-terminated
uint8_t tricks;// remaining time-slice ticks (NOTE(review): identifier looks like a typo for "ticks" — kept for compatibility)
uint32_t elapsed_tricks;// total ticks consumed since creation (same probable typo)
struct list_elem general_tag;// link into the ready list
struct list_elem all_list_tag;// link into the all-tasks list
uint32_t *pgdir;// virtual address of the page directory; NULL for kernel threads
struct virtual_addr userprog_vaddr;// user process's virtual address allocator
struct mem_block_desc u_block_desc[DESC_CNT];// per-process heap block descriptors
uint32_t stack_magic;// canary to detect kernel-stack overflow into the PCB
};
内核内存初始化函数加入堆内存描述符数组的初始化
/* Entry point of kernel memory-management initialization:
 * read the machine's total memory size (placed at 0xb00 by the
 * loader — TODO confirm against loader code), set up the physical
 * pools, then initialize the kernel heap block descriptors. */
void mem_init()
{
    put_str("mem_init start\n");
    uint32_t total_bytes = *((uint32_t *)0xb00);
    mem_pool_init(total_bytes);
    /* prepare the seven kernel block-size descriptors for sys_malloc */
    block_desc_init(k_block_descs);
    put_str("mem_init done\n");
}
每个用户进程在初始化的时候也要初始化用户进程自己的堆内存描述符数组
/* Create a user process that will execute `filename` (here: a function
 * pointer used as the process body) under the given name, and enqueue
 * it on the scheduler's ready and all-task lists.
 * Fix: the original dereferenced the PCB returned by get_kernel_pages(1)
 * without checking for allocation failure (NULL dereference). */
void process_execute(void* filename, char* name)
{
    /* PCB lives in a kernel page so the kernel can always reach it. */
    struct task_struct *thread = get_kernel_pages(1);
    if (thread == NULL) {
        return;   /* out of kernel pages; nothing to schedule */
    }
    init_thread(thread, name, default_prio);
    create_user_vaddr_bitmap(thread);
    thread_create(thread, start_process, filename);
    thread->pgdir = create_page_dir();   /* NOTE(review): may also fail — TODO handle NULL */
    /* Each user process owns its own heap block descriptors. */
    block_desc_init(thread->u_block_desc);
    /* List manipulation must not be interrupted mid-update. */
    enum intr_status old_status = intr_disable();
    ASSERT(!elem_find(&thread_ready_list, &thread->general_tag));
    list_append(&thread_ready_list, &thread->general_tag);
    ASSERT(!elem_find(&thread_all_list, &thread->all_list_tag));
    list_append(&thread_all_list, &thread->all_list_tag);
    intr_set_status(old_status);
}
准备工作完成
1.sys_malloc过程
两个地址解析函数:
/* Given an arena and a block index, return the address of that block:
 * skip the arena header, then step idx blocks of the arena's size. */
static struct mem_block* arena2block(struct arena* a, uint32_t idx)
{
    uint32_t first_block = (uint32_t)a + sizeof(struct arena);
    uint32_t offset = idx * a->desc->block_size;
    return (struct mem_block *)(first_block + offset);
}
/* Recover the arena that owns block b: an arena starts at a page
 * boundary, so masking off the low 12 bits of the block address
 * yields the arena header. (Only valid for single-page arenas.) */
static struct arena* block2arena(struct mem_block* b)
{
    uint32_t page_base = (uint32_t)b & 0xfffff000;
    return (struct arena *)page_base;
}
sys_malloc函数功能,分配大小为size byte的内存,返回首地址指针
/* Allocate size bytes from the heap of the current thread/process and
 * return a pointer to the start, or NULL on failure.
 * Allocations > 1024 bytes get whole page frames with an arena header;
 * smaller requests are served from per-size block free lists. */
void* sys_malloc(uint32_t size){
enum pool_flags PF;// which pool to draw from: kernel or user
struct pool *mem_pool;// physical pool chosen below
uint32_t pool_size;
struct mem_block_desc *descs;// block-descriptor array of the current thread/process
struct task_struct *cur_thread = running_thread();// PCB of the running thread/process
if(cur_thread->pgdir==NULL){// no private page directory => kernel thread: use kernel pool
PF = PF_KERNEL;
pool_size = kernel_pool.pool_size;
mem_pool = &kernel_pool;
descs = k_block_descs;
}
else{
PF = PF_USER;
pool_size = user_pool.pool_size;
mem_pool = &user_pool;
descs = cur_thread->u_block_desc;
}
if(!(size>0&&size<pool_size)){// reject zero-size and over-pool-size requests
return NULL;
}
struct arena *a;// arena that will back this allocation
struct mem_block *b;
lock_acquire(&mem_pool->lock);// serialize against other allocators on this pool
if(size>1024){// large allocation: hand out whole page frames
uint32_t page_cnt = DIV_ROUND_UP(size + sizeof(struct arena), PG_SIZE);// pages needed, rounded up, including the arena header
a = malloc_page(PF, page_cnt);// allocate page_cnt frames; a points at the first
if(a!=NULL){
memset(a, 0, page_cnt * PG_SIZE);
a->desc = NULL;// large allocation: no block descriptor involved
a->cnt = page_cnt;// for large arenas, cnt records the frame count
a->large = true;
lock_release(&mem_pool->lock);
return (void *)(a + 1);// caller's memory starts just past the arena header
}
else{
lock_release(&mem_pool->lock);
return NULL;
}
}
else{
uint8_t desc_idx;
for (desc_idx = 0; desc_idx < DESC_CNT;desc_idx++){
if(size<=descs[desc_idx].block_size){// pick the smallest block size that fits the request
break;
}
}
if(list_empty(&descs[desc_idx].free_list)){// no free blocks of this size: build a new arena first
a = malloc_page(PF, 1);// one page frame backs the new arena
if(a==NULL){
lock_release(&mem_pool->lock);
return NULL;
}
memset(a, 0, PG_SIZE);
a->desc = &descs[desc_idx];// arena serves this block size
a->large = false;
a->cnt = descs[desc_idx].blocks_per_arena;// all blocks start out free
uint32_t block_idx;
enum intr_status old_status = intr_disable();// protect the shared free list while populating it
for (block_idx = 0; block_idx < descs[desc_idx].blocks_per_arena;block_idx++){
b = arena2block(a, block_idx);// address of each block inside the new arena
ASSERT(!elem_find(&a->desc->free_list, &b->free_elem));
list_append(&a->desc->free_list, &b->free_elem);// chain the block onto the descriptor's free list via its free_elem
}
intr_set_status(old_status);
}
b = elem2entry(struct mem_block, free_elem, list_pop(&(descs[desc_idx].free_list)));// pop one free block; convert its free_elem address back to the block address
memset(b, 0, descs[desc_idx].block_size);// hand out zeroed memory
a = block2arena(b);// find the arena that owns this block
a->cnt--;// one fewer free block in that arena
lock_release(&mem_pool->lock);
return (void *)b;
}
}