已完成实验
简介
实验 22. 堆内存管理
总结
-
用户的内存块描述数组在 pcb 中
-
定义了内存块描述数组,描述符的链表里放着申请了但是没有使用的内存块
-
申请 size 大小的内存时,本质申请到的是 sizeof(arena) + size(假设叫 buffer) 的长度
- 若 size>1024, 那么 arena 把 buffer 当成一个内存块,直接返回 buffer 的首地址
- 若 size≤1024,那么 arena 把 buffer 分成 n 份内存块,把每一个内存块记录在描述符的链表中;
  从链表中弹出一个内存块,修改 arena 的 cnt,返回内存块的首地址
-
定义分配和释放内存的用户调用
主要代码
thread.h
memory.h
memory.c
/// @brief arena: header placed at the start of each heap page (or page run),
/// describing the memory blocks (or whole page frames) that follow it.
struct arena {
    struct mem_block_desc* desc; // owning block descriptor; NULL for large (>1024 B) allocations
    uint32_t cnt;
    bool large; // when large is true, cnt is the page-frame count; otherwise cnt is the number of free mem_blocks
};
struct mem_block_desc k_block_descs[DESC_CNT]; // kernel block-descriptor array; user processes keep theirs in the PCB
//---堆内存管理 begin---
#pragma region heap_manage
/// @brief Return the address of the idx-th memory block inside arena a.
static struct mem_block* arena2block(struct arena* a, uint32_t idx) {
    // Blocks start right after the arena header and are block_size apart.
    uint32_t first_block = (uint32_t)a + sizeof(struct arena);
    return (struct mem_block*)(first_block + idx * a->desc->block_size);
}
/// @brief Return the arena that memory block b lives in.
/// The arena header sits at the start of b's 4 KiB page.
static struct arena* block2arena(struct mem_block* b) {
    // Round the block address down to its page boundary.
    uint32_t page_base = (uint32_t)b & 0xfffff000;
    return (struct arena*)page_base;
}
/// @brief Allocate size bytes from the current thread/process heap.
/// @param size number of bytes requested (must satisfy 0 < size < pool capacity)
/// @return virtual address of the allocation, or NULL on failure
void* sys_malloc(uint32_t size) {
    enum pool_flags PF;
    uint32_t pool_size;
    struct pool* mem_pool;
    struct mem_block_desc* descs;
    // Choose memory pool and descriptors: a kernel thread has no private page directory.
    struct task_struct* cur_thread = running_thread();
    if (cur_thread->pgdir == NULL) { // kernel thread
        PF = PF_KERNEL;
        pool_size = kernel_pool.pool_size;
        mem_pool = &kernel_pool;
        descs = k_block_descs;
    } else { // user process: its block descriptors live in the PCB
        PF = PF_USER;
        pool_size = user_pool.pool_size;
        mem_pool = &user_pool;
        descs = cur_thread->u_block_desc;
    }
    // Reject zero-size requests and requests larger than the whole pool.
    if (!(size > 0 && size < pool_size)) { return NULL; }
    struct arena* a;
    struct mem_block* b;
    lock_acquire(&mem_pool->lock);
    // Larger than the biggest descriptor block size (1024): hand out whole page frames.
    if (size > 1024) {
        // Round up to whole pages; the allocation is
        // sizeof(struct arena) header + size bytes of buffer.
        uint32_t page_cnt = DIV_ROUND_UP(size + sizeof(struct arena), PG_SIZE);
        a = malloc_page(PF, page_cnt);
        // Mark the arena as a "large" multi-page allocation.
        if (a != NULL) {
            memset(a, 0, page_cnt * PG_SIZE);
            a->desc = NULL;    // no block descriptor for large allocations
            a->cnt = page_cnt; // cnt holds the page-frame count here
            a->large = true;
            lock_release(&mem_pool->lock);
            return (void*)(a + 1); // buffer starts right after the arena header
        } else {
            lock_release(&mem_pool->lock);
            return NULL;
        }
    } else { // small request: take a block from the fitting descriptor's free list
        // Find the smallest descriptor whose block size fits the request.
        uint8_t desc_idx;
        for (desc_idx = 0; desc_idx < DESC_CNT; desc_idx++) {
            if (size <= descs[desc_idx].block_size) { break; }
        }
        // Free list exhausted: carve a fresh page into blocks to replenish it.
        if (list_empty(&descs[desc_idx].free_list)) {
            // One page = arena header + n memory blocks.
            a = malloc_page(PF, 1);
            if (a == NULL) {
                lock_release(&mem_pool->lock);
                return NULL;
            }
            memset(a, 0, PG_SIZE);
            // Tie the arena to its descriptor.
            a->desc = &descs[desc_idx];
            a->cnt = descs[desc_idx].blocks_per_arena; // cnt = free blocks in this arena
            a->large = false;
            // Append every block of the new arena to the descriptor's free list,
            // with interrupts off so list manipulation is not preempted.
            uint32_t block_idx;
            enum intr_status old_status = intr_disable();
            for (block_idx = 0; block_idx < descs[desc_idx].blocks_per_arena; block_idx++) {
                b = arena2block(a, block_idx);
                ASSERT(!elem_find(&a->desc->free_list, &b->free_elem));
                list_append(&a->desc->free_list, &b->free_elem);
            }
            intr_set_status(old_status);
        }
        // Pop one block off the descriptor's free list and hand it out.
        b = elem2entry(struct mem_block, free_elem, list_pop(&(descs[desc_idx].free_list)));
        memset(b, 0, descs[desc_idx].block_size);
        a = block2arena(b); // recover the arena this block belongs to
        a->cnt--;           // one fewer free block in this arena
        lock_release(&mem_pool->lock);
        return (void*)b;
    }
}
/// @brief Mark the physical page frame at pg_phy_addr free in its pool's bitmap.
/// @param pg_phy_addr physical address of the page frame to release
void pfree(uint32_t pg_phy_addr) {
    // Frames at or above the user pool's base belong to the user pool,
    // everything below belongs to the kernel pool.
    struct pool* mem_pool =
        (pg_phy_addr >= user_pool.phy_addr_start) ? &user_pool : &kernel_pool;
    uint32_t bit_idx = (pg_phy_addr - mem_pool->phy_addr_start) / PG_SIZE;
    bitmap_set(&mem_pool->pool_bitmap, bit_idx, 0); // clear the bit: frame is free again
}
/// @brief Unmap virtual address vaddr: clear only the Present bit of its PTE.
/// @param vaddr virtual address whose page-table entry is invalidated
static void page_table_pte_remove(uint32_t vaddr) {
    uint32_t* pte = pte_ptr(vaddr);
    *pte &= ~PG_P_1; // clear the P (present) bit of the page-table entry
    // Flush the stale TLB entry for the page containing vaddr.
    // fix: the old form `invlpg %0` with an "m"(vaddr) operand invalidated the
    // page holding the local variable's stack slot, not the page at address
    // vaddr. INVLPG needs the target address itself as its memory operand, so
    // pass the value in a register and dereference it in the instruction.
    asm volatile("invlpg (%0)" ::"r"(vaddr) : "memory");
}
/// @brief Clear pg_cnt virtual-page bits starting at _vaddr in the matching
/// virtual-address bitmap (kernel pool, or the current process's pool).
/// @param pf     which virtual pool to operate on
/// @param _vaddr starting virtual address
/// @param pg_cnt number of pages to mark free
static void vaddr_remove(enum pool_flags pf, void* _vaddr, uint32_t pg_cnt) {
    uint32_t vaddr = (uint32_t)_vaddr;
    if (pf == PF_KERNEL) { // kernel virtual pool
        uint32_t start_bit = (vaddr - kernel_vaddr.vaddr_start) / PG_SIZE;
        for (uint32_t i = 0; i < pg_cnt; i++) {
            bitmap_set(&kernel_vaddr.vaddr_bitmap, start_bit + i, 0);
        }
    } else { // current process's user virtual pool
        struct task_struct* cur = running_thread();
        uint32_t start_bit = (vaddr - cur->userprog_vaddr.vaddr_start) / PG_SIZE;
        for (uint32_t i = 0; i < pg_cnt; i++) {
            bitmap_set(&cur->userprog_vaddr.vaddr_bitmap, start_bit + i, 0);
        }
    }
}
/// @brief 释放以虚拟地址vaddr为起始的cnt个页框
/// @param pool_flags 哪个空间
/// @param _vaddr 虚拟地址
/// @param pg_cnt 页框数
void mfree_page(enum pool_flags pf, void* _vaddr, uint32_t pg_cnt) {
uint32_t pg_phy_addr;
uint32_t vaddr = (int32_t)_vaddr, page_cnt = 0;
ASSERT(pg_cnt >= 1 && vaddr % PG_SIZE == 0);
pg_phy_addr = addr_v2p(vaddr); // 获取虚拟地址vaddr对应的物理地址
// 确保待释放的物理内存在低端1M+1k大小的页目录+1k大小的页表地址范围外
ASSERT((pg_phy_addr % PG_SIZE) == 0 && pg_phy_addr >= 0x102000);
// 判断pg_phy_addr属于用户物理内存池还是内核物理内存池
if (pg_phy_addr >= user_pool.phy_addr_start) { // 位于user_pool内存池
vaddr -= PG_SIZE;
while (page_cnt < pg_cnt) {
vaddr += PG_SIZE;
pg_phy_addr = addr_v2p(vaddr);
// 确保物理地址属于用户物理内存池
ASSERT((pg_phy_addr % PG_SIZE) == 0 && pg_phy_addr >= user_pool.phy_addr_start);
// 把物理地址对应的位图比特标记空闲
pfree(pg_phy_addr);
// 再从页表中清除此虚拟地址所在的页表项pte
page_table_pte_remove(vaddr);
page_cnt++;
}
// 把虚拟地址对应的位图比特标记空闲
vaddr_remove(pf, _vaddr, pg_cnt);
} else { // 位于kernel_pool内存池
vaddr -= PG_SIZE;
while (page_cnt < pg_cnt) {
vaddr += PG_SIZE;
pg_phy_addr = addr_v2p(vaddr);
// 确保待释放的物理内存只属于内核物理内存池
ASSERT((pg_phy_addr % PG_SIZE) == 0 && pg_phy_addr >= kernel_pool.phy_addr_start &&
pg_phy_addr < user_pool.phy_addr_start);
// 把物理地址对应的位图比特标记空闲
pfree(pg_phy_addr);
// 清空pte,打断虚拟地址和物理地址映射
page_table_pte_remove(vaddr);
page_cnt++;
}
// 把虚拟地址对应的位图比特标记空闲
vaddr_remove(pf, _vaddr, pg_cnt);
}
}
/// @brief Free the heap memory at ptr.
/// Recovers the owning arena, then either returns whole pages (large
/// allocation) or puts the block back on its descriptor's free list.
/// @param ptr address previously returned by sys_malloc (must not be NULL)
void sys_free(void* ptr) {
    ASSERT(ptr != NULL);
    if (ptr != NULL) {
        enum pool_flags PF;
        struct pool* mem_pool;
        // Kernel thread or user process? (kernel threads have no private page dir)
        if (running_thread()->pgdir == NULL) {
            ASSERT((uint32_t)ptr >= K_HEAP_START);
            PF = PF_KERNEL;
            mem_pool = &kernel_pool;
        } else {
            PF = PF_USER;
            mem_pool = &user_pool;
        }
        lock_acquire(&mem_pool->lock);
        struct mem_block* b = ptr;
        struct arena* a = block2arena(b); // arena header sits at the page start
        ASSERT(a->large == 0 || a->large == 1);
        // Large allocation (> 1024 bytes): give the whole page run back.
        if (a->desc == NULL && a->large == true) {
            mfree_page(PF, a, a->cnt);
        } else { // small block (<= 1024 bytes)
            // Return the block to its descriptor's free list first.
            list_append(&a->desc->free_list, &b->free_elem);
            a->cnt++; // one more free block in this arena
            // If every block of the arena is free again, unlink them all and
            // release the arena's page.
            if (a->cnt == a->desc->blocks_per_arena) {
                uint32_t block_idx;
                for (block_idx = 0; block_idx < a->desc->blocks_per_arena; block_idx++) {
                    struct mem_block* b = arena2block(a, block_idx);
                    ASSERT(elem_find(&a->desc->free_list, &b->free_elem));
                    list_remove(&b->free_elem);
                }
                mfree_page(PF, a, 1);
            }
        }
        lock_release(&mem_pool->lock);
    }
}
/// @brief Initialize an array of memory-block descriptors.
/// Creates DESC_CNT descriptors with block sizes 16, 32, ..., 1024 bytes.
/// @param desc_array first element of the descriptor array
void block_desc_init(struct mem_block_desc* desc_array) {
    uint16_t block_size = 16; // smallest block size; doubles each iteration
    for (uint16_t desc_idx = 0; desc_idx < DESC_CNT; desc_idx++) {
        struct mem_block_desc* d = &desc_array[desc_idx];
        d->block_size = block_size;
        // How many blocks of this size fit in one page after the arena header.
        d->blocks_per_arena = (PG_SIZE - sizeof(struct arena)) / block_size;
        list_init(&d->free_list);
        block_size <<= 1; // next descriptor handles blocks twice as large
    }
}
#pragma endregion heap_manage
//---堆内存管理 end---
syscall.h
syscall.c
syscall-init.c
process.c
main.c
// 文件: main.c
// 时间: 2024-07-19
// 来自: ccj
// 描述: 内核从此处开始
#include "print.h"
#include "init.h"
#include "thread.h"
#include "interrupt.h"
#include "console.h"
#include "process.h"
#include "syscall.h"
#include "syscall-init.h"
#include "stdio.h"
#include "memory.h"
// 两个内核线程
void k_thread_a(void*);
void k_thread_b(void*);
// 两个用户进程
void u_prog_a(void);
void u_prog_b(void);
/// Kernel entry point: initialize everything, spawn the heap-test user
/// processes and kernel threads, then idle with interrupts enabled.
int main(void) {
    put_str("I am kernel\n");
    init_all();
    // Two user processes and two kernel threads exercising sys_malloc/sys_free.
    process_execute(u_prog_a, "user_prog_a");
    process_execute(u_prog_b, "user_prog_b");
    thread_start("k_thread_a", 31, k_thread_a, "argA ");
    thread_start("k_thread_b", 31, k_thread_b, "argB ");
    intr_enable(); // enable interrupts so the timer tick can drive scheduling
    for (;;) {}
    return 0; // never reached
}
// 内核线程函数
/// Kernel thread: allocate three small heap blocks, print their addresses,
/// then free them and spin forever.
void k_thread_a(void* arg) {
    void* blk1 = sys_malloc(256);
    void* blk2 = sys_malloc(255);
    void* blk3 = sys_malloc(254);
    console_put_str("k_thread_a malloc addr:0x");
    console_put_int((int)blk1);
    console_put_char(',');
    console_put_int((int)blk2);
    console_put_char(',');
    console_put_int((int)blk3);
    console_put_char('\n');
    // Busy-wait so other tasks get a chance to run and print first.
    for (int delay = 100000; delay > 0; delay--) {}
    sys_free(blk1);
    sys_free(blk2);
    sys_free(blk3);
    for (;;) {}
}
/// Kernel thread: same heap exercise as k_thread_a, under a different name.
void k_thread_b(void* arg) {
    void* blk1 = sys_malloc(256);
    void* blk2 = sys_malloc(255);
    void* blk3 = sys_malloc(254);
    console_put_str("k_thread_b malloc addr:0x");
    console_put_int((int)blk1);
    console_put_char(',');
    console_put_int((int)blk2);
    console_put_char(',');
    console_put_int((int)blk3);
    console_put_char('\n');
    // Busy-wait so other tasks get a chance to run and print first.
    for (int delay = 100000; delay > 0; delay--) {}
    sys_free(blk1);
    sys_free(blk2);
    sys_free(blk3);
    for (;;) {}
}
// 测试用户进程
/// User process: allocate three blocks through the malloc syscall wrapper,
/// print their addresses, then free them and spin forever.
void u_prog_a(void) {
    void* blk1 = malloc(256);
    void* blk2 = malloc(255);
    void* blk3 = malloc(254);
    printf("u_prog_a malloc addr:0x%x,0x%x,0x%x\n", (int)blk1, (int)blk2, (int)blk3);
    // Busy-wait so other tasks get a chance to run and print first.
    for (int delay = 100000; delay > 0; delay--) {}
    free(blk1);
    free(blk2);
    free(blk3);
    for (;;) {}
}
/// User process: same heap exercise as u_prog_a, under a different name.
void u_prog_b(void) {
    void* blk1 = malloc(256);
    void* blk2 = malloc(255);
    void* blk3 = malloc(254);
    printf("u_prog_b malloc addr:0x%x,0x%x,0x%x\n", (int)blk1, (int)blk2, (int)blk3);
    // Busy-wait so other tasks get a chance to run and print first.
    for (int delay = 100000; delay > 0; delay--) {}
    free(blk1);
    free(blk2);
    free(blk3);
    for (;;) {}
}