学号271原创作品,转载请注明出处。
本次实验基于Ubuntu 18.04 使用qemu 模拟内核运行
本实验资源来源: https://github.com/mengning/linuxkernel/
实验步骤:
- mkdir mykernel //创建一个单独的目录
- 下载内核代码,由于直接在Ubuntu下载的有点慢,所以用了百度网盘提取码: wd9q
- cd mykernel 并解压linux-3.9.4.tar.xz
xz -d linux-3.9.4.tar.xz tar -xvf linux-3.9.4.tar
- 下载补丁(注意要用 raw 地址,blob 页面 wget 下来的是网页而不是补丁文件) wget https://raw.githubusercontent.com/mengning/mykernel/master/mykernel_for_linux3.9.4sc.patch
- 打补丁并编译
1.安装一些必要的包,sudo apt install qemu flex bison -y 2.打补丁:patch -p1 < ../mykernel_for_linux3.9.4sc.patch 3.make allnoconfig //所有的选项选NO 4.make 5.关于编译,由于缺少gcc5.h文件,会编译失败,所以编译前可以将自己系统的gcc.h文件copy过去 cp /usr/src/linux-headers-xx-generic/include/linux/compiler-gcc.h include/linux/compiler-gcc5.h 6.其次,如果ubuntu更新比较慢的话可以修改一下源,具体参考https://blog.csdn.net/zhangmeimei_pku/article/details/79597951
编译完成后,执行 qemu-system-i386 -kernel arch/x86/boot/bzImage
sudo apt install git -y
git clone https://github.com/mengning/mykernel
替换相应的文件,即my_interrupt.c mymain.c mypcb.h,其中mypcb.h大约第11行处有一处错误# unsigned long,需要注释掉
重新编译并执行qemu-system-i386 -kernel arch/x86/boot/bzImage
实验分析
- mypcb.h定义了PCB的结构体,记录了进程的相关信息。在本次实验中是用单向循环链表链接,可以很明显地看到进程控制块的一些重要信息,比如pid,进程状态,栈空间,进程入口,指向下一个PCB的指针,其中,thread.ip指向要执行的指令,thread.sp指向对应的栈顶指针。
/* Per-task saved CPU context (32-bit x86): the two registers needed to
   suspend a task and later resume it exactly where it left off. */
struct Thread {
unsigned long ip; // saved instruction pointer (eip)
unsigned long sp; // saved stack pointer (esp)
};
/* Process control block (PCB). All PCBs live in a static array and are
   chained through 'next' into a singly-linked circular run list. */
typedef struct PCB{
int pid; /* task id (index into the task[] array) */
volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
unsigned long stack[KERNEL_STACK_SIZE]; /* per-task kernel stack */
/* CPU-specific state of this task */
struct Thread thread; /* saved eip/esp used by my_schedule() */
unsigned long task_entry; /* task entry point (function address) */
struct PCB *next; /* next PCB in the circular list */
}tPCB;
void my_schedule(void); //声明进程调度函数
2.mymain.c
在内核启动时会先运行my_start_kernel(void)函数,先生成一个pid=0的父进程的PCB,然后再fork其余的进程PCB,最后用汇编程序将pid=0相关的PCB内容入栈,程序开始执行。
然后my_process()通过判断标识位my_need_sched来判断是否要进行调度
/* Static task table: all MAX_TASK_NUM PCBs are allocated at boot. */
tPCB task[MAX_TASK_NUM];
tPCB * my_current_task = NULL; /* PCB of the task currently on the CPU */
volatile int my_need_sched = 0; /* set to 1 by the timer interrupt to request a reschedule */
void my_process(void); /* loop body shared by every task */
/* Kernel entry point (replaces start_kernel via the mykernel patch):
 * build the circular task list, then hand the CPU to task 0 with a
 * short asm stub that loads its stack and jumps to its entry point. */
void __init my_start_kernel(void)
{
int pid = 0;
int i;
/* Initialize process 0: it will run my_process on its own stack and
 * initially links to itself, forming a one-element circular list. */
task[pid].pid = pid;
task[pid].state = 0;/* -1 unrunnable, 0 runnable, >0 stopped */
task[pid].task_entry = task[pid].thread.ip = (unsigned long)my_process;
task[pid].thread.sp = (unsigned long)&task[pid].stack[KERNEL_STACK_SIZE-1]; /* stack grows down: start at the top */
task[pid].next = &task[pid];
/* "fork" the remaining tasks: copy PCB 0, then give each copy its own
 * pid and stack top and splice it into the circular list after its
 * predecessor. */
for(i=1;i<MAX_TASK_NUM;i++)
{
memcpy(&task[i],&task[0],sizeof(tPCB));
task[i].pid = i;
//*(&task[i].stack[KERNEL_STACK_SIZE-1] - 1) = (unsigned long)&task[i].stack[KERNEL_STACK_SIZE-1];
task[i].thread.sp = (unsigned long)(&task[i].stack[KERNEL_STACK_SIZE-1]);
task[i].next = task[i-1].next;
task[i-1].next = &task[i];
}
/* start process 0 by task[0] */
pid = 0;
my_current_task = &task[pid];
/* %0 = task 0's entry eip (in ecx), %1 = task 0's stack top (in edx). */
asm volatile(
"movl %1,%%esp\n\t" /* load task 0's stack top into esp */
"pushl %1\n\t" /* push the same value as a fake saved ebp (stack is empty, so ebp == esp) */
"pushl %0\n\t" /* push task 0's entry eip */
"ret\n\t" /* pop that eip: jump into my_process, never returns */
:
: "c" (task[pid].thread.ip),"d" (task[pid].thread.sp) /* input c or d mean %ecx/%edx*/
);
}
/* Shared spin counter that paces the periodic scheduling check below. */
int i = 0;

/*
 * Loop body of every task in this demo kernel: spin, and once every
 * 10,000,000 iterations print a marker, then yield via my_schedule()
 * if the timer interrupt has raised my_need_sched. Never returns.
 */
void my_process(void)
{
    for (;;) {
        ++i;
        if (i % 10000000 != 0) {
            continue; /* not yet time to consider rescheduling */
        }
        printk(KERN_NOTICE "this is process %d -\n", my_current_task->pid);
        if (my_need_sched == 1) {
            my_need_sched = 0; /* consume the request before switching away */
            my_schedule();
        }
        printk(KERN_NOTICE "this is process %d +\n", my_current_task->pid);
    }
}
3.my_interrupt.c
my_timer_handler(void)系统会自动调用,每隔1000ms会发生一次中断,并且会把my_need_sched的值设置为1,以便my_process()调用my_schedule()
my_schedule(void)主要负责进程的切换,讲述了如何保护现场和调度的问题。
/* State shared with mymain.c */
extern tPCB task[MAX_TASK_NUM]; /* task table (defined in mymain.c) */
extern tPCB * my_current_task; /* task currently on the CPU */
extern volatile int my_need_sched; /* reschedule-request flag */
volatile int time_count = 0; /* timer-interrupt tick counter */
/*
 * Called by timer interrupt.
 * it runs in the name of current running process,
 * so it use kernel stack of current running process
 */
void my_timer_handler(void)
{
    /* Only raise the flag once per 1000 ticks, and only if the previous
     * request has already been consumed by my_process(). */
    if ((time_count % 1000) != 0 || my_need_sched == 1) {
        ++time_count;
        return;
    }
    printk(KERN_NOTICE ">>>my_timer_handler here<<<\n");
    my_need_sched = 1; /* ask my_process() to call my_schedule() */
    ++time_count;
    return;
}
/*
 * Switch the CPU from the current task to the next PCB on the circular
 * list. Called from process context (inside my_process), not from the
 * interrupt handler, so a plain stack/eip swap is sufficient.
 */
void my_schedule(void)
{
tPCB * next;
tPCB * prev;
if(my_current_task == NULL
|| my_current_task->next == NULL)
{
return; /* nothing to switch to */
}
printk(KERN_NOTICE ">>>my_schedule<<<\n");
/* schedule */
next = my_current_task->next;
prev = my_current_task;
if(next->state == 0)/* -1 unrunnable, 0 runnable, >0 stopped */
{
my_current_task = next;
printk(KERN_NOTICE ">>>switch %d to %d<<<\n",prev->pid,next->pid);
/* switch to next process:
 * %0 = prev->thread.sp (out), %1 = prev->thread.ip (out),
 * %2 = next->thread.sp (in),  %3 = next->thread.ip (in).
 * NOTE(review): a task that has never run starts at my_process and so
 * never executes the popl below; that is harmless here because
 * my_process never returns — confirm if tasks are ever made to exit. */
asm volatile(
"pushl %%ebp\n\t" /* save prev's ebp on prev's stack */
"movl %%esp,%0\n\t" /* record prev's esp in its PCB */
"movl %2,%%esp\n\t" /* switch to next's stack */
"movl $1f,%1\n\t" /* prev will resume at local label 1 below */
"pushl %3\n\t" /* push next's saved eip ... */
"ret\n\t" /* ... and jump to it */
"1:\t" /* resume point for a task switched back in */
"popl %%ebp\n\t" /* restore the ebp saved when it was switched out */
: "=m" (prev->thread.sp),"=m" (prev->thread.ip)
: "m" (next->thread.sp),"m" (next->thread.ip)
);
}
return;
}
在这里要特殊说明一下,汇编代码这一部分,在 mymain.c中也有一部分,但是my_interrupt.c上的更全一些。所以就一这一部分代码讲解一下:
asm volatile(
"pushl %%ebp\n\t" /* 保护当前进程EBP入栈 */
"movl %%esp,%0\n\t" /* 保护当前ESP到PCB */
"movl %2,%%esp\n\t" /* next进程的栈顶地址放入ESP寄存器*/
"movl $1f,%1\n\t" /* 保存当前进程的EIP(即下方标号1:处的地址)到PCB */
"pushl %3\n\t" /*把即将执行的next进程的代码入口地址入栈*/
"ret\n\t" /* restore eip */
"1:\t" /* next process start here */
"popl %%ebp\n\t"
: "=m" (prev->thread.sp),"=m" (prev->thread.ip) /*output*/
: "m" (next->thread.sp),"m" (next->thread.ip) /*input*/
);
这里要特殊说明一下,%%+寄存器,比如%%eip第一个%是转义字符,指的是寄存器.
%+数字,比如%1,指的是下面以:开头的变量,编号从零开始
$1f,指的是下面“1:\t"的位置,next process start here
实验总结
本次实验让我认识到了操作系统进程如何切换以及进程如何调度,并且对进程切换时如何通过PCB、栈空间、寄存器进行现场保护,如何完成进程切换有了深刻的理解。通过观察linux内核源代码,对进程的中断,进程的创建,父进程与子进程之间的关系有了一定的了解。