实验一
g:
pushl %ebp// save caller's frame pointer (esp -= 4)
movl %esp, %ebp// establish g's own stack frame
movl 8(%ebp), %eax// eax = first argument (at ebp+8, above saved ebp and return address)
addl $2823, %eax// eax += 2823; eax carries the return value
popl %ebp// restore caller's frame pointer (esp moves back up by 4)
ret // pop return address into eip and resume in the caller
f:
pushl %ebp// save caller's ebp (esp -= 4)
movl %esp, %ebp// establish f's own stack frame
subl $4, %esp // reserve 4 bytes of stack for g's argument
movl 8(%ebp), %eax// eax = f's first argument (at ebp+8)
movl %eax, (%esp)// pass that value through as g's argument
call g// push return address (eip) onto the stack and jump to g
leave // equivalent to: movl %ebp,%esp ; popl %ebp
ret // return to caller; g's result is still in eax
main:
pushl %ebp// save caller's ebp
movl %esp, %ebp// establish main's stack frame
subl $4, %esp// reserve 4 bytes of stack for f's argument
movl $2, (%esp)// store the immediate argument 2 at the top of the stack
call f// push the return address (eip) onto the stack (esp -= 4) and jump to f
addl $1, %eax// eax = f(2) + 1 = (2 + 2823) + 1 = 2826; main's return value
leave // restore esp/ebp to their state at main's entry
ret // return from main
实验二
通过下述命令执行仅含时钟中断的mykernel内核,结果如下图所示。
cd ~/LinuxKernel/linux-3.9.4
rm -rf mykernel
patch -p1 < ../mykernel_for_linux3.9.4sc.patch
make allnoconfig
make
qemu -kernel arch/x86/boot/bzImage
之后重新修改了mymain.c和myinterrupt.c文件并且添加了mypcb.h文件,重新编译。
#define MAX_TASK_NUM 4 /* number of tasks in the circular run list */
#define KERNEL_STACK_SIZE 1024*2 /* per-task kernel stack size, counted in unsigned longs (see stack[]) */
/* Saved execution context of a task: instruction and stack pointers. */
struct Thread {
unsigned long ip; /* saved eip: address where the task resumes */
unsigned long sp; /* saved esp: current top of the task's kernel stack */
};
/* Process control block: one per task, linked into a circular list. */
typedef struct PCB{
int pid; /* task id, 0 .. MAX_TASK_NUM-1 */
volatile long state; /* 0 = runnable (the only state this code ever sets/tests) */
unsigned long stack[KERNEL_STACK_SIZE]; /* the task's private kernel stack */
struct Thread thread; /* saved ip/sp used by the context switch */
unsigned long task_entry; /* entry point of the task (here: my_process) */
struct PCB *next; /* next task in the circular run list */
}tPCB;
/* Switch from the current task to my_current_task->next (round-robin). */
void my_schedule(void);
#include <linux/types.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/tty.h>
#include <linux/vmalloc.h>
#include "mypcb.h"
tPCB task[MAX_TASK_NUM]; /* static task table; index doubles as pid */
tPCB * my_current_task = NULL; /* the task currently running on the CPU */
volatile int my_need_sched = 0; /* set by the timer handler to request a task switch */
void my_process(void); /* common loop body executed by every task */
/*
 * Kernel entry point: build the circular task list and hand the CPU
 * to task 0 by loading its saved stack pointer and jumping to its entry.
 */
void __init my_start_kernel(void)
{
int pid = 0;
int i;
/* Initialize task 0 by hand; the others are cloned from it below. */
task[pid].pid = pid;
task[pid].state = 0; /* 0 = runnable */
task[pid].task_entry = task[pid].thread.ip = (unsigned long)my_process;
/* Stack grows down: start sp at the highest slot of the private stack. */
task[pid].thread.sp = (unsigned long)&task[pid].stack[KERNEL_STACK_SIZE-1];
task[pid].next = &task[pid]; /* list of one: task 0 points at itself */
for(i=1;i<MAX_TASK_NUM;i++){
memcpy(&task[i],&task[0],sizeof(tPCB));
task[i].pid = i;
task[i].thread.sp = (unsigned long)(&task[i].stack[KERNEL_STACK_SIZE-1]);
/* Insert task i after task i-1, keeping the list circular. */
task[i].next = task[i-1].next;
task[i-1].next = &task[i];
}
pid = 0;
my_current_task = &task[pid];
/* Hand-rolled first dispatch: %0 = task 0's ip (ecx), %1 = its sp (edx). */
asm volatile(
"movl %1,%%esp\n\t" /* switch onto task 0's kernel stack */
"pushl %1\n\t" /* push sp value (popped by the "popl %ebp" below if my_process ever returned) */
"pushl %0\n\t" /* push task 0's entry address */
"ret\n\t" /* pop it into eip: start running my_process */
"popl %%ebp\n\t" /* never reached: my_process loops forever */
:
: "c" (task[pid].thread.ip),"d" (task[pid].thread.sp));
}
int i = 0; /* shared tick counter used to throttle the print/schedule rate */
/*
 * Loop body shared by every task: spin, and every 10,000,000 iterations
 * print a marker, honor a pending reschedule request, then print again
 * (the second line shows which task resumed after the switch).
 */
void my_process(void)
{
    for (;;) {
        i++;
        if (i % 10000000 != 0)
            continue;

        printk(KERN_NOTICE "this is process %d -\n",my_current_task->pid);
        if (my_need_sched == 1) {
            my_need_sched = 0; /* consume the request before switching */
            my_schedule();
        }
        printk(KERN_NOTICE "this is process %d +\n",my_current_task->pid);
    }
}
#include <linux/types.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/tty.h>
#include <linux/vmalloc.h>
#include "mypcb.h"
extern tPCB task[MAX_TASK_NUM]; /* task table, defined in mymain.c */
extern tPCB * my_current_task; /* currently running task, defined in mymain.c */
extern volatile int my_need_sched; /* reschedule request flag, defined in mymain.c */
volatile int time_count = 0; /* timer ticks seen so far */
/*
 * Called by the timer interrupt.
 * It runs in the name of the current running process,
 * so it uses the kernel stack of the current running process.
 */
/*
 * Timer-tick hook: roughly every 1000 ticks, raise my_need_sched so the
 * running task switches out at its next check. The flag is only set when
 * no request is already pending.
 */
void my_timer_handler(void){
#if 1
    int request_switch = (time_count % 1000 == 0) && (my_need_sched != 1);
    if (request_switch) {
        printk(KERN_NOTICE ">>>my_timer_handler here<<<\n");
        my_need_sched = 1;
    }
    /* Count the tick after the test, so the very first tick (0) fires. */
    time_count ++ ;
#endif
    return;
}
/*
 * Round-robin context switch: save the current task's esp/ebp and a resume
 * address, then restore the next task's saved esp and jump to its saved ip.
 * Called from my_process, so it runs on the outgoing task's kernel stack.
 */
void my_schedule(void){
tPCB * next;
tPCB * prev;
if(my_current_task == NULL || my_current_task->next == NULL){
return;
}
printk(KERN_NOTICE ">>>my_schedule<<<\n");
/* schedule */
next = my_current_task->next;
prev = my_current_task;
if(next->state == 0){ /* next is runnable (only state used in this kernel) */
my_current_task = next;
printk(KERN_NOTICE ">>>switch %d to %d<<<\n",prev->pid,next->pid);
/* %0 = prev->sp (out), %1 = prev->ip (out), %2 = next->sp (in), %3 = next->ip (in) */
asm volatile(
"pushl %%ebp\n\t" /* save prev's ebp on prev's own stack */
"movl %%esp,%0\n\t" /* record prev's esp */
"movl %2,%%esp\n\t" /* switch to next's saved stack */
"movl $1f,%1\n\t" /* prev will resume at local label 1 below */
"pushl %3\n\t" /* push next's saved ip ... */
"ret\n\t" /* ... and jump to it */
"1:\t" /* resume point: executed when prev is switched back in */
"popl %%ebp\n\t" /* restore prev's ebp, matching the pushl above */
: "=m" (prev->thread.sp),"=m" (prev->thread.ip)
: "m" (next->thread.sp),"m" (next->thread.ip));
}
return;
}
最后运行增加了时间片轮转的多道程序调度功能的mykernel内核,得到结果如下图。
总结
通过这两次实验,我了解了汇编过程中堆栈的变化,也加深了对操作系统进程切换机制的理解。