8. Understanding When Process Scheduling Happens: Tracing and Analyzing Process Scheduling and Process Switching

Copyright notice: this is the author's original article; please do not repost without permission.
Author: Zhou Yi. Original work; when reposting please credit the source: the 《Linux内核分析》 MOOC course, http://mooc.study.163.com/course/USTC-1000029000
I. Process Switching
We have previously analyzed the general flow of an interrupt, shown in the figure below:
[Figure: general flow of interrupt handling]
Process switching hooks into this flow, and the switch itself is driven by schedule(), as the figure shows.
The most general case is a running user-mode process X switching to user-mode process Y:
1. User-mode process X is running.
2. An interrupt occurs: save cs:eip/esp/eflags (of current) to the kernel stack, then load cs:eip (the entry of the specific ISR) and ss:esp (pointing to the kernel stack).
3. SAVE_ALL // save the remaining registers, completing the context save
4. schedule() is called during interrupt handling or on the interrupt-return path; switch_to inside it performs the key process-context switch (see the sketch after this list).
5. Execution resumes after label 1: in process Y's context (Y was switched out through these same steps earlier, so it can continue from label 1:).
6. restore_all // restore the saved context
7. iret: pop cs:eip/ss:esp/eflags from the kernel stack
8. User-mode process Y continues running.
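
Step 4 is the crux. The real scheduling point lives in the assembly of arch/x86/kernel/entry_32.S, but as a hedged C rendering (the helper name below is hypothetical, not a kernel function) it amounts to:

#include <linux/sched.h>
#include <linux/thread_info.h>

/* Hypothetical helper illustrating step 4: before returning from an
 * interrupt, the kernel tests TIF_NEED_RESCHED; if it is set, schedule()
 * runs, and switch_to() inside it swaps the process context. */
static void maybe_resched_on_irq_return(void)
{
    if (test_thread_flag(TIF_NEED_RESCHED))
        schedule();
}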

Several special cases:
1. Switching between a user-mode process and a kernel thread, or between two kernel threads, at a scheduling point during interrupt handling: very similar to the general case above, except that an interrupt taken while a kernel thread is running involves no user-mode/kernel-mode transition.
2. A kernel thread calls schedule() voluntarily: only the process context is switched, with no interrupt context switch, so this is slightly simpler than the general case (see the sketch after this list).
3. A system call that creates a child process, such as fork: where the child starts executing and how it returns to user mode.
4. Returning to user mode after loading a new executable, as with execve.
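
Special case 2 is easy to picture concretely. Here is a minimal sketch of a kernel thread that blocks by calling schedule() itself; the thread function is hypothetical, but the pattern (set the task state, then call schedule()) is the standard kthread idiom:

#include <linux/kthread.h>
#include <linux/sched.h>

static int my_kthread_fn(void *data)   /* hypothetical thread function */
{
    set_current_state(TASK_INTERRUPTIBLE);
    while (!kthread_should_stop()) {
        schedule();                    /* voluntary scheduling point */
        set_current_state(TASK_INTERRUPTIBLE);
    }
    __set_current_state(TASK_RUNNING); /* undo the state change before exiting */
    return 0;
}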
II. Analysis of schedule()
To understand what a process switch does, we need to look at what schedule() does.
schedule() is defined in linux-3.18.6/kernel/sched/core.c:

asmlinkage __visible void __sched schedule(void)
{
    struct task_struct *tsk = current;  // the currently running task

    sched_submit_work(tsk);  // flush pending work (e.g. plugged block I/O) before we may sleep
    __schedule();            // perform the actual scheduling
}
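
For reference, sched_submit_work() is defined in the same file; the idea is that if the task is about to sleep and has plugged block I/O queued, that I/O must be submitted first so the task cannot deadlock waiting on its own requests. Quoted from 3.18.6 to the best of my reading:

static inline void sched_submit_work(struct task_struct *tsk)
{
    if (!tsk->state || tsk_is_pi_blocked(tsk))
        return;
    /*
     * If we are going to sleep and we have plugged IO queued,
     * make sure to submit it to avoid deadlocks.
     */
    if (blk_needs_flush_plug(tsk))
        blk_schedule_flush_plug(tsk);
}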

As you can see, schedule() delegates to __schedule(); its code follows (the key steps are the lines I have commented):

static void __sched __schedule(void)
{
    struct task_struct *prev, *next;
    unsigned long *switch_count;
    struct rq *rq;
    int cpu;

need_resched:
    preempt_disable();
    cpu = smp_processor_id();
    rq = cpu_rq(cpu);
    rcu_note_context_switch(cpu);
    prev = rq->curr;

    schedule_debug(prev);

    if (sched_feat(HRTICK))
        hrtick_clear(rq);

    /*
     * Make sure that signal_pending_state()->signal_pending() below
     * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE)
     * done by the caller to avoid the race with signal_wake_up().
     */
    smp_mb__before_spinlock();
    raw_spin_lock_irq(&rq->lock);

    switch_count = &prev->nivcsw;
    if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
        if (unlikely(signal_pending_state(prev->state, prev))) {
            prev->state = TASK_RUNNING;
        } else {
            deactivate_task(rq, prev, DEQUEUE_SLEEP);
            prev->on_rq = 0;

            /*
             * If a worker went to sleep, notify and ask workqueue
             * whether it wants to wake up a task to maintain
             * concurrency.
             */
            if (prev->flags & PF_WQ_WORKER) {
                struct task_struct *to_wakeup;

                to_wakeup = wq_worker_sleeping(prev, cpu);
                if (to_wakeup)
                    try_to_wake_up_local(to_wakeup);
            }
        }
        switch_count = &prev->nvcsw;
    }

    if (task_on_rq_queued(prev) || rq->skip_clock_update < 0)
        update_rq_clock(rq);

    next = pick_next_task(rq, prev); // the per-class scheduling algorithms are all encapsulated inside this function
    clear_tsk_need_resched(prev);
    clear_preempt_need_resched();
    rq->skip_clock_update = 0;

    if (likely(prev != next)) {
        rq->nr_switches++;
        rq->curr = next;
        ++*switch_count;

        context_switch(rq, prev, next); /* unlocks the rq; performs the process-context switch */
        /*
         * The context switch have flipped the stack from under us
         * and restored the local variables which were saved when
         * this task called schedule() in the past. prev == current
         * is still correct, but it can be moved to another cpu/rq.
         */
        cpu = smp_processor_id();
        rq = cpu_rq(cpu);
    } else
        raw_spin_unlock_irq(&rq->lock);

    post_schedule(rq);

    sched_preempt_enable_no_resched();
    if (need_resched())
        goto need_resched;
}

The switch itself boils down to these two lines:
next = pick_next_task(rq, prev);   // the per-class scheduling algorithms are encapsulated inside this function
context_switch(rq, prev, next);    // process-context switch
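
Before moving on, a word on pick_next_task(), where the scheduling-class policies hide. Lightly abridged from the same core.c (the fast path that queries the fair class directly is omitted here), it walks the scheduling classes in priority order and returns the first runnable task it finds:

static inline struct task_struct *
pick_next_task(struct rq *rq, struct task_struct *prev)
{
    const struct sched_class *class;
    struct task_struct *p;

again:
    for_each_class(class) {            /* stop, deadline, rt, fair, idle */
        p = class->pick_next_task(rq, prev);
        if (p) {
            if (unlikely(p == RETRY_TASK))
                goto again;
            return p;
        }
    }

    BUG();  /* the idle class always has a runnable task */
}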
Let us focus on context_switch(), the function that performs the process-context switch:

static inline void
context_switch(struct rq *rq, struct task_struct *prev,
           struct task_struct *next)
{
    struct mm_struct *mm, *oldmm;  // address spaces

    prepare_task_switch(rq, prev, next);  // preparation for the task switch

    mm = next->mm;  // next's user address space (NULL for a kernel thread)
    oldmm = prev->active_mm;
    /*
     * For paravirt, this is coupled with an exit in switch_to to
     * combine the page table reload and the switch backend into
     * one hypercall.
     */
    arch_start_context_switch(prev);

    if (!mm) {
        next->active_mm = oldmm;
        atomic_inc(&oldmm->mm_count);
        enter_lazy_tlb(oldmm, next);
    } else
        switch_mm(oldmm, mm, next);  // switch the address space (load next's page tables)

    if (!prev->mm) {
        prev->active_mm = NULL;
        rq->prev_mm = oldmm;
    }
    /*
     * Since the runqueue lock will be released by the next
     * task (which is an invalid locking op but in the case
     * of the scheduler it's an obvious special-case), so we
     * do an early lockdep release here:
     */
    spin_release(&rq->lock.dep_map, 1, _THIS_IP_);

    context_tracking_task_switch(prev, next);
    /* Here we just switch the register state and the stack. */
    switch_to(prev, next, prev);  // switch the kernel stack and register state

    barrier();
    /*
     * this_rq must be evaluated again because prev may have moved
     * CPUs since it called schedule(), thus the 'rq' on its stack
     * frame will be invalid.
     */
    finish_task_switch(this_rq(), prev);
}
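
One detail worth spelling out: kernel threads own no user address space (tsk->mm == NULL), which is exactly what the !mm branch above handles; next borrows prev's active_mm and enters lazy-TLB mode instead of reloading page tables. An illustrative helper (not in the kernel source) makes the test explicit:

/* Hypothetical helper: a kernel thread is recognizable by mm == NULL;
 * while it runs, it uses active_mm, borrowed from the task it replaced. */
static inline bool task_is_kernel_thread(const struct task_struct *tsk)
{
    return tsk->mm == NULL;
}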

The code above mainly performs the address-space switch plus some preparatory work, then enters switch_to for the actual process-context switch. switch_to is a macro, defined in linux-3.18.6/arch/x86/include/asm/switch_to.h:

#define switch_to(prev, next, last)                 \
do {                                    \
    /*                              \
     * Context-switching clobbers all registers, so we clobber  \
     * them explicitly, via unused output variables.        \
     * (EAX and EBP is not listed because EBP is saved/restored \
     * explicitly for wchan access and EAX is the return value of   \
     * __switch_to())                       \
     */                             \
    unsigned long ebx, ecx, edx, esi, edi;              \
                                    \
    asm volatile("pushfl\n\t"      /* save    flags */ \
             "pushl %%ebp\n\t"      /* save    EBP   */ \
             "movl %%esp,%[prev_sp]\n\t"    /* save    ESP   */ \
             "movl %[next_sp],%%esp\n\t"    /* restore ESP   */ \
             "movl $1f,%[prev_ip]\n\t" /* save    EIP   */ \
             "pushl %[next_ip]\n\t" /* restore EIP   */ \
             __switch_canary                    \
             "jmp __switch_to\n"    /* regparm call  */ \
             "1:\t"                     \
             "popl %%ebp\n\t"       /* restore EBP   */ \
             "popfl\n"          /* restore flags */ \
                                    \
             /* output parameters */                \
             : [prev_sp] "=m" (prev->thread.sp),        \
               [prev_ip] "=m" (prev->thread.ip),        \
               "=a" (last),                 \
                                    \
               /* clobbered output registers: */        \
               "=b" (ebx), "=c" (ecx), "=d" (edx),      \
               "=S" (esi), "=D" (edi)               \
                                        \
               __switch_canary_oparam               \
                                    \
               /* input parameters: */              \
             : [next_sp]  "m" (next->thread.sp),        \
               [next_ip]  "m" (next->thread.ip),        \
                                        \
               /* regparm parameters for __switch_to(): */  \
               [prev]     "a" (prev),               \
               [next]     "d" (next)                \
                                    \
               __switch_canary_iparam               \
                                    \
             : /* reloaded segment registers */         \
            "memory");                  \
} while (0)

Now let us analyze the assembly itself. First, the operand bindings:
[next_sp] "m" (next->thread.sp)  // the symbolic name next_sp stands for the memory operand next->thread.sp
[next_ip] "m" (next->thread.ip)  // the symbolic name next_ip stands for the memory operand next->thread.ip
[prev] "a" (prev)   // prev is passed in EAX (regparm argument for __switch_to)
[next] "d" (next)   // next is passed in EDX (regparm argument for __switch_to)
Walking through the instruction sequence:
"pushfl\n\t"                  /* push prev's EFLAGS onto prev's kernel stack */
"pushl %%ebp\n\t"             /* push prev's frame pointer EBP */
"movl %%esp,%[prev_sp]\n\t"   /* save prev's stack pointer ESP into prev->thread.sp */
"movl %[next_sp],%%esp\n\t"   /* load next->thread.sp into ESP: from here on we run on next's kernel stack */
"movl $1f,%[prev_ip]\n\t"     /* store the address of label 1: into prev->thread.ip; when prev is later switched back in, it resumes at 1: */
"pushl %[next_ip]\n\t"        /* push next->thread.ip: next's resume address */
__switch_canary
"jmp __switch_to\n"           /* jump (not call) to __switch_to(); its ret pops next->thread.ip into EIP */
"1:\t"
"popl %%ebp\n\t"              /* restore EBP (now next's) */
"popfl\n"                     /* restore EFLAGS (now next's) */

In essence, this sequence saves prev's flags, frame pointer, stack pointer, and resume address (its execution "breakpoint"), then switches to next's kernel stack and pushes next's resume address. The C function __switch_to() switches the remaining hardware context (for example the TSS kernel-stack field, FPU state, and segment registers); because it was entered with jmp rather than call, its ret transfers control to next->thread.ip. At that point the main work of schedule() is complete, and when next eventually runs back down its interrupt-return path, iret restores its user-mode context and next resumes executing in user mode.
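
This also explains special case 3 from part I: a newly forked child has never executed the switch_to sequence, so it has no saved resume point at label 1:. Instead, copy_thread() seeds the child's thread fields so that the child's very first switch_to lands in ret_from_fork, which then returns to user mode. An abridged sketch of the relevant lines (based on arch/x86/kernel/process_32.c in 3.18; error handling and the kernel-thread branch are omitted):

int copy_thread(unsigned long clone_flags, unsigned long sp,
                unsigned long arg, struct task_struct *p)
{
    struct pt_regs *childregs = task_pt_regs(p);

    p->thread.sp = (unsigned long) childregs;      /* child's kernel ESP */
    p->thread.ip = (unsigned long) ret_from_fork;  /* child's first EIP */
    /* ...the rest of the setup is omitted in this sketch... */
    return 0;
}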
