2.Arm体系架构中进程切换过程
在之后的文章里,可能会有很大部分的篇幅是介绍内核如何调度和管理进程。学习了解这部分内容,很多时候是和task struct,run queue,schedule entity等数据结构打交道。关于linux进程调度的文章网上已经有很多,大多数都比较详细的介绍调度算法。调度算法的内容说白了就是在什么时间,内核会用什么样的进程替换目前正在运行的进程。而最后进程间的切换是如何完成的,介绍的文章较少。在此,我们在修炼高深内功之前,必须了解一下这个内功中的内功,打通一下任督二脉。下面介绍一下进程切换的过程。
/** context_switch - switch to the new MM and the new* thread's register state.*/staticinlinevoidcontext_switch(structrq *rq,structtask_struct *prev,structtask_struct *next){structmm_struct *mm, *oldmm;prepare_task_switch(rq, prev, next);trace_sched_switch(rq, prev, next);mm = next->mm;oldmm = prev->active_mm;/** For paravirt, this is coupled with an exit in switch_to to* combine the page table reload and the switch backend into* one hypercall.*/arch_enter_lazy_cpu_mode();if(unlikely(!mm)){next->active_mm = oldmm;atomic_inc(&oldmm->mm_count);enter_lazy_tlb(oldmm, next);}elseswitch_mm(oldmm, mm, next);if(unlikely(!prev->mm)){prev->active_mm = NULL;rq->prev_mm = oldmm;}/** Since the runqueue lock will be released by the next* task (which is an invalid locking op but in the case* of the scheduler it's an obvious special-case), so we* do an early lockdep release here:*/#ifndef__ARCH_WANT_UNLOCKED_CTXSWspin_release(&rq->lock.dep_map,1, _THIS_IP_);#endif/* Here we just switch the register state and the stack. */switch_to(prev, next, prev);barrier();/** this_rq must be evaluated again because prev may have moved* CPUs since it called schedule(), thus the 'rq' on its stack* frame will be invalid.*/finish_task_switch(this_rq(), prev);}
context_switch函数就是在进程调度过程中完成进程切换的函数,prev是被切换掉的进程,而next是切换到的进程。
代码首先判断切换到的进程(next)有没有用户空间,如果next->mm为空,表示next指向的进程是一个内核线程,根本就没有用户空间。这个时候,next进程根本就不会去访问用户空间的地址,因此,不需要更换MMU的页目录表,只需借用prev进程的地址空间(active_mm)即可。enter_lazy_tlb函数对于ARM架构就是一个空实现。如果next是个普通的用户进程,就执行switch_mm完成MMU页目录表的更换。
下面代码就是更换MMU页目录表的过程,其实很简单,就是把新的页目录表的基地址设置到cp15协处理器中。每个进程的页目录表基地址保存在mm_struct中的pgd中。
/*
 * cpu_arm920_switch_mm(pgd)
 *
 * Set the translation base pointer to be as described by pgd.
 *
 * pgd: new page tables
 */
	.align	5
ENTRY(cpu_arm920_switch_mm)
#ifdef CONFIG_MMU
	mov	ip, #0
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
	mcr	p15, 0, ip, c7, c6, 0		@ invalidate D cache
#else
@ && 'Clean & Invalidate whole DCache'
@ && Re-written to use Index Ops.
@ && Uses registers r1, r3 and ip
	mov	r1, #(CACHE_DSEGMENTS - 1) << 5		@ 8 segments
1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26	@ 64 entries
2:	mcr	p15, 0, r3, c7, c14, 2		@ clean & invalidate D index
	subs	r3, r3, #1 << 26
	bcs	2b				@ entries 63 to 0
	subs	r1, r1, #1 << 5
	bcs	1b				@ segments 7 to 0
#endif
	mcr	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mcr	p15, 0, r0, c2, c0, 0		@ load page table pointer
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
#endif
	mov	pc, lr
完成了页目录表的替换,再次回到context_switch函数中来,最后要完成进程切换,即切换到next进程的执行流程上去。
这个切换过程也比较简单,首先保存当前cpu现场,然后设置了一下cp15协处理器的domain寄存器,之后恢复next进程的cpu现场。切换到了next进程。
/*
 * switch_to(prev, next) should switch from task `prev' to `next'
 * `prev' will never be the same as `next'. schedule() itself
 * contains the memory barrier to tell GCC not to cache `current'.
 */
extern struct task_struct *__switch_to(struct task_struct *, struct thread_info *, struct thread_info *);
/*
 * `last' receives the return value of __switch_to(): the task that was
 * actually running before us when we resume here on this CPU.  By the
 * time the macro "returns", the stack and registers belong to `next',
 * so `prev' on the old stack can no longer be trusted.
 */
#define switch_to(prev,next,last)\
do {\
last = __switch_to(prev,task_thread_info(prev), task_thread_info(next));\
} while (0)
/*
 * __switch_to(prev_task, prev_thread_info, next_thread_info)
 *
 * r0 = previous task_struct, r1 = previous thread_info,
 * r2 = next thread_info.  Saves the callee-saved register context of
 * the outgoing thread into prev's thread_info.cpu_context, then loads
 * next's saved context — the final ldmia loads pc, completing the switch.
 */
ENTRY(__switch_to)
	add	ip, r1, #TI_CPU_SAVE		@ ip = &prev->cpu_context
	ldr	r3, [r2, #TI_TP_VALUE]		@ next's TLS value
	stmia	ip!, {r4 - sl, fp, sp, lr}	@ Store most regs on stack
#ifdef CONFIG_MMU
	ldr	r6, [r2, #TI_CPU_DOMAIN]	@ next's domain access value
#endif
#ifdef CONFIG_MMU
	mcr	p15, 0, r6, c3, c0, 0		@ Set domain register
#endif
	mov	r5, r0				@ preserve prev across the call
	add	r4, r2, #TI_CPU_SAVE		@ r4 = &next->cpu_context
	ldr	r0, =thread_notify_head
	mov	r1, #THREAD_NOTIFY_SWITCH
	bl	atomic_notifier_call_chain	@ notify (VFP, TLS, ...) of switch
	mov	r0, r5				@ return value: prev task_struct
	ldmia	r4, {r4 - sl, fp, sp, pc}	@ Load all regs saved previously
ENDPROC(__switch_to)