目录
一、CFS 完全公平调度
二、调度实现
1、时间记账
/*
 * Update run-time statistics of the "current" entity of @cfs_rq:
 * advance exec_start, accumulate the wall-clock delta into
 * sum_exec_runtime, and add the load-weighted delta to vruntime.
 * Called periodically from the scheduler tick and from many
 * scheduler paths before the queue is inspected.
 */
static void update_curr(struct cfs_rq *cfs_rq)
{
struct sched_entity *curr = cfs_rq->curr;// scheduling entity of the currently running task
u64 now = rq_clock_task(rq_of(cfs_rq));// task clock of this runqueue, "now"
u64 delta_exec;
if (unlikely(!curr))
return;
delta_exec = now - curr->exec_start;// time elapsed since the previous accounting update
if (unlikely((s64)delta_exec <= 0))
return;
curr->exec_start = now;// remember the timestamp of this update
schedstat_set(curr->statistics.exec_max,
max(delta_exec, curr->statistics.exec_max));
curr->sum_exec_runtime += delta_exec;// total real (wall-clock) runtime of the task
schedstat_add(cfs_rq->exec_clock, delta_exec);
curr->vruntime += calc_delta_fair(delta_exec, curr);// weight delta_exec by the entity's load and add to virtual runtime
update_min_vruntime(cfs_rq);
...
}
delta_exec:当前进程本次的执行时间,作为参数又传递给 calc_delta_fair。
calc_delta_fair 再根据当前进程的权重(weight)对运行时间进行加权计算,将加权后的值累加到当前进程的 vruntime 上。curr->vruntime += calc_delta_fair(delta_exec, curr);
update_curr 是由系统定时器周期性调用的。无论进程处于可运行还是阻塞状态,vruntime 都可以准确地反映给定进程的运行情况,并据此决定谁应该是下一个被运行的进程。
2、调度器入口
schedule->__schedule->pick_next_task->context_switch
/*
 * Pick the next task to run from the CFS runqueue of @rq.
 *
 * Walks down the group-scheduling hierarchy: at every level, update the
 * current entity's accounting and pick the best (leftmost / smallest
 * vruntime) entity, then descend into that entity's own cfs_rq until a
 * leaf (a real task) is reached.
 *
 * BUG FIX: the excerpt previously ended with "return NULL;", which
 * discarded the task that was just picked; the fast path must return @p.
 */
struct task_struct *
pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
{
	struct cfs_rq *cfs_rq = &rq->cfs;
	struct sched_entity *se;
	struct task_struct *p;
	int new_tasks;

	do {
		struct sched_entity *curr = cfs_rq->curr;

		if (curr) {
			if (curr->on_rq)
				update_curr(cfs_rq); /* keep accounting fresh before picking */
			else
				curr = NULL; /* curr already dequeued, ignore it */

			/* CFS bandwidth control: the hierarchy may have been throttled. */
			if (unlikely(check_cfs_rq_runtime(cfs_rq))) {
				cfs_rq = &rq->cfs;
				if (!cfs_rq->nr_running)
					goto idle;
				goto simple;
			}
		}

		/* Leftmost entity of the rbtree (or curr, if it is still the best). */
		se = pick_next_entity(cfs_rq, curr);
		cfs_rq = group_cfs_rq(se); /* group's own runqueue; NULL for a task */
	} while (cfs_rq);

	p = task_of(se);

	if (prev != p) {
		struct sched_entity *pse = &prev->se;
		...
		put_prev_entity(cfs_rq, pse); /* put the previous entity back into the tree */
		set_next_entity(cfs_rq, se);  /* take @se out of the tree and mark it current */
	}

	return p;
}
上下文切换本身通过调用两个特定于处理器的函数完成
1)switch_mm 更换通过task_struct->mm描述的内存管理上下文
2)switch_to切换处理器寄存器内容和内核栈,通常使用汇编编写
/*
 * Switch from task @prev to task @next on runqueue @rq.
 * Two architecture-specific steps do the real work:
 *   switch_mm_irqs_off() - swap the memory-management context (task_struct->mm)
 *   switch_to()          - swap register state and kernel stack (assembly)
 */
static __always_inline struct rq *
context_switch(struct rq *rq, struct task_struct *prev,
struct task_struct *next, struct rq_flags *rf)
{
prepare_task_switch(rq, prev, next);
arch_start_context_switch(prev);
/*
 * A kernel thread has no mm of its own (next->mm == NULL); it borrows
 * the previous task's active_mm so no address-space switch is needed.
 */
if (!next->mm) { // to kernel
enter_lazy_tlb(prev->active_mm, next);
next->active_mm = prev->active_mm;
if (prev->mm) // from user
mmgrab(prev->active_mm); // pin the borrowed mm; dropped later in finish_task_switch()
else
prev->active_mm = NULL;
} else { // to user
membarrier_switch_mm(rq, prev->active_mm, next->mm);
switch_mm_irqs_off(prev->active_mm, next->mm, next); // actual address-space switch
if (!prev->mm) { // from kernel
/* will mmdrop() in finish_task_switch(). */
rq->prev_mm = prev->active_mm;
prev->active_mm = NULL;
}
}
prepare_lock_switch(rq, next, rf);
/* Here we just switch the register state and the stack. */
switch_to(prev, next, prev);
/* Everything below runs only when this task is scheduled back in. */
barrier();
return finish_task_switch(prev); // cleanup for the old task; releases the rq lock correctly
}
switch_to(prev, next, prev);//之后的代码只有在当前进程的下一次被选择运行时才会执行
barrier();//前后语句执行顺序不会因为任何可能的优化而改变
return finish_task_switch(prev);//完成清理工作,才能够正确释放锁
3、进程选择
实现函数pick_next_entity->__pick_next_entity
/* Return the in-order successor of @se in the runqueue rbtree, or NULL. */
static struct sched_entity *__pick_next_entity(struct sched_entity *se)
{
	struct rb_node *node;

	node = rb_next(&se->run_node);
	return node ? rb_entry(node, struct sched_entity, run_node) : NULL;
}
/*
 * Make @se the entity that is running on @cfs_rq: dequeue it from the
 * rbtree (the running entity is never kept in the tree), start its
 * execution-time statistics, and remember its runtime so the next
 * preemption check can compute how long it has run this round.
 */
static void
set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
/* 'current' is not kept within the tree. */
if (se->on_rq) {// entity is still enqueued in the rbtree
update_stats_wait_end(cfs_rq, se);
__dequeue_entity(cfs_rq, se);// remove the newly picked entity from the rbtree
update_load_avg(cfs_rq, se, UPDATE_TG);
}
update_stats_curr_start(cfs_rq, se);
cfs_rq->curr = se;// mark se as the currently running scheduling entity
if (schedstat_enabled() &&
rq_of(cfs_rq)->cfs.load.weight >= 2*se->load.weight) {
schedstat_set(se->statistics.slice_max,
max((u64)schedstat_val(se->statistics.slice_max),
se->sum_exec_runtime - se->prev_sum_exec_runtime));
}
se->prev_sum_exec_runtime = se->sum_exec_runtime;// snapshot total runtime at slice start
}
4、睡眠和唤醒
check_preempt_wakeup->wakeup_preempt_entity
//判断新进程是否可以抢占当前进程
// Decide whether the newly woken task @p should preempt the current task.
static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
{
struct task_struct *curr = rq->curr;
struct sched_entity *se = &curr->se, *pse = &p->se;// se: current task's entity, pse: woken task's entity
struct cfs_rq *cfs_rq = task_cfs_rq(curr);
int scale = cfs_rq->nr_running >= sched_nr_latency;
int next_buddy_marked = 0;
if (unlikely(se == pse))
return;
if (unlikely(throttled_hierarchy(cfs_rq_of(pse))))
return;
if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK)) {
set_next_buddy(pse);// hint the picker to favour the woken task next
next_buddy_marked = 1;
}
if (test_tsk_need_resched(curr))
return;// a reschedule is already pending, nothing more to do
/* Idle tasks are by definition preempted by non-idle tasks. */
if (unlikely(task_has_idle_policy(curr)) &&
likely(!task_has_idle_policy(p)))
goto preempt;
if (unlikely(p->policy != SCHED_NORMAL) || !sched_feat(WAKEUP_PREEMPTION))
return;
find_matching_se(&se, &pse);// walk up to entities on the same cfs_rq level
update_curr(cfs_rq_of(se));
BUG_ON(!pse);
if (wakeup_preempt_entity(se, pse) == 1) {// does the woken entity deserve to preempt?
if (!next_buddy_marked)
set_next_buddy(pse);
goto preempt;
}
return;
preempt:
resched_curr(rq);// set TIF_NEED_RESCHED on the current task
}
/*
 * Should @se preempt @curr on wakeup?
 *   -1: se's vruntime is not smaller than curr's   -> no preemption
 *    1: se is ahead by more than the wakeup granularity -> preempt
 *    0: difference is within the granularity        -> no preemption
 */
static int
wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
{
	s64 vdiff = curr->vruntime - se->vruntime;

	if (vdiff <= 0)
		return -1;

	if (vdiff > wakeup_gran(curr))
		return 1;

	return 0;
}
三、CFS调度中vruntime
1)新进程vruntime的值
/*
 * CFS hook called at fork time: give the child task @p an initial
 * vruntime, optionally let the child run first, and make the stored
 * vruntime relative so it survives migration to another CPU's cfs_rq.
 */
static void task_fork_fair(struct task_struct *p)
{
struct cfs_rq *cfs_rq;
struct sched_entity *se = &p->se, *curr;
struct rq *rq = this_rq();
struct rq_flags rf;
rq_lock(rq, &rf);
update_rq_clock(rq);
cfs_rq = task_cfs_rq(current);
curr = cfs_rq->curr;
if (curr) {
update_curr(cfs_rq);// refresh the parent's vruntime first
se->vruntime = curr->vruntime;// child starts from the parent's vruntime
}
place_entity(cfs_rq, se, 1);// adjust the child's vruntime; 1 = newly forked entity
// sysctl_sched_child_runs_first: non-zero means the child should run before the parent
if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) {
// if the child's vruntime ended up larger than the parent's, swap them
swap(curr->vruntime, se->vruntime);//
resched_curr(rq);// set the TIF_NEED_RESCHED flag
}
/*
 * Store vruntime relative to this queue's min_vruntime: the child may
 * be enqueued on a different CPU whose min_vruntime differs greatly;
 * enqueue_entity() adds the destination queue's min_vruntime back.
 */
se->vruntime -= cfs_rq->min_vruntime;
rq_unlock(rq, &rf);
}
减掉 min_vruntime 的数值,是因为此时新进程还没有和某个 CPU 绑定,实际运行时可能会挂到其它 CPU 的 CFS 队列上。如果两个 CPU 的 min_vruntime 差距太大,会导致新进程的优先级过低或者过高。为了解决这个问题,在加入新 CPU 的就绪队列时会再加上该队列的 min_vruntime。等价于:新进程vruntime = 父进程vruntime + (cpux_min_vruntime - cpuy_min_vruntime)。
/*
 * Give @se a sane vruntime based on the queue's min_vruntime.
 * @initial != 0 means a newly forked entity (excerpt: sleeper path elided).
 */
static void
place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
{
u64 vruntime = cfs_rq->min_vruntime;// start from the runqueue's min_vruntime
// START_DEBIT: debit a new entity one vslice, as if it had already run once this scheduling period
if (initial && sched_feat(START_DEBIT))
vruntime += sched_vslice(cfs_rq, se);
...
// caller already did se->vruntime = curr->vruntime, so taking the max usually
// keeps the child behind the parent; also prevents gaming via the debit
se->vruntime = max_vruntime(se->vruntime, vruntime);
}
设置好时间后,接着判断是否抢占:wake_up_new_task->activate_task->check_preempt_wakeup->wakeup_preempt_entity
wakeup_preempt_entity在上面有介绍
2)阻塞的进程再次被调度时,vruntime 是多少?
activate_task–>enqueue_task_fair–>enqueue_entity
/*
 * Put @se back on @cfs_rq. A dequeued entity's vruntime was stored
 * relative (min_vruntime subtracted), so add the destination queue's
 * min_vruntime back; wakeups additionally get a sleeper credit via
 * place_entity().
 */
static void
enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
{
bool renorm = !(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_MIGRATED);
bool curr = cfs_rq->curr == se;
/*
 * Renormalize: the stored vruntime had the old queue's min_vruntime
 * subtracted at dequeue/fork time; add this queue's min_vruntime back.
 * If se is curr, do it BEFORE update_curr() so the new delta is
 * accounted on the renormalized value.
 */
if (renorm && curr)
se->vruntime += cfs_rq->min_vruntime;// undo the earlier min_vruntime subtraction
update_curr(cfs_rq);// update the current entity's time accounting
if (renorm && !curr)
se->vruntime += cfs_rq->min_vruntime;
if (flags & ENQUEUE_WAKEUP)
place_entity(cfs_rq, se, 0);// compensate a waking sleeper; 0 = not a fork
if (!curr)
__enqueue_entity(cfs_rq, se);// insert the entity into the red-black tree
se->on_rq = 1;
}
/*
 * Derive a vruntime for @se from the runqueue's min_vruntime.
 *
 * initial != 0: a newly forked entity is debited one vslice
 *               (START_DEBIT) as if it had already run this period.
 * initial == 0: a waking sleeper is credited up to one scheduling
 *               latency (halved under GENTLE_FAIR_SLEEPERS) so short
 *               sleeps are not penalised.
 *
 * An entity's vruntime is never moved backwards.
 */
static void
place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
{
	u64 base = cfs_rq->min_vruntime;

	if (initial) {
		if (sched_feat(START_DEBIT))
			base += sched_vslice(cfs_rq, se);
	} else {
		/* sleeps up to a single latency don't count. */
		unsigned long credit = sysctl_sched_latency;

		if (sched_feat(GENTLE_FAIR_SLEEPERS))
			credit >>= 1;
		base -= credit;
	}

	se->vruntime = max_vruntime(se->vruntime, base);
}
由于内核已经承诺,在当前的延迟周期内所有活动进程都至少运行一次,队列的 min_vruntime 被用作基准虚拟时间。通过减去 sysctl_sched_latency 的一半,可以确保新唤醒的进程得到适度补偿,而不至于排到当前延迟周期结束之后才运行。
如果睡眠进程已经积累了比较大的不公平,则内核必须加以考虑:如果 se->vruntime 比上述计算出的基准值更大,则保留它作为进程的 vruntime,这会使该进程处于红黑树中比较靠左的位置。
3)周期调度,vruntime时间如何计算
对于单 CPU 而言, 默认设置是小于 8 个就绪任务时就按照 6ms, 大于 8 个就绪任务时, 每个任务给 0.75 ms.
/*
 * Tick-driven preemption check: make sure no entity runs longer than
 * the slice the latency period grants it, and also preempt when the
 * leftmost entity has fallen behind curr by more than one slice.
 */
static void
check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
{
	unsigned long slice = sched_slice(cfs_rq, curr);      /* granted share */
	unsigned long ran = curr->sum_exec_runtime -
			    curr->prev_sum_exec_runtime;      /* time run this round */
	struct sched_entity *leftmost;
	s64 vdelta;

	if (ran > slice) {
		/* Slice used up: reschedule and drop any buddy hints. */
		resched_curr(rq_of(cfs_rq));
		clear_buddies(cfs_rq, curr);
		return;
	}

	/* Guarantee a minimum uninterrupted run before considering preemption. */
	if (ran < sysctl_sched_min_granularity)
		return;

	leftmost = __pick_first_entity(cfs_rq);
	vdelta = curr->vruntime - leftmost->vruntime;

	if (vdelta < 0)
		return;

	if (vdelta > slice)
		resched_curr(rq_of(cfs_rq));
}
确保没有哪个进程能够比延迟周期中确定的份额(给定的时间)运行时间更长
ideal_runtime:该份额对应的时间(给定的时间)
4)调度器优化变量
sysctl_sched_child_runs_first:控制 fork 之后 child 与 parent 谁先被调度的策略,如果为 0,则 parent 先运行。